gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import typing as t
from functools import update_wrapper
from io import BytesIO
from itertools import chain
from typing import Union
from . import exceptions
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .sansio.multipart import Data
from .sansio.multipart import Epilogue
from .sansio.multipart import Field
from .sansio.multipart import File
from .sansio.multipart import MultipartDecoder
from .sansio.multipart import NeedData
from .urls import url_decode_stream
from .wsgi import _make_chunk_iter
from .wsgi import get_content_length
from .wsgi import get_input_stream
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
from tempfile import SpooledTemporaryFile
except ImportError:
from tempfile import TemporaryFile
SpooledTemporaryFile = None # type: ignore
if t.TYPE_CHECKING:
    import typing as te
    from _typeshed.wsgi import WSGIEnvironment

    # Result triple produced by the form-data parsers:
    # ``(body stream, form fields MultiDict, files MultiDict)``.
    t_parse_result = t.Tuple[t.IO[bytes], MultiDict, MultiDict]

    class TStreamFactory(te.Protocol):
        # Structural type of stream factories: called once per uploaded part
        # to obtain a writable binary stream (see ``default_stream_factory``).
        def __call__(
            self,
            total_content_length: t.Optional[int],
            content_type: t.Optional[str],
            filename: t.Optional[str],
            content_length: t.Optional[int] = None,
        ) -> t.IO[bytes]:
            ...


# Type variable for decorators such as ``exhaust_stream`` that must
# preserve the wrapped callable's signature.
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def _exhaust(stream: t.IO[bytes]) -> None:
bts = stream.read(64 * 1024)
while bts:
bts = stream.read(64 * 1024)
def default_stream_factory(
    total_content_length: t.Optional[int],
    content_type: t.Optional[str],
    filename: t.Optional[str],
    content_length: t.Optional[int] = None,
) -> t.IO[bytes]:
    """Return a writable binary stream for buffering an uploaded part.

    Prefers a :class:`~tempfile.SpooledTemporaryFile` that keeps up to
    500 kB in memory before spilling to disk.  On platforms without it,
    small bodies of known size go to :class:`~io.BytesIO` and everything
    else to a plain temporary file.
    """
    spool_limit = 1024 * 500

    if SpooledTemporaryFile is not None:
        return t.cast(
            t.IO[bytes], SpooledTemporaryFile(max_size=spool_limit, mode="rb+")
        )

    if total_content_length is None or total_content_length > spool_limit:
        return t.cast(t.IO[bytes], TemporaryFile("rb+"))

    return BytesIO()
def parse_form_data(
    environ: "WSGIEnvironment",
    stream_factory: t.Optional["TStreamFactory"] = None,
    charset: str = "utf-8",
    errors: str = "replace",
    max_form_memory_size: t.Optional[int] = None,
    max_content_length: t.Optional[int] = None,
    cls: t.Optional[t.Type[MultiDict]] = None,
    silent: bool = True,
) -> "t_parse_result":
    """Parse the form data in *environ* and return ``(stream, form, files)``.

    Only call this for requests whose transport method is ``POST``, ``PUT``
    or ``PATCH``.  For ``multipart/form-data`` bodies the files multidict is
    filled with :class:`FileStorage` objects; for unknown mimetypes the
    wrapped input stream is returned as the first element, otherwise the
    stream is empty.

    This is a convenience shortcut for :class:`FormDataParser`; see
    :doc:`/request_data` for details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and `cls`
       parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  Works the same as
                           :meth:`Response._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes accepted for
                                 in-memory stored form data.  Exceeding it
                                 raises :exc:`~exceptions.RequestEntityTooLarge`.
    :param max_content_length: If provided and the transmitted data is
                               longer, :exc:`~exceptions.RequestEntityTooLarge`
                               is raised.
    :param cls: an optional dict class to use.  Defaults to
                :class:`MultiDict` when not specified or `None`.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(
        stream_factory,
        charset,
        errors,
        max_form_memory_size,
        max_content_length,
        cls,
        silent,
    )
    return parser.parse_from_environ(environ)
def exhaust_stream(f: F) -> F:
    """Decorator that drains the wrapped method's ``stream`` argument on exit.

    If the stream exposes an ``exhaust()`` method it is used; otherwise the
    stream is read to EOF in 64 KiB chunks.  Draining happens even when the
    wrapped call raises.
    """

    def wrapper(self, stream, *args, **kwargs):  # type: ignore
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            exhaust = getattr(stream, "exhaust", None)

            if exhaust is None:
                # No fast path available: read and discard until EOF.
                while stream.read(1024 * 64):
                    pass
            else:
                exhaust()

    return update_wrapper(t.cast(F, wrapper), f)
class FormDataParser:
    """This class implements parsing of form data for Werkzeug.  By itself
    it can parse multipart and url encoded form data.  It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`Response._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data.  If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(
        self,
        stream_factory: t.Optional["TStreamFactory"] = None,
        charset: str = "utf-8",
        errors: str = "replace",
        max_form_memory_size: t.Optional[int] = None,
        max_content_length: t.Optional[int] = None,
        cls: t.Optional[t.Type[MultiDict]] = None,
        silent: bool = True,
    ) -> None:
        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length

        if cls is None:
            cls = MultiDict

        self.cls = cls
        self.silent = silent

    def get_parse_func(
        self, mimetype: str, options: t.Dict[str, str]
    ) -> t.Optional[
        t.Callable[
            ["FormDataParser", t.IO[bytes], str, t.Optional[int], t.Dict[str, str]],
            "t_parse_result",
        ]
    ]:
        # Look up the parser for *mimetype*; *options* is accepted so
        # subclasses can dispatch on mimetype parameters as well.
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ: "WSGIEnvironment") -> "t_parse_result":
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Optional[t.Dict[str, str]] = None,
    ) -> "t_parse_result":
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        # Reject over-long bodies up front when a limit is configured and
        # the client declared a content length.
        if (
            self.max_content_length is not None
            and content_length is not None
            and content_length > self.max_content_length
        ):
            # if the input stream is not exhausted, firefox reports Connection Reset
            _exhaust(stream)
            raise exceptions.RequestEntityTooLarge()

        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)

        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                # Parse errors are swallowed by default (``silent``) and the
                # request falls through to the "unknown mimetype" result.
                if not self.silent:
                    raise

        # Unknown mimetype (or silenced failure): hand back the raw stream
        # and empty form/files containers.
        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Dict[str, str],
    ) -> "t_parse_result":
        # Delegate to MultiPartParser; the boundary comes from the
        # Content-Type parameters and must be ASCII per the multipart spec.
        parser = MultiPartParser(
            self.stream_factory,
            self.charset,
            self.errors,
            max_form_memory_size=self.max_form_memory_size,
            cls=self.cls,
        )
        boundary = options.get("boundary", "").encode("ascii")

        if not boundary:
            raise ValueError("Missing boundary")

        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Dict[str, str],
    ) -> "t_parse_result":
        # urlencoded bodies are decoded fully into memory, so the in-memory
        # limit applies to the whole declared content length.
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            # if the input stream is not exhausted, firefox reports Connection Reset
            _exhaust(stream)
            raise exceptions.RequestEntityTooLarge()

        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions: t.Dict[
        str,
        t.Callable[
            ["FormDataParser", t.IO[bytes], str, t.Optional[int], t.Dict[str, str]],
            "t_parse_result",
        ],
    ] = {
        "multipart/form-data": _parse_multipart,
        "application/x-www-form-urlencoded": _parse_urlencoded,
        "application/x-url-encoded": _parse_urlencoded,
    }
def _line_parse(line: str) -> t.Tuple[str, bool]:
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] == "\r\n":
return line[:-2], True
elif line[-1:] in {"\r", "\n"}:
return line[:-1], True
return line, False
class MultiPartParser:
    """Parses ``multipart/form-data`` bodies into form fields and uploaded
    files, driving the sans-IO :class:`MultipartDecoder` with chunks read
    from the input stream."""

    def __init__(
        self,
        stream_factory: t.Optional["TStreamFactory"] = None,
        charset: str = "utf-8",
        errors: str = "replace",
        max_form_memory_size: t.Optional[int] = None,
        cls: t.Optional[t.Type[MultiDict]] = None,
        buffer_size: int = 64 * 1024,
    ) -> None:
        # Default charset / error handler used when a part has no charset
        # of its own (see get_part_charset).
        self.charset = charset
        self.errors = errors
        # Passed through to MultipartDecoder, which enforces the limit.
        self.max_form_memory_size = max_form_memory_size

        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        if cls is None:
            cls = MultiDict

        self.cls = cls

        # Chunk size used when reading the input stream.
        self.buffer_size = buffer_size

    def fail(self, message: str) -> "te.NoReturn":
        # Raise a parse failure; ValueError is what FormDataParser.parse
        # catches when operating silently.
        raise ValueError(message)

    def get_part_charset(self, headers: Headers) -> str:
        # Figure out input charset for current part
        content_type = headers.get("content-type")

        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get("charset", self.charset)

        return self.charset

    def start_file_streaming(
        self, event: File, total_content_length: t.Optional[int]
    ) -> t.IO[bytes]:
        """Open a writable stream (via the stream factory) for a file part."""
        content_type = event.headers.get("content-type")

        try:
            content_length = int(event.headers["content-length"])
        except (KeyError, ValueError):
            # Per-part Content-Length is optional; fall back to 0.
            content_length = 0

        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=event.filename,
            content_type=content_type,
            content_length=content_length,
        )
        return container

    def parse(
        self, stream: t.IO[bytes], boundary: bytes, content_length: t.Optional[int]
    ) -> t.Tuple[MultiDict, MultiDict]:
        """Parse *stream* as multipart data delimited by *boundary*.

        :return: ``(form, files)`` as instances of ``self.cls``.
        """
        container: t.Union[t.IO[bytes], t.List[bytes]]
        _write: t.Callable[[bytes], t.Any]

        # Feed the decoder chunk by chunk; the trailing ``None`` sentinel
        # signals end of input to the decoder.
        iterator = chain(
            _make_chunk_iter(
                stream,
                limit=content_length,
                buffer_size=self.buffer_size,
            ),
            [None],
        )

        parser = MultipartDecoder(boundary, self.max_form_memory_size)

        fields = []
        files = []

        current_part: Union[Field, File]
        for data in iterator:
            parser.receive_data(data)
            event = parser.next_event()
            # Drain all events available for this chunk; NeedData means
            # "feed me more input", Epilogue means the body is done.
            while not isinstance(event, (Epilogue, NeedData)):
                if isinstance(event, Field):
                    # Text field: buffer raw bytes in a list, decode at the end.
                    current_part = event
                    container = []
                    _write = container.append
                elif isinstance(event, File):
                    # File part: stream bytes into a factory-provided file object.
                    current_part = event
                    container = self.start_file_streaming(event, content_length)
                    _write = container.write
                elif isinstance(event, Data):
                    # NOTE(review): relies on the decoder emitting a Field/File
                    # event before its Data events so ``_write`` is bound here.
                    _write(event.data)
                    if not event.more_data:
                        if isinstance(current_part, Field):
                            value = b"".join(container).decode(
                                self.get_part_charset(current_part.headers), self.errors
                            )
                            fields.append((current_part.name, value))
                        else:
                            container = t.cast(t.IO[bytes], container)
                            # Rewind so callers read the file from the start.
                            container.seek(0)
                            files.append(
                                (
                                    current_part.name,
                                    FileStorage(
                                        container,
                                        current_part.filename,
                                        current_part.name,
                                        headers=current_part.headers,
                                    ),
                                )
                            )

                event = parser.next_event()

        return self.cls(fields), self.cls(files)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic result type for response deserialization callbacks.
T = TypeVar('T')
# Alias for JSON-serializable request bodies.
JSONType = Any
# Optional callback invoked with (pipeline response, deserialized model, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for path/query/header parameters; client-side
# validation is disabled when building requests.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_access_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request for creating or updating a disk access resource."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        diskAccessName=_SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_access_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PATCH request for updating a disk access resource."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        diskAccessName=_SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single disk access resource."""
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        diskAccessName=_SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request for a disk access resource."""
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        diskAccessName=_SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_resource_group_request(
    subscription_id: str,
    resource_group_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing disk accesses in a resource group."""
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all disk accesses in a subscription."""
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameter into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_private_link_resources_request(
    subscription_id: str,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a disk access resource's private link resources."""
    api_version = "2020-05-01"
    accept = "application/json"

    # Substitute the serialized path parameters into the URL template.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        diskAccessName=_SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class DiskAccessesOperations(object):
"""DiskAccessesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests.
        self._client = client
        # Model -> wire-format serializer.
        self._serialize = serializer
        # Wire-format -> model deserializer.
        self._deserialize = deserializer
        # Client configuration (subscription id, polling interval, ...).
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        disk_access_name: str,
        disk_access: "_models.DiskAccess",
        **kwargs: Any
    ) -> "_models.DiskAccess":
        """Send the initial PUT of the create-or-update long-running
        operation and deserialize the immediate response body."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccess"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(disk_access, 'DiskAccess')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_access_name=disk_access_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 and 202 are treated as success; both carry a DiskAccess body.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('DiskAccess', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('DiskAccess', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        disk_access_name: str,
        disk_access: "_models.DiskAccess",
        **kwargs: Any
    ) -> LROPoller["_models.DiskAccess"]:
        """Creates or updates a disk access resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_access_name: The name of the disk access resource that is being created. The name
         can't be changed after the disk encryption set is created. Supported characters for the name
         are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_access_name: str
        :param disk_access: disk access object supplied in the body of the Put disk access operation.
        :type disk_access: ~azure.mgmt.compute.v2020_05_01.models.DiskAccess
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_05_01.models.DiskAccess]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccess"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial request.  ``cls`` is overridden
            # so the raw pipeline response is kept for the poller to inspect.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                disk_access_name=disk_access_name,
                disk_access=disk_access,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('DiskAccess', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Resolve the polling strategy: default ARM polling, explicit
        # no-polling, or a user-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name: str,
        disk_access_name: str,
        disk_access: "_models.DiskAccessUpdate",
        **kwargs: Any
    ) -> "_models.DiskAccess":
        """Send the initial PATCH of the update long-running operation and
        deserialize the immediate response body."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccess"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied overrides take precedence over the defaults above.
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(disk_access, 'DiskAccessUpdate')

        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_access_name=disk_access_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 and 202 are treated as success; both carry a DiskAccess body.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('DiskAccess', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('DiskAccess', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        disk_access_name: str,
        disk_access: "_models.DiskAccessUpdate",
        **kwargs: Any
    ) -> LROPoller["_models.DiskAccess"]:
        """Updates (patches) a disk access resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_access_name: The name of the disk access resource that is being created. The name
         can't be changed after the disk encryption set is created. Supported characters for the name
         are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_access_name: str
        :param disk_access: disk access object supplied in the body of the Patch disk access operation.
        :type disk_access: ~azure.mgmt.compute.v2020_05_01.models.DiskAccessUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_05_01.models.DiskAccess]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccess"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial request.  ``cls`` is overridden
            # so the raw pipeline response is kept for the poller to inspect.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                disk_access_name=disk_access_name,
                disk_access=disk_access,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('DiskAccess', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Resolve the polling strategy: default ARM polling, explicit
        # no-polling, or a user-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> "_models.DiskAccess":
    """Gets information about a disk access resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk encryption set is created. Supported characters for the name
     are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
    :type disk_access_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiskAccess, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2020_05_01.models.DiskAccess
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccess"]
    # Typed azure-core exceptions for the well-known failure codes; callers
    # may extend or override the mapping via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    req = build_get_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        disk_access_name=disk_access_name,
        template_url=self.get.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)

    pipeline_response = self._client._pipeline.run(req, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiskAccess', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> None:
    """Issue the raw DELETE call that starts the long-running deletion.

    Private helper used by ``begin_delete``; the poller drives the
    operation to completion from the returned response.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Well-known HTTP failures mapped to typed azure-core exceptions;
    # extensible via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    req = _convert_request(build_delete_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        disk_access_name=disk_access_name,
        template_url=self._delete_initial.metadata['url'],
    ))
    req.url = self._client.format_url(req.url)

    pipeline_response = self._client._pipeline.run(req, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    # 200/202/204 are all valid "delete accepted" answers from ARM.
    if http_response.status_code not in (200, 202, 204):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
@distributed_trace
def begin_delete(
    self,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Deletes a disk access resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk encryption set is created. Supported characters for the name
     are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
    :type disk_access_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token means this is a fresh operation: fire the initial
    # DELETE. The identity ``cls`` keeps the raw pipeline response so the
    # poller can read the LRO headers from it.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            disk_access_name=disk_access_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # ``error_map`` was consumed by the initial call above; drop it so it is
    # not forwarded to the polling pipeline runs below.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # DELETE has no response body to deserialize; only honor a custom ``cls``.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rebuild the poller from a previously saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'}  # type: ignore
@distributed_trace
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> Iterable["_models.DiskAccessList"]:
    """Lists all the disk access resources under a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiskAccessList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_05_01.models.DiskAccessList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccessList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page goes to the operation's templated URL; later pages
        # follow the service-supplied ``next_link`` verbatim (forced to GET).
        if not next_link:
            request = build_list_by_resource_group_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                template_url=self.list_by_resource_group.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_by_resource_group_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand back (link-to-next-page, items).
        deserialized = self._deserialize("DiskAccessList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses'}  # type: ignore
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> Iterable["_models.DiskAccessList"]:
    """Lists all the disk access resources under a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiskAccessList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_05_01.models.DiskAccessList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskAccessList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the templated URL; later pages follow the
        # service-supplied ``next_link`` verbatim (forced to GET).
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand back (link-to-next-page, items).
        deserialized = self._deserialize("DiskAccessList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses'}  # type: ignore
@distributed_trace
def get_private_link_resources(
    self,
    resource_group_name: str,
    disk_access_name: str,
    **kwargs: Any
) -> "_models.PrivateLinkResourceListResult":
    """Gets the private link resources possible under disk access resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk encryption set is created. Supported characters for the name
     are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
    :type disk_access_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PrivateLinkResourceListResult, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2020_05_01.models.PrivateLinkResourceListResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResourceListResult"]
    # Typed azure-core errors for the common failure codes; callers may
    # extend the mapping via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    req = _convert_request(build_get_private_link_resources_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        disk_access_name=disk_access_name,
        template_url=self.get_private_link_resources.metadata['url'],
    ))
    req.url = self._client.format_url(req.url)

    pipeline_response = self._client._pipeline.run(req, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    result = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result

get_private_link_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources'}  # type: ignore
| |
'''
Client library for driving an Open edX (Studio) server over HTTP:
session/CSRF cookie handling, course creation, course-team management,
and course import/export.
'''
from datetime import datetime
import json
import os
import os.path
import requests
import shutil
import tempfile
from collections import namedtuple
# Sessions are stored in cookies. Each server can have its own cookie
# for this. We just set all the possible ones.
sessionid_strings = ["sessionid",
"prod-edx-sessionid",
"prod-edge-sessionid"]
# Ditto for CSRF tokens
csrf_token_strings = ["csrftoken",
"prod-edx-csrftoken",
"prod-edge-csrftoken"]
# Basic cookies we just need to talk to edX
baseline_cookies = {
"djdt": "hide",
"edxloggedin": "true",
}
# Basic headers we need just to talk to edX
baseline_headers = {
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows) AppleWebKit/500"
"(KHTML, like Gecko) Chrome/48.0.0.0 Safari/530.00",
}
class METHODS:
    '''
    HTTP request method names, spelled the way the ``requests`` library
    expects them (GET/POST/...).
    '''
    GET = 'get'
    POST = 'post'
class DATA_FORMATS:
    '''
    Data formats for making HTTP headers (Accept / Content-Type), used to
    describe both what we send and what we expect back.
    '''
    AJAX = 'ajax'       # JSON request/response bodies
    TARBALL = 'tgz'     # gzipped tar course archives
    NONE = 'none'       # no body expected
class EdXCourse(namedtuple("course_tuple", ["org", "number", "run"])):
    '''
    A helper class to manage edX course URL encoding.

    The doctests below avoid ``print`` so they run under both Python 2
    and Python 3 doctest runners (the old ``print x`` form was a syntax
    error on Python 3).

    >>> x = EdXCourse("edx", "edx101", "2000")
    >>> x.org
    'edx'
    >>> x.course_string()
    'course-v1:edx+edx101+2000'
    '''
    def course_string(self):
        '''Return the opaque "course-v1:org+number+run" key used in edX URLs.'''
        return "course-v1:{org}+{number}+{run}".format(org=self.org,
                                                       number=self.number,
                                                       run=self.run)
# Run the module's doctests when executed directly. NOTE(review): this guard
# sits mid-file, but doctest.testmod() scans the whole module object at call
# time, so docstrings defined below are still collected.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
class EdXConnection(object):
def __init__(self,
session=None,
server="http://127.0.0.1:8001",
csrf="sample_csrf_string"):
'''
Initialize a connection to an edX instance. The key parameter is
session_id. This must be picked out from the network console
in developer tools in your web browser.
'''
if session:
self.sessionid = session
else:
self.sessionid = os.environ['OPEN_EDX_SESSION']
self.server = server
self.csrf = csrf
def compile_header(self,
response_format=DATA_FORMATS.AJAX,
request_format=DATA_FORMATS.AJAX):
'''
This will compile both header and cookies necessary to
access an edX server as if this were a web browser. The
key things needed are the session ID cookie. This can
be grabbed from your browser in the developer tools.
'''
header = {}
cookies = {}
# Take the baseline cookies
header.update(baseline_headers)
cookies.update(baseline_cookies)
# Add the session ID
for sid_string in sessionid_strings:
cookies[sid_string] = self.sessionid
# Add CSRF protection.
for csrf_string in csrf_token_strings:
cookies[csrf_string] = self.csrf
header['X-CSRFToken'] = self.csrf
# And we need appropriate content type headers both
# for what we're sending and what we expect. This is
# usually JSON, but it's sometimes files.
if response_format == DATA_FORMATS.AJAX or \
response_format == DATA_FORMATS.NONE:
header["Accept"] = "application/json, text/javascript, */*; q=0.01"
if response_format == DATA_FORMATS.TARBALL:
pass # No header needed
if request_format == 'ajax':
header["Content-Type"] = "application/json; charset=UTF-8"
# And more CSRF protection -- we do need the referer
header["Referer"] = self.server + "/course"
return (header, cookies)
def ajax(self,
url,
payload=None,
files=None,
response_format=DATA_FORMATS.AJAX,
request_format=DATA_FORMATS.AJAX,
method=METHODS.POST):
'''
Make an AJAX call to edX.
'''
(headers, cookies) = self.compile_header(
response_format=response_format,
request_format=request_format
)
if request_format == DATA_FORMATS.AJAX and \
method != METHODS.GET:
payload = json.dumps(payload)
if method == METHODS.POST:
kwargs = {}
if payload:
kwargs['data'] = payload
if files:
kwargs['files'] = payload
r = requests.post(self.server+url,
cookies=cookies,
headers=headers,
**kwargs)
elif method == METHODS.GET:
if payload:
print payload
raise "Payload doesn't work with get"
print cookies
print headers
r = requests.get(
self.server+url,
cookies=cookies,
headers=headers
)
if response_format == DATA_FORMATS.AJAX:
print r.text
return json.loads(r.text)
return r
def create_course(self,
course,
course_name):
'''
Make a new edX course
'''
print "Creating", course_name
url = "/course/"
payload = {"org": course.org,
"number": course.number,
"run": course.run,
"display_name": course_name}
r = self.ajax(url, payload)
def add_author_to_course(self,
course,
author_email):
print "Adding", author_email, "to", course.course_string()
url = "/course_team/{course}/{author_email}"
url = url.format(course=course.course_string(),
author_email=author_email)
payload = {"role": "instructor"}
r = self.ajax(url,
payload=payload,
response_format=DATA_FORMATS.NONE)
print r, r.text
def download_course(self, course, filepointer, close=True):
'''
This will download a course as a tarball from an Open edX
server. This takes a while. Open edX will synchronously make
the tarball for us.
'''
(headers, cookies) = self.compile_header()
url = "/export/{course}?_accept=application/x-tgz"
if isinstance(course, EdXCourse):
course_string = course.course_string()
elif isinstance(course, basestring):
course_string = course
else:
raise ValueError("Unrecognized course type: "+repr(course))
url = url.format(course=course_string)
r = self.ajax(url,
response_format=DATA_FORMATS.TARBALL,
method=METHODS.GET)
for chunk in r.iter_content(chunk_size=512 * 1024):
if chunk: # filter out keep-alive new chunks
filepointer.write(chunk)
if close:
filepointer.close()
def upload_course(self, course, filepointer):
url = "/import/{course}"
url = url.format(course=course.course_string())
files = {'course-data': ("course.tar.gz",
filepointer.read(),
"application/gzip",
{})}
r = self.ajax(url,
files=files,
request_format=DATA_FORMATS.TARBALL)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import time
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.testing.sqlutils import ReusedSQLTestCase
class StreamingTests(ReusedSQLTestCase):
def test_stream_trigger(self):
    """``trigger()`` must reject empty, conflicting, and positional arguments.

    Fix: the first three negative cases previously lacked ``self.fail`` after
    the call, so the test would silently pass if ``trigger()`` stopped raising.
    """
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')

    # Should take at least one arg
    try:
        df.writeStream.trigger()
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should not take multiple args
    try:
        df.writeStream.trigger(once=True, processingTime='5 seconds')
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should not take multiple args
    try:
        df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should take only keyword args
    try:
        df.writeStream.trigger('5 seconds')
        self.fail("Should have thrown an exception")
    except TypeError:
        pass
def test_stream_read_options(self):
    """Options set via ``option()``/``schema()`` are honored by ``load()``."""
    schema = StructType([StructField("data", StringType(), False)])
    df = self.spark.readStream\
        .format('text')\
        .option('path', 'python/test_support/sql/streaming')\
        .schema(schema)\
        .load()
    self.assertTrue(df.isStreaming)
    self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
    """Keyword args to ``load()`` override earlier ``format``/``option``/``schema`` calls."""
    bad_schema = StructType([StructField("test", IntegerType(), False)])
    schema = StructType([StructField("data", StringType(), False)])
    # SPARK-32516 disables the overwrite behavior by default.
    with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
        df = self.spark.readStream.format('csv')\
            .option('path', 'python/test_support/sql/fake')\
            .schema(bad_schema)\
            .load(path='python/test_support/sql/streaming', schema=schema, format='text')
        # The csv format, fake path, and bad schema must all be overridden.
        self.assertTrue(df.isStreaming)
        self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
    """A parquet sink query started with writer options produces output and checkpoints."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
        .withColumn('id', lit(1))
    # Stop leftover queries so the query name below is free.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    # mkdtemp + rmtree reserves a unique path that does not exist yet.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
        .format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        q.processAllAvailable()
        # Collect non-hidden files from the output directory tree.
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
    """Keyword args to ``start()`` override earlier writer-builder settings."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    # mkdtemp + rmtree reserves a unique path that does not exist yet.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    # Decoy locations that must NOT be used if start() overrides correctly.
    fake1 = os.path.join(tmpPath, 'fake1')
    fake2 = os.path.join(tmpPath, 'fake2')
    # SPARK-32516 disables the overwrite behavior by default.
    with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
        q = df.writeStream.option('checkpointLocation', fake1)\
            .format('memory').option('path', fake2) \
            .queryName('fake_query').outputMode('append') \
            .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)

    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        q.processAllAvailable()
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
        self.assertFalse(os.path.isdir(fake1))  # should not have been created
        self.assertFalse(os.path.isdir(fake2))  # should not have been created
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
    """``status``/``lastProgress``/``recentProgress`` reflect a running query."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    # mkdtemp + rmtree reserves a unique path that does not exist yet.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')

    def func(x):
        # Slow down each row so the first progress update lags behind.
        time.sleep(1)
        return x

    from pyspark.sql.functions import col, udf
    sleep_udf = udf(func)

    # Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
    # were no updates.
    q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        # "lastProgress" will return None in most cases. However, as it may be flaky when
        # Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
        # may throw error with a high chance and make this test flaky, so we should still be
        # able to detect broken codes.
        q.lastProgress

        q.processAllAvailable()
        lastProgress = q.lastProgress
        recentProgress = q.recentProgress
        status = q.status
        self.assertEqual(lastProgress['name'], q.name)
        self.assertEqual(lastProgress['id'], q.id)
        # The last progress entry must be among the recent ones.
        self.assertTrue(any(p == lastProgress for p in recentProgress))
        self.assertTrue(
            "message" in status and
            "isDataAvailable" in status and
            "isTriggerActive" in status)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
    """``awaitTermination`` validates its argument and times out on a live query."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    # mkdtemp + rmtree reserves a unique path that does not exist yet.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            # A non-numeric timeout must be rejected.
            q.awaitTermination("hello")
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = q.awaitTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        # Query is still running, so the timed wait returns False.
        self.assertFalse(res)
    finally:
        q.processAllAvailable()
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_exception(self):
    """``exception()`` is None for a healthy query and a StreamingQueryException after failure."""
    sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    sq = sdf.writeStream.format('memory').queryName('query_explain').start()
    try:
        sq.processAllAvailable()
        self.assertEqual(sq.exception(), None)
    finally:
        sq.stop()

    from pyspark.sql.functions import col, udf
    from pyspark.sql.utils import StreamingQueryException
    # A UDF that always raises ZeroDivisionError, to fail the query.
    bad_udf = udf(lambda x: 1 / 0)
    sq = sdf.select(bad_udf(col("value")))\
        .writeStream\
        .format('memory')\
        .queryName('this_query')\
        .start()
    try:
        # Process some data to fail the query
        sq.processAllAvailable()
        self.fail("bad udf should fail the query")
    except StreamingQueryException as e:
        # This is expected
        self._assert_exception_tree_contains_msg(e, "ZeroDivisionError")
    finally:
        sq.stop()
    # After failure, exception() reports the same error.
    self.assertTrue(type(sq.exception()) is StreamingQueryException)
    self._assert_exception_tree_contains_msg(sq.exception(), "ZeroDivisionError")
def _assert_exception_tree_contains_msg(self, exception, msg):
    """Walk the ``cause`` chain of *exception* and assert *msg* occurs in some ``desc``."""
    node = exception
    found = msg in node.desc
    while not found and node.cause is not None:
        node = node.cause
        found = msg in node.desc
    self.assertTrue(found, "Exception tree doesn't contain the expected message: %s" % msg)
def test_query_manager_await_termination(self):
    """``streams.awaitAnyTermination`` validates its argument and times out."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    # mkdtemp + rmtree reserves a unique path that does not exist yet.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            # A non-numeric timeout must be rejected.
            self.spark._wrapped.streams.awaitAnyTermination("hello")
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        # No query terminated within the timeout, so False is returned.
        self.assertFalse(res)
    finally:
        q.processAllAvailable()
        q.stop()
        shutil.rmtree(tmpPath)
class ForeachWriterTester:
    """Harness for the ``foreach`` sink tests below.

    Records open/process/close callbacks from a foreach writer as files in
    per-event temp directories, then reads them back through Spark so tests
    can assert on how many times each callback fired. Instances are pickled
    to executors, so only the directory paths survive serialization (see
    ``__getstate__``/``__setstate__``).
    """

    def __init__(self, spark):
        self.spark = spark

    def write_open_event(self, partitionId, epochId):
        self._write_event(
            self.open_events_dir,
            {'partition': partitionId, 'epoch': epochId})

    def write_process_event(self, row):
        # The row content is irrelevant here; only the event count matters.
        self._write_event(self.process_events_dir, {'value': 'text'})

    def write_close_event(self, error):
        self._write_event(self.close_events_dir, {'error': str(error)})

    def write_input_file(self):
        self._write_event(self.input_dir, "text")

    def open_events(self):
        return self._read_events(self.open_events_dir, 'partition INT, epoch INT')

    def process_events(self):
        return self._read_events(self.process_events_dir, 'value STRING')

    def close_events(self):
        return self._read_events(self.close_events_dir, 'error STRING')

    def run_streaming_query_on_writer(self, writer, num_files):
        """Stream ``num_files`` text inputs through ``writer`` via a foreach sink."""
        self._reset()
        try:
            sdf = self.spark.readStream.format('text').load(self.input_dir)
            sq = sdf.writeStream.foreach(writer).start()
            for i in range(num_files):
                self.write_input_file()
                sq.processAllAvailable()
        finally:
            self.stop_all()

    def assert_invalid_writer(self, writer, msg=None):
        """Assert that starting a query with ``writer`` fails, optionally with ``msg``."""
        self._reset()
        try:
            sdf = self.spark.readStream.format('text').load(self.input_dir)
            sq = sdf.writeStream.foreach(writer).start()
            self.write_input_file()
            sq.processAllAvailable()
            self.fail("invalid writer %s did not fail the query" % str(writer))  # not expected
        except Exception as e:
            if msg:
                assert msg in str(e), "%s not in %s" % (msg, str(e))

        finally:
            self.stop_all()

    def stop_all(self):
        for q in self.spark._wrapped.streams.active:
            q.stop()

    def _reset(self):
        # Fresh directories per run so event counts start at zero.
        self.input_dir = tempfile.mkdtemp()
        self.open_events_dir = tempfile.mkdtemp()
        self.process_events_dir = tempfile.mkdtemp()
        self.close_events_dir = tempfile.mkdtemp()

    def _read_events(self, dir, json):
        # NOTE: ``json`` here is a DDL schema string, not the json module.
        rows = self.spark.read.schema(json).json(dir).collect()
        dicts = [row.asDict() for row in rows]
        return dicts

    def _write_event(self, dir, event):
        import uuid
        with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
            f.write("%s\n" % str(event))

    def __getstate__(self):
        # Only the directory paths cross the pickle boundary; the
        # SparkSession cannot (and need not) be shipped to executors.
        return (self.open_events_dir, self.process_events_dir, self.close_events_dir)

    def __setstate__(self, state):
        self.open_events_dir, self.process_events_dir, self.close_events_dir = state
# Those foreach tests are failed in Python 3.6 and macOS High Sierra by defined rules
# at http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
# To work around this, OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES.
def test_streaming_foreach_with_simple_function(self):
    """A bare function passed to ``foreach`` is invoked once per input row."""
    tester = self.ForeachWriterTester(self.spark)

    def record_row(row):
        tester.write_process_event(row)

    tester.run_streaming_query_on_writer(record_row, 2)
    self.assertEqual(len(tester.process_events()), 2)
def test_streaming_foreach_with_basic_open_process_close(self):
    """A writer with open/process/close gets all three callbacks for each epoch."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partitionId, epochId):
            tester.write_open_event(partitionId, epochId)
            # Returning True accepts the partition so process() runs.
            return True

        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)

    open_events = tester.open_events()
    self.assertEqual(len(open_events), 2)
    # One epoch per input file: 0 and 1.
    self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})

    self.assertEqual(len(tester.process_events()), 2)

    close_events = tester.close_events()
    self.assertEqual(len(close_events), 2)
    # Stringified 'None' means close() was called with no error.
    self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
    """When open() returns False the partition is skipped: no process(), but close() still runs."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partition_id, epoch_id):
            tester.write_open_event(partition_id, epoch_id)
            # Decline every partition.
            return False

        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)

    self.assertEqual(len(tester.open_events()), 2)

    self.assertEqual(len(tester.process_events()), 0)  # no row was processed

    close_events = tester.close_events()
    self.assertEqual(len(close_events), 2)
    self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
    """open() is optional; process() and close() still fire for every epoch."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)
    self.assertEqual(len(tester.open_events()), 0)  # no open events
    self.assertEqual(len(tester.process_events()), 2)
    self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
    """close() is optional; open() and process() still fire for every epoch."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partition_id, epoch_id):
            tester.write_open_event(partition_id, epoch_id)
            return True

        def process(self, row):
            tester.write_process_event(row)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)
    # Comment fixed: the original said "no open events" while asserting 2.
    self.assertEqual(len(tester.open_events()), 2)  # open() fired once per epoch
    self.assertEqual(len(tester.process_events()), 2)
    self.assertEqual(len(tester.close_events()), 0)  # no close() defined, so no close events
def test_streaming_foreach_without_open_and_close_methods(self):
    """A writer with only process() is valid; no open/close events are recorded."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def process(self, row):
            tester.write_process_event(row)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)
    self.assertEqual(len(tester.open_events()), 0)  # no open events
    self.assertEqual(len(tester.process_events()), 2)
    self.assertEqual(len(tester.close_events()), 0)
def test_streaming_foreach_with_process_throwing_error(self):
from pyspark.sql.utils import StreamingQueryException
tester = self.ForeachWriterTester(self.spark)
class ForeachWriter:
def process(self, row):
raise RuntimeError("test error")
def close(self, error):
tester.write_close_event(error)
try:
tester.run_streaming_query_on_writer(ForeachWriter(), 1)
self.fail("bad writer did not fail the query") # this is not expected
except StreamingQueryException as e:
# TODO: Verify whether original error message is inside the exception
pass
self.assertEqual(len(tester.process_events()), 0) # no row was processed
close_events = tester.close_events()
self.assertEqual(len(close_events), 1)
# TODO: Verify whether original error message is inside the exception
    def test_streaming_foreach_with_invalid_writers(self):
        """Writer objects missing required members, with non-callable members,
        or with wrong arity are all rejected up front."""
        tester = self.ForeachWriterTester(self.spark)

        # A function taking an iterator is not a valid row-callback or writer.
        def func_with_iterator_input(iter):
            for x in iter:
                print(x)
        tester.assert_invalid_writer(func_with_iterator_input)

        # 'process' is mandatory ...
        class WriterWithoutProcess:
            def open(self, partition):
                pass
        tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")

        # ... must be callable ...
        class WriterWithNonCallableProcess():
            process = True
        tester.assert_invalid_writer(WriterWithNonCallableProcess(),
                                     "'process' in provided object is not callable")

        # ... and must accept a row argument.
        class WriterWithNoParamProcess():
            def process(self):
                pass
        tester.assert_invalid_writer(WriterWithNoParamProcess())

        # Abstract class for tests below
        class WithProcess():
            def process(self, row):
                pass

        # The optional 'open' and 'close' members, when present, must also be
        # callables with the right arity.
        class WriterWithNonCallableOpen(WithProcess):
            open = True
        tester.assert_invalid_writer(WriterWithNonCallableOpen(),
                                     "'open' in provided object is not callable")

        class WriterWithNoParamOpen(WithProcess):
            def open(self):
                pass
        tester.assert_invalid_writer(WriterWithNoParamOpen())

        class WriterWithNonCallableClose(WithProcess):
            close = True
        tester.assert_invalid_writer(WriterWithNonCallableClose(),
                                     "'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
q = None
collected = dict()
def collectBatch(batch_df, batch_id):
collected[batch_id] = batch_df.collect()
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.assertTrue(0 in collected)
self.assertTrue(len(collected[0]), 2)
finally:
if q:
q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
from pyspark.sql.utils import StreamingQueryException
q = None
def collectBatch(df, id):
raise RuntimeError("this should fail the query")
try:
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
q = df.writeStream.foreachBatch(collectBatch).start()
q.processAllAvailable()
self.fail("Expected a failure")
except StreamingQueryException as e:
self.assertTrue("this should fail" in str(e))
finally:
if q:
q.stop()
def test_streaming_read_from_table(self):
with self.table("input_table", "this_query"):
self.spark.sql("CREATE TABLE input_table (value string) USING parquet")
self.spark.sql("INSERT INTO input_table VALUES ('aaa'), ('bbb'), ('ccc')")
df = self.spark.readStream.table("input_table")
self.assertTrue(df.isStreaming)
q = df.writeStream.format('memory').queryName('this_query').start()
q.processAllAvailable()
q.stop()
result = self.spark.sql("SELECT * FROM this_query ORDER BY value").collect()
self.assertEqual(
set([Row(value='aaa'), Row(value='bbb'), Row(value='ccc')]), set(result))
def test_streaming_write_to_table(self):
with self.table("output_table"), tempfile.TemporaryDirectory() as tmpdir:
df = self.spark.readStream.format("rate").option("rowsPerSecond", 10).load()
q = df.writeStream.toTable("output_table", format='parquet', checkpointLocation=tmpdir)
self.assertTrue(q.isActive)
time.sleep(10)
q.stop()
result = self.spark.sql("SELECT value FROM output_table").collect()
self.assertTrue(len(result) > 0)
if __name__ == "__main__":
    import unittest
    from pyspark.sql.tests.test_streaming import *  # noqa: F401

    try:
        # Prefer the XML test runner (produces JUnit-style reports for CI)
        # when it is installed; otherwise fall back to the default runner.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# external control
import datetime
import time
import string
import urllib2
import math
import redis
import base64
import json
import eto
import py_cf
import os
#import sys # Need to have acces to sys.stdout
#fd = open('/media/mmc1/python/eto_debug.out.debug','a+') # open
#old_stdout = sys.stdout # store the default system handler to be able to restore it
#sys.stdout = fd # Now your file is used by print as destination
#fd.write( "this is a debug print \n"3)
#fd.write( "this is a debug print \n" )
class Eto_Management():
    """Computes reference evapotranspiration (ETo) from weather-station data
    (via the project's `eto` module) and fans the results out through redis.

    Methods taking (chainFlowHandle, chainObj, parameters, event) are py_cf
    chain links; those arguments are supplied by the chain engine and unused.
    """

    def __init__(self, redis ):
        # redis: client used for all state storage and event queueing
        self.redis = redis
        # Weather-station ids fed to the eto module:
        self.sites = [ "MSRUC1",  # SANTA ROSA PLATEAU CA US, Temecula, CA
                       "MECSC1",  # EL CARISO CA US, Lake Elsinore, CA
                       "MCSPC1"   # CSS CASE SPRINGS CA US, Murrieta, CA
                     ]
        self.alt = 2400  # site altitude passed to eto -- presumably feet, TODO confirm

    def calculate_daily_eto( self, chainFlowHandle, chainObj, parameters, event ):
        """Chain link: compute yesterday's ETo, store it, update the bins,
        and log a cloud event."""
        print(datetime.datetime.now())
        print("calculating yesterday eto")
        eto_data = eto.determine_yesterday_eto(self.redis, self.sites, self.alt)
        print("eto_data",eto_data)
        self.redis.set("YESTERDAY_ETO", eto_data )
        self.update_sprinklers_time_bins( eto_data )
        self.store_event_queue( "store_eto", eto_data)

    def update_sprinklers_time_bins( self, yesterday_eto ):
        """Add yesterday's ETo to every bin named in ETO_RESOURCE_LIST.

        A bin that is missing or holds a non-numeric value restarts from 0.
        Accumulation is capped at 0.3 -- presumably the controller's per-bin
        maximum; TODO confirm units/limit with the irrigation side.
        """
        list_string = self.redis.get( "ETO_RESOURCE_LIST" )
        # str.split replaces the python-2-only string.split() helper and
        # behaves identically on both interpreters.
        list_data = list_string.split(":")
        for bin_key in list_data:
            try:
                accumulated = float(self.redis.get( bin_key ))
            except (TypeError, ValueError):
                # missing key (get() -> None) or garbage value: start at zero.
                # Narrowed from a bare except so redis outages still surface.
                accumulated = 0
            accumulated = accumulated + yesterday_eto
            if accumulated > .3 :
                accumulated = .3
            self.redis.set( bin_key, accumulated )

    def store_event_queue( self, event, data ):
        """Push a timestamped {event, data, time} record (JSON, base64) onto
        the bounded cloud_event_queue list (newest first, ~800 kept)."""
        log_data = {}
        log_data["event"] = event
        log_data["data"] = data
        log_data["time"] = time.time()
        json_data = json.dumps(log_data)
        # Bug fix: b64encode requires bytes on python 3; .encode("utf-8") is
        # a behavior-preserving round trip on python 2 strings.
        json_data = base64.b64encode(json_data.encode("utf-8"))
        self.redis.lpush( "cloud_event_queue", json_data)
        self.redis.ltrim( "cloud_event_queue", 0,800)

    def calculate_current_eto( self, chainFlowHandle, chainObj, parameters, event ):
        """Chain link: compute the current ETo; on any failure zero out the
        derived keys so downstream readers see sane defaults."""
        print( "calculating eto \n")
        try:
            eto_data = eto.calculate_current_eto( self.sites, self.alt)
            print( "current eto",(eto_data["net_et"],"\n"))
            self.store_event_queue( "store_eto", eto_data )
            self.redis.set("CURRENT_ETO", eto_data["net_et"] )
            self.redis.set("CURRENT_ETO_DATA",eto_data)
            print("updating eto \n")
        except Exception:
            # Bug fix: the old handler wrote to an undefined file handle `fd`
            # and dereferenced eto_data["net_et"] even though eto_data may
            # never have been assigned -- both raised a second exception
            # inside the handler, masking the original failure.
            print("exception in calculating eto \n")
            self.redis.set("CURRENT_ETO", 0 )
            self.redis.set("CURRENT_WIND_GUST", 0)
            self.redis.set("CURRENT_WIND_GUST_TIME_STAMP", 0)
            self.redis.set("CURRENT_ETO_DATA", 0)
            self.store_event_queue( "store_eto_exception", 0 )

    def do_house_keeping( self, chainFlowHandle, chainObj, parameters, event ):
        """Chain link: placeholder for start-of-day housekeeping."""
        pass

    def delete_email_files( self,chainFlowHandle, chainOjb, parameters, event ):
        """Chain link: purge processed CIMIS e-mail via the eto module."""
        print( str(datetime.datetime.now())+"\n")
        print("deleteing emails \n")
        eto.delete_email()

    def restart( self,chainFlowHandle, chainOjb, parameters, event ):
        """Chain link: placeholder restart hook."""
        pass
class Ntpd():
    """Chain-callable wrapper that syncs the system clock via ntpdate."""

    def __init__( self ):
        pass

    def get_time( self, chainFlowHandle, chainObj, parameters, event ):
        # -b step the clock, -s log via syslog, -u use an unprivileged port
        os.system("ntpdate -b -s -u pool.ntp.org")
class Watch_Dog_Client():
    """Registers this process in the watchdog directory hash and refreshes
    its heartbeat record in redis."""

    def __init__(self, redis, directory, key, description ):
        self.redis = redis
        self.directory = directory
        self.key = key
        self.description = description
        # announce ourselves in the directory, then write a first heartbeat
        self.redis.hset(directory,key,None)
        self.pat_wd( None, None, None, None)

    def pat_wd( self, chainFlowHandle, chainObj, parameters, event ):
        """Write a fresh heartbeat; a supervisor presumably restarts the
        process if the record's timestamp goes stale past max_dt seconds."""
        self.redis.delete( self.key )
        heartbeat = {
            "time": time.time(),
            "max_dt": 5 * 60,
            "pid": os.getpid(),
            "description": self.description,
        }
        self.redis.set( self.key, json.dumps(heartbeat) )
if __name__ == "__main__":
    # NOTE(review): any 127.x.x.x is loopback on Linux, but 127.1.1.1 is
    # unusual -- confirm this host is intentional (conventional is 127.0.0.1).
    redis = redis.StrictRedis( host = "127.1.1.1", port=6379, db = 0 )
    etm = Eto_Management( redis )
    #etm.calculate_daily_eto( None,None,None,None)
    print( "made it here on startup")
    #etm.calculate_daily_eto( None,None,None,None)
    #etm.delete_email_files( None, None, None, None )
    ntpd = Ntpd()
    # Register this process with the system watchdog and write an initial
    # heartbeat before the chain engine starts.
    device_directory = "WD_DIRECTORY"
    wc = Watch_Dog_Client(redis, device_directory,"extern_ctrl","external control")
    wc.pat_wd( None, None, None, None )
    #
    # Adding chains
    #
    cf = py_cf.CF_Interpreter()
    #
    # ETO processing elements
    #
    # cf.define_chain( "master_sequencer", True ) ## auto start thread
    # cf.insert_link( "link_3", "Enable_Chain",[["new_day_house_keeping","get_current_eto","delete_cimis_email_data" ]])
    # cf.insert_link( "link_4","Disable_Chain",[["master_sequencer"]])
    # Daily ETo chain: run calculate_daily_eto during the 12:00 hour, then
    # wait for 13:00 and reset so it fires once again the next day.
    # (WaitTod argument order is presumably [day, hour, minute, second] with
    # "*" wildcards -- confirm against the py_cf docs.)
    cf.define_chain("get_current_eto",True)
    cf.insert_link( "link_1", "WaitTod", ["*",12, "*","*" ] )
    cf.insert_link( "link_2", "One_Step", [etm.calculate_daily_eto ] )
    cf.insert_link( "link_3", "WaitTod", ["*",13,"*","*" ] )
    cf.insert_link( "link_4", "Reset", [] )
    # Daily cleanup of processed CIMIS e-mail during the 14:00 hour.
    cf.define_chain("delete_cimis_email_data",True)
    cf.insert_link( "link_1","WaitTod",["*",14,"*","*" ])
    cf.insert_link( "link_2","One_Step",[etm.delete_email_files])
    cf.insert_link( "link_3","WaitTod",["*",15,"*","*" ])
    cf.insert_link( "link_4","Reset",[])
    # cf.define_chain("new_day_house_keeping",False)
    # cf.insert_link( "link_1","WaitTod",["*",12,"*","*" ])
    # cf.insert_link( "link_2","One_Step",[etm.do_house_keeping])
    # cf.insert_link( "link_3","WaitTod",["*",13,"*","*" ])
    # cf.insert_link( "link_4","Reset",[])
    #
    # cf.define_chain("get_current_eto",False)
    # cf.insert_link( "link_1", "WaitTod", ["*",12, 20,"*" ] )
    # cf.insert_link( "link_2", "One_Step", [etm.calculate_current_eto ] )
    # cf.insert_link( "link_3", "One_Step", [etm.calculate_daily_eto ] )
    # cf.insert_link( "link_4", "WaitTod", ["*",13,50,"*" ] )
    # cf.insert_link( "link_5", "Reset", [] )
    #
    #
    #
    # internet time update
    #
    #
    # Sync the clock from pool.ntp.org once per HOUR_TICK event.
    cf.define_chain("ntpd",True)
    cf.insert_link( "link_9","Log",["ntpd"] )
    cf.insert_link( "link_1", "One_Step", [ntpd.get_time] )
    cf.insert_link( "link_10", "Log",["got time"] )
    cf.insert_link( "link_2", "WaitEvent",[ "HOUR_TICK" ] )
    cf.insert_link( "link_3", "Reset",[] )
    #
    #
    # update clocks from internet
    #
    #
    # Refresh the watchdog heartbeat between the :30 and :55 marks each cycle.
    cf.define_chain("watch_dog_thread",True)
    cf.insert_link( "link_1","WaitTod",["*","*","*",30 ])
    cf.insert_link( "link_2","One_Step",[ wc.pat_wd ])
    cf.insert_link( "link_3","WaitTod",["*","*","*",55 ])
    cf.insert_link( "link_4","Reset",[])
    #
    # Executing chains
    #
    cf_environ = py_cf.Execute_Cf_Environment( cf )
    cf_environ.execute()
| |
# -*- coding: utf-8 -*-
import Gedi as gedi
import emcee
import sys
import numpy as np
import matplotlib.pylab as pl; pl.close("all")
import astropy.table as Table
import cPickle as pickle
import corner
### Spots dataset to analyse
ijk= 10  # index of the SOAP spots dataset to analyse (output_spots<ijk>)
### Defining what's supposed to run: 1 runs, 0 doesn't
day_1, daydecay_1, daydecaygap_1 = 1, 1, 1 #1 measurement a day
day_4, daydecay_4, daydecaygap_4 = 1, 1, 1 #1 measurement every 4 days
#### Preparing MCMC
burns, runs = 100, 100  # emcee burn-in steps and production steps
###############################################################################
### RV function
# Injected circular-orbit planet signal: P=30 days, K=16.343 (presumably m/s,
# matching the *1000 scaling applied to the SOAP spot RVs below -- confirm).
rvx,rvy=gedi.RV_function.RV_circular(P=30,K=16.343,T=0,gamma=0,time=100,space=100)
pl.plot(rvx,rvy,'*')
pl.savefig('rv_signal.png')
pl.close('all')
### Priors
def lnprob(p):
    """Log-posterior for the emcee sampler.

    p holds the kernel hyperparameters in *log* space:
    p[0]=log(theta), p[1]=log(l1), p[2]=log(l2), p[3]=log(period),
    p[4]=log(white noise).  Reads the globals `kernel`, `t`, `y`, `yerr`,
    which are (re)assigned by whichever scenario block is currently running,
    and rebinds `kernel` as a side effect.
    """
    global kernel
    #p[0]=theta; p[1]=l1; p[2]=L2; p[3]=period; p[4]=whitenoise
    # Hard box prior: these log-space bounds match the supports of the
    # stats.uniform priors defined below (e.g. amplitude on [e^-6, e^6]).
    if any([p[0] < -6, p[0] > 6,
            p[1] < -10, p[1] > np.log(10),
            p[2] < np.log(1), p[2] > np.log(200),
            p[3] < np.log(10), p[3] > np.log(50),
            p[4] < -10, p[4] > np.log(10)]):
        return -np.inf
    lnprior=0.0
    # Update the kernel and compute the lnlikelihood.
    kernel=gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
    new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
    return lnprior + new_likelihood
from scipy import stats
# Uniform priors in *linear* parameter space; stats.uniform(loc, scale) has
# support [loc, loc+scale].  lnprob() enforces the same bounds in log space.
amplitude_prior=stats.uniform(np.exp(-6), np.exp(6)-np.exp(-6))      # [e^-6, e^6]
lenghtscale1_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))        # [e^-10, 10]
lenghtscale2_prior=stats.uniform(1, 200-1)                           # [1, 200]
period_prior=stats.uniform(10, 50-10)                                # [10, 50] days
wn_prior=stats.uniform(np.exp(-10), 10-np.exp(-10))                  # [e^-10, 10]
def from_prior():
    """Draw one starting vector (theta, l1, l2, P, wn) from the priors."""
    draws = [prior.rvs() for prior in (amplitude_prior, lenghtscale1_prior,
                                       lenghtscale2_prior, period_prior,
                                       wn_prior)]
    return np.array(draws)
### SOAP file to use
soap_file= 'output_spots{0}'.format(ijk)  # basename shared by every output below
###############################################################################
from time import time
from matplotlib.ticker import MaxNLocator
if day_1 == 1:
f= open("{0}_1day.txt".format(soap_file),"w")
sys.stdout= f
start= time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr+rvy)
t= np.array(range(1,101))
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecay_1==1:
f=open("{0}_1day_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
decay=np.linspace(1,0.5,len(y))
y0= np.array(spots_info)
decay=np.linspace(1,0.5,len(y0))
y=[n*m for n,m in zip(y0,decay)]
y= np.array(y+yerr+rvy)
t= np.array(range(1,101))
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day_decay.png'.format(soap_file))
pl.close('all')
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decay.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecaygap_1==1:
f=open("{0}_1day_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
new_rv=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
new_rv.append(rvy[e-1])
yerr=np.array(yerr1)
y=[n+m+o for n,m,o in zip(y,yerr1,new_rv)]
y=np.array(y)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_1day_dgap.png'.format(soap_file))
pl.close('all')
print "Done."
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_1day_decaygap.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if day_4==1:
f=open("{0}_4days.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
t0= np.array(range(1,101))
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
new_rv.append(rvy[ii])
t.append(t0[ii])
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
p0=[np.log(from_prior()) for i in range(nwalkers)]
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
if daydecay_4==1:
f=open("{0}_4days_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per 4 days
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr0= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
y0=np.array(y)
t0=np.array(range(1,101))
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
new_rv.append(rvy[ii])
# ---- "4 days sampling + decay" run: assemble the final RV series, fit a
# ---- quasi-periodic GP with emcee, and save diagnostics/posteriors. ----
# Observed series = decayed spot signal + noise draw + keplerian RV.
# NOTE(review): the same noise realisation is added into the data here AND
# kept as the per-point uncertainty (yerr) below -- confirm this is intended.
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days_decay.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
# Quasi-periodic kernel plus white noise, hyperparameters drawn from priors.
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Walkers start at log(prior draws); sampling happens in log-parameter space.
p0=[np.log(from_prior()) for i in range(nwalkers)]
# NOTE(review): relies on Python 2's map() returning a list; under Python 3
# np.isinf on a map object would fail.
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
# Trace plots, one panel per hyperparameter.  Chains are stored in log
# space; panels 1-3 are exponentiated back to linear units here.
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decay.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
# NOTE(review): the filenames say "_1day_" although this is the 4-day decay
# section -- looks like a copy/paste slip from the 1-day run; confirm.
# NOTE(review): protocol=-1 is a binary pickle but the files are opened in
# text mode 'w'; this only works on Python 2, Python 3 needs 'wb'.
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
# Back-transform the flattened post-burn-in chain to linear units.
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
# 16/50/84 percentiles -> (median, +sigma, -sigma) per hyperparameter.
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
# NOTE(review): samples has 5 columns but only 4 labels are supplied, so the
# corner-plot panels will be mislabelled/missing one -- confirm labels.
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decay.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
# Restore stdout (this whole section logged into the opened .txt file).
sys.stdout = sys.__stdout__
f.close()
# ---- "4 days sampling + decay + gap" run: same pipeline as the decay run
# ---- above, but the series also has an observation gap (days 30-59). ----
if daydecaygap_4==1:
f=open("{0}_4days_decaygap.txt".format(soap_file),"w")
# All prints below go to this log file until stdout is restored at the end.
sys.stdout = f
start=time()
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
# NOTE(review): 'Float64' is a legacy dtype alias; modern numpy spells it
# 'float64' -- confirm the installed numpy still accepts it.
spot= spot.astype('Float64')
# Repeat the 100-sample spot series over four rotations.
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
# x1000 -- presumably km/s -> m/s; confirm units.
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info)
# Linear decay of the spot amplitude from 1.0 down to 0.5 over the series.
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
# Observation gap: keep days 1-29 and 60-100 only (Python 2 list concat).
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
y=[]
yerr1=[]
old_rv=[]
# Pick the signal, noise and keplerian RV (rvy, defined earlier in the
# file) at each surviving day.
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
old_rv.append(rvy[e-1])
yerr0=np.array(yerr1)
y0=np.array(y)
t0=t
# Downsample to one measurement every 4 days.
y=[]
yerr=[]
t=[]
new_rv=[]
for ii in np.arange(0,len(t0),4):
y.append(y0[ii])
yerr.append(yerr0[ii])
t.append(t0[ii])
new_rv.append(old_rv[ii])
# Observed series = decayed spot signal + noise draw + keplerian RV.
y=[n+m+o for n,m,o in zip(y,yerr,new_rv)]
y=np.array(y)
yerr=np.array(yerr)
t=np.array(t)
pl.plot(t,y,'*')
pl.savefig('{0}_RV_4days_dgap.png'.format(soap_file))
pl.close('all')
print '> Preparing kernel.'
print
# Quasi-periodic kernel plus white noise, hyperparameters drawn from priors.
kernel=gedi.kernel.QuasiPeriodic(amplitude_prior.rvs(),
lenghtscale1_prior.rvs(),
lenghtscale2_prior.rvs(),
period_prior.rvs()) +\
gedi.kernel.WhiteNoise(wn_prior.rvs())
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
print '> Preparing mcmc.'
print
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Walkers start at log(prior draws); sampling happens in log-parameter space.
p0=[np.log(from_prior()) for i in range(nwalkers)]
# NOTE(review): relies on Python 2's map() returning a list.
assert not np.isinf(map(lnprob, p0)).any()
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, burns)
#sampler.reset()
print "Running production chain"
print
sampler.run_mcmc(p0, runs)
print 'Done.'
print
print '> Preparing graphics.'
print
# Trace plots, one panel per hyperparameter (panels 1-3 exponentiated).
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot((sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot((sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decaygap.png'.format(soap_file))
print 'Done.'
print
print '> Preparing final solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
#save data
# NOTE(review): filenames say "_1day_" although this is the 4-day decay+gap
# section -- copy/paste slip; also overwrites the other sections' files.
# NOTE(review): binary pickle (protocol=-1) into text-mode 'w' file.
pickle.dump(sampler.chain[:, :, 0],open("{0}_1day_A.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 1],open("{0}_1day_L1.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 2],open("{0}_1day_L2.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 3],open("{0}_1day_P.p".format(soap_file), 'w'),protocol=-1)
pickle.dump(sampler.chain[:, :, 4],open("{0}_1day_WN.p".format(soap_file), 'w'),protocol=-1)
# Back-transform the flattened post-burn-in chain to linear units.
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
# 16/50/84 percentiles -> (median, +sigma, -sigma) per hyperparameter.
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
# NOTE(review): 5 sample columns but only 4 corner labels -- confirm.
fig= corner.corner(samples,labels=["$Theta$","$l$","$P$","$WN$"])
fig.savefig('triangle_{0}_4days_decaygap.png'.format(soap_file))
pl.close('all')
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
# Restore stdout (this whole section logged into the opened .txt file).
sys.stdout = sys.__stdout__
f.close()
| |
#!/usr/bin/python
#Author: Michael Brothers
#Date: 10-22-2013
#Title: Vortex Ring Pair Interaction
#
#Note: must be run with four ranks
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import numpy.ma as ma
from mpl_toolkits.mplot3d import Axes3D
from mpi4py import MPI
# MPI setup: the header note says this script must run with four ranks;
# each rank computes one ring-on-ring induction term (see data_stream).
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Axis limits for the 3-D scatter plot.
FLOOR = -30
CEILING = 30
class AnimatedScatter(object):
"""Animated 3-D scatter of two interacting vortex rings.

Each ring is discretised into ``numpoints`` segment centroids.  The four
ring-on-ring induction terms are computed on four separate MPI ranks and
combined with an Allgatherv every frame; rank 0 saves the animation and
rank 1 shows it live (see the __main__ block).
"""
def __init__(self,
numpoints= 128,
Strength_One = 1.0,
Strength_Two = 1.0,
Radius_One = 10.0,
Radius_Two = 10.0,
X_Coord_One = -2.50,
X_Coord_Two = 2.50,
Timestep = 1.05,
Maxframes = 800,
Framerate = 20.0):
self.numframes = 0
self.maxframes = Maxframes
self.framerate = Framerate
self.timestep = Timestep
self.numpoints = numpoints
# NOTE(review): np.float is deprecated (removed in NumPy 1.24); the
# builtin float() does the same thing here.
self.Radius_One = np.float(Radius_One)
self.Radius_Two = np.float(Radius_Two)
self.Strength_One = Strength_One
self.Strength_Two = Strength_Two
self.X_Coord_One = X_Coord_One
self.X_Coord_Two = X_Coord_Two
self.stream = self.data_stream()
# One angle per segment, evenly spaced over [0, 2*pi).
self.angle_sweep = np.linspace(0.0, (1.0 - 1.0/self.numpoints)*2.0*np.pi , self.numpoints)
# Rows [0, numpoints) = ring one; rows [numpoints, 2*numpoints) = ring two.
self.both_vortex_segment_centroids = np.zeros((2*self.numpoints,3))
self.both_vortex_cart_velocities = np.zeros((2*self.numpoints, 3))
self.set_Vortex_Segment_Centroids()
# Receive buffer for Allgatherv: 4 ranks x (2*numpoints x 3) velocities.
self.recvbuff = np.zeros((8*self.numpoints, 3))
self.scalarbuff = np.float(0.0)
#rank 0 will write to disk, while rank 1 will show realtime output
if(rank < 2):
self.fig = plt.figure()
self.fig.canvas.mpl_connect('draw_event',self.forceUpdate)
self.ax = self.fig.add_subplot(111,projection = '3d')
self.ani = animation.FuncAnimation(self.fig, self.update, interval=1000.0/self.framerate,
init_func=self.setup_plot, frames= self.maxframes)
#force the non-drawing ranks to iterate anyway
# (every frame does a collective Allgatherv, so all ranks must step the
# generator -- presumably to keep the drawing ranks from deadlocking).
if(rank > 1):
while self.numframes < self.maxframes:
data = next(self.stream)
def set_Vortex_Segment_Centroids(self):
# Place both rings in y-z planes at their respective x coordinates.
for point in range(self.numpoints):
self.both_vortex_segment_centroids[point, 0] = self.X_Coord_One
self.both_vortex_segment_centroids[point, 1] = self.Radius_One*np.sin(self.angle_sweep[point])
self.both_vortex_segment_centroids[point, 2] = self.Radius_One*np.cos(self.angle_sweep[point])
self.both_vortex_segment_centroids[point + self.numpoints, 0] = self.X_Coord_Two
self.both_vortex_segment_centroids[point + self.numpoints, 1] = self.Radius_Two*np.sin(self.angle_sweep[point])
self.both_vortex_segment_centroids[point + self.numpoints, 2] = self.Radius_Two*np.cos(self.angle_sweep[point])
def forceUpdate(self, event):
# Re-flag the scatter artist on every draw so it keeps refreshing
# (workaround for 3-D scatter updates -- TODO confirm still needed).
self.scat.changed()
def setup_plot(self):
# First frame: ring one drawn red, ring two blue.
X = next(self.stream)
color_one = ['r' for point in range(self.numpoints)]
color_two = ['b' for point in range(self.numpoints)]
colors = color_one + color_two
self.scat = self.ax.scatter(X[:,0], X[:,1], X[:,2] , c=colors)
self.ax.set_xlim3d(FLOOR, CEILING)
self.ax.set_ylim3d(FLOOR, CEILING)
self.ax.set_zlim3d(FLOOR, CEILING)
return self.scat,
def data_stream(self):
# Generator: each next() advances the simulation one timestep and
# yields a live view of the centroid array.
data = self.both_vortex_segment_centroids
# NOTE(review): 'True and' is redundant in this condition.
while True and self.numframes < self.maxframes:
self.numframes += 1
# Each rank computes exactly one of the four induction terms.
if(rank == 0):
self.compute_U_One_On_One_Vectorized()
if(rank == 1):
self.compute_U_One_On_Two_Vectorized()
if(rank == 2):
self.compute_U_Two_On_One_Vectorized()
if(rank == 3):
self.compute_U_Two_On_Two_Vectorized()
# Collect all four partial velocity arrays on every rank.
comm.Allgatherv([self.both_vortex_cart_velocities, MPI.DOUBLE], [self.recvbuff, MPI.DOUBLE])
self.compute_Updated_Positions()
#print "rank " + str(rank) + " " + str(self.numframes)
yield data
def compute_U_One_On_One_Vectorized(self):
# Self-induced velocity of ring one: for every segment, sum the
# Strength/(4*pi) * dl * (omega x dx)/|dx|^3 contribution of every
# OTHER segment of the same ring (discretised Biot-Savart kernel).
# NOTE(review): 'm' is unused; the four compute_U_* methods are nearly
# identical and could share one parameterised helper.
m = self.angle_sweep
s = self.angle_sweep
dU = np.zeros((self.numpoints, self.numpoints -1, 3))
dl = 0.0
H_cubed = np.zeros(self.numpoints - 1)
omega_direction = np.zeros((self.numpoints -1 , 3))
x = np.zeros((self.numpoints -1, 3))
x_prime = np.zeros((self.numpoints -1, 3))
for self_point in range(self.numpoints):
x = self.both_vortex_segment_centroids[self_point]
# All segments of ring one except self_point itself.
x_prime[:self_point] = self.both_vortex_segment_centroids[:self_point]
x_prime[self_point:] = self.both_vortex_segment_centroids[self_point +1:self.numpoints]
omega_direction[:self_point,0] = 0.0 #x-dir magnitude
omega_direction[:self_point,1] = np.cos(s[:self_point]) #y-dir magnitude
omega_direction[:self_point,2] = -np.sin(s[:self_point]) #z-dir magnitude
omega_direction[self_point:,0] = 0.0 #x-dir magnitude
omega_direction[self_point:,1] = np.cos(s[self_point+1:]) #y-dir magnitude
omega_direction[self_point:,2] = -np.sin(s[self_point+1:]) #z-dir magnitude
# Segment arc length of the source ring.
dl = self.Radius_One* 2.0*np.pi * np.power(self.numpoints, -1.0)
delta_x = x - x_prime
H_cubed = np.power(np.sum(np.power(delta_x, 2.0), axis = 1), 3.0/2.0)
dU[self_point] = self.Strength_One*np.power(4.0*np.pi, -1.0) * dl * np.power(H_cubed, -1.0)[:, np.newaxis] * np.cross(omega_direction, delta_x)
#self.vortex_one_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
self.both_vortex_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
return 0
def compute_U_One_On_Two_Vectorized(self):
# Velocity induced BY ring one ON ring two (same kernel as above; the
# target point is offset by numpoints into the second ring's rows).
m = self.angle_sweep
s = self.angle_sweep
dU = np.zeros((self.numpoints, self.numpoints -1, 3))
dl = 0.0
H_cubed = np.zeros(self.numpoints - 1)
omega_direction = np.zeros((self.numpoints -1 , 3))
x = np.zeros((self.numpoints -1, 3))
x_prime = np.zeros((self.numpoints -1, 3))
for self_point in range(self.numpoints):
x = self.both_vortex_segment_centroids[self_point + self.numpoints]
# NOTE(review): the source still excludes index self_point of ring
# one even though the target is on ring two -- confirm intended.
x_prime[:self_point] = self.both_vortex_segment_centroids[:self_point]
x_prime[self_point:] = self.both_vortex_segment_centroids[self_point +1:self.numpoints]
omega_direction[:self_point,0] = 0.0 #x-dir magnitude
omega_direction[:self_point,1] = np.cos(s[:self_point]) #y-dir magnitude
omega_direction[:self_point,2] = -np.sin(s[:self_point]) #z-dir magnitude
omega_direction[self_point:,0] = 0.0 #x-dir magnitude
omega_direction[self_point:,1] = np.cos(s[self_point+1:]) #y-dir magnitude
omega_direction[self_point:,2] = -np.sin(s[self_point+1:]) #z-dir magnitude
dl = self.Radius_One* 2.0*np.pi * np.power(self.numpoints, -1.0)
delta_x = x - x_prime
H_cubed = np.power(np.sum(np.power(delta_x, 2.0), axis = 1), 3.0/2.0)
dU[self_point] = self.Strength_One*np.power(4.0*np.pi, -1.0) * dl * np.power(H_cubed, -1.0)[:, np.newaxis] * np.cross(omega_direction, delta_x)
#self.vortex_one_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
self.both_vortex_cart_velocities[self_point + self.numpoints] = np.sum(dU[self_point], axis=0)
return 0
def compute_U_Two_On_One_Vectorized(self):
# Velocity induced BY ring two ON ring one.
m = self.angle_sweep
s = self.angle_sweep
dU = np.zeros((self.numpoints, self.numpoints -1, 3))
dl = 0.0
H_cubed = np.zeros(self.numpoints - 1)
omega_direction = np.zeros((self.numpoints -1 , 3))
x = np.zeros((self.numpoints -1, 3))
x_prime = np.zeros((self.numpoints -1, 3))
for self_point in range(self.numpoints):
x = self.both_vortex_segment_centroids[self_point]
x_prime[:self_point] = self.both_vortex_segment_centroids[self.numpoints :self_point +self.numpoints]
x_prime[self_point:] = self.both_vortex_segment_centroids[self_point +1 +self.numpoints:]
omega_direction[:self_point,0] = 0.0 #x-dir magnitude
omega_direction[:self_point,1] = np.cos(s[:self_point]) #y-dir magnitude
omega_direction[:self_point,2] = -np.sin(s[:self_point]) #z-dir magnitude
omega_direction[self_point:,0] = 0.0 #x-dir magnitude
omega_direction[self_point:,1] = np.cos(s[self_point+1:]) #y-dir magnitude
omega_direction[self_point:,2] = -np.sin(s[self_point+1:]) #z-dir magnitude
dl = self.Radius_Two* 2.0*np.pi * np.power(self.numpoints, -1.0)
delta_x = x - x_prime
H_cubed = np.power(np.sum(np.power(delta_x, 2.0), axis = 1), 3.0/2.0)
dU[self_point] = self.Strength_Two*np.power(4.0*np.pi, -1.0) * dl * np.power(H_cubed, -1.0)[:, np.newaxis] * np.cross(omega_direction, delta_x)
#self.vortex_one_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
self.both_vortex_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
return 0
def compute_U_Two_On_Two_Vectorized(self):
# Self-induced velocity of ring two.
m = self.angle_sweep
s = self.angle_sweep
dU = np.zeros((self.numpoints, self.numpoints -1, 3))
dl = 0.0
H_cubed = np.zeros(self.numpoints - 1)
omega_direction = np.zeros((self.numpoints -1 , 3))
x = np.zeros((self.numpoints -1, 3))
x_prime = np.zeros((self.numpoints -1, 3))
for self_point in range(self.numpoints):
x = self.both_vortex_segment_centroids[self_point + self.numpoints]
x_prime[:self_point] = self.both_vortex_segment_centroids[self.numpoints :self_point + self.numpoints]
x_prime[self_point:] = self.both_vortex_segment_centroids[self_point +1 + self.numpoints: ]
omega_direction[:self_point,0] = 0.0 #x-dir magnitude
omega_direction[:self_point,1] = np.cos(s[:self_point]) #y-dir magnitude
omega_direction[:self_point,2] = -np.sin(s[:self_point]) #z-dir magnitude
omega_direction[self_point:,0] = 0.0 #x-dir magnitude
omega_direction[self_point:,1] = np.cos(s[self_point+1:]) #y-dir magnitude
omega_direction[self_point:,2] = -np.sin(s[self_point+1:]) #z-dir magnitude
dl = self.Radius_Two * 2.0*np.pi * np.power(self.numpoints, -1.0)
delta_x = x - x_prime
H_cubed = np.power(np.sum(np.power(delta_x, 2.0), axis = 1), 3.0/2.0)
dU[self_point] = self.Strength_Two*np.power(4.0*np.pi, -1.0) * dl * np.power(H_cubed, -1.0)[:, np.newaxis] * np.cross(omega_direction, delta_x)
#self.vortex_one_cart_velocities[self_point] = np.sum(dU[self_point], axis=0)
self.both_vortex_cart_velocities[self_point + self.numpoints] = np.sum(dU[self_point], axis=0)
return 0
def compute_Updated_Positions(self):
#self.vortex_one_segment_centroids += self.vortex_one_cart_velocities
#self.vortex_two_segment_centroids += self.vortex_two_cart_velocities
# Total velocity = sum of the four rank contributions; forward-Euler step.
velocity = self.recvbuff[:2*self.numpoints] + self.recvbuff[2*self.numpoints: 4*self.numpoints] + self.recvbuff[4*self.numpoints: 6*self.numpoints] + self.recvbuff[6*self.numpoints: 8*self.numpoints]
self.both_vortex_segment_centroids += self.timestep*velocity
#The new radius equals the average distance to the center of the ring
# NOTE(review): np.sum(centroids, axis=0) is the SUM of the points, not
# the centroid (no /numpoints), so this does not literally match the
# comment above -- confirm the intended formula.
self.Radius_One = np.linalg.norm(np.sum(np.power(np.power(self.both_vortex_segment_centroids[:self.numpoints] -
np.sum(self.both_vortex_segment_centroids[:self.numpoints],
axis = 0), 2.0), .5), axis = 0)[1:])/self.numpoints
self.Radius_Two = np.linalg.norm(np.sum(np.power(np.power(self.both_vortex_segment_centroids[self.numpoints:] -
np.sum(self.both_vortex_segment_centroids[self.numpoints:],
axis = 0), 2.0), .5), axis = 0)[1:])/self.numpoints
return 0
def update(self, i):
# Per-frame callback: advance the simulation and move the 3-D scatter.
data = next(self.stream)
self.scat._offsets3d = ( np.ma.ravel(data[:,0]) , np.ma.ravel(data[:,1]) , np.ma.ravel(data[:,2]) )
return self.scat,
def show(self):
plt.show()
if __name__ == '__main__':
    # Every rank constructs the simulation; only ranks 0 and 1 own a figure
    # (see AnimatedScatter.__init__).  Rank 0 renders the animation to disk,
    # rank 1 displays it interactively, ranks 2-3 already finished stepping
    # the generator inside __init__.
    scatter = AnimatedScatter()
    if rank == 0:
        scatter.ani.save("Rank" + str(rank) + ".avi", writer='mencoder')
    elif rank == 1:
        scatter.show()
| |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
from __future__ import division
import itertools
from collections import namedtuple
import numpy as np
from dimod.decorators import vartype_argument
from dimod.sampleset import SampleSet, SampleView
from dimod.vartypes import Vartype
__all__ = ['Response']
class Response(SampleSet):
"""Samples and any other data returned by dimod samplers.

Deprecated alias of :class:`.SampleSet`; constructing one emits a
DeprecationWarning (see ``__init__``).

Args:
record (:obj:`numpy.recarray`)
A numpy record array. Must have 'sample', 'energy' and 'num_occurrences' as fields.
The 'sample' field should be a 2D numpy int8 array where each row is a sample and each
column represents the value of a variable.
labels (list):
A list of variable labels.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
Examples:
>>> import dimod
...
>>> sampler = dimod.ExactSolver()
>>> response = sampler.sample_ising({'a': -0.5, 'b': -0.5}, {('a', 'b'): -1.0})
>>> response.record.sample
array([[-1, -1],
[ 1, -1],
[ 1, 1],
[-1, 1]], dtype=int8)
>>> response.record.energy
array([ 0., 1., -2., 1.])
>>> response.variable_labels # doctest: +SKIP
['a', 'b']
>>> response.label_to_idx['b'] # doctest: +SKIP
1
>>> response.vartype is dimod.SPIN
True
>>> for sample, energy in response.data(['sample', 'energy']): # doctest: +SKIP
... print(sample, energy)
{'a': 1, 'b': 1} -2.0
{'a': -1, 'b': -1} 0.0
{'a': 1, 'b': -1} 1.0
{'a': -1, 'b': 1} 1.0
"""
__slots__ = ()
def __init__(self, *args, **kwargs):
# Behaves exactly like SampleSet but warns that this name is deprecated.
super(Response, self).__init__(*args, **kwargs)
import warnings
warnings.warn("dimod.Response is deprecated, please use dimod.SampleSet instead.",
DeprecationWarning)
###############################################################################################
# Properties
###############################################################################################
@property
def variable_labels(self):
"""list: Variable labels of the samples.

Corresponds to the columns of the sample field of :attr:`.Response.record`.
"""
return self.variables
@property
def label_to_idx(self):
"""dict: Maps the variable labels to the column in :attr:`.Response.record`."""
return self.variables.index
###############################################################################################
# Constructors
###############################################################################################
@classmethod
def from_samples(cls, samples_like, vectors, info, vartype, variable_labels=None):
"""Build a response from samples.

Args:
samples_like:
A collection of samples. 'samples_like' is an extension of NumPy's array_like
to include an iterable of sample dictionaries (as returned by
:meth:`.Response.samples`).
vectors (dict[field, :obj:`numpy.array`/list]):
Additional per-sample data as a dict of vectors. Each vector is the
same length as `samples_like`. The key 'energy' and its vector is required.
info (dict):
Information about the response as a whole formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the response. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_labels (list, optional):
Determines the variable labels if samples_like is not an iterable of dictionaries.
If samples_like is not an iterable of dictionaries and if variable_labels is not
provided then index labels are used.
Returns:
:obj:`.Response`
Examples:
From dicts
>>> import dimod
...
>>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}]
>>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN)
From an array
>>> import dimod
>>> import numpy as np
...
>>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables
>>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {},
... dimod.SPIN, variable_labels=['a', 'b', 'c'])
"""
# there is no np.is_array_like so we use a try-except block
try:
# trying to cast it to int8 rules out list of dictionaries. If we didn't try to cast
# then it would just create a vector of np.object
samples = np.asarray(samples_like, dtype=np.int8)
except TypeError:
# if labels are None, they are set here
samples, variable_labels = _samples_dicts_to_array(samples_like, variable_labels)
assert samples.dtype == np.int8, 'sanity check'
# data_struct_array validates the vector shapes and requires 'energy'
record = data_struct_array(samples, **vectors)
# if labels are still None, set them here. We could do this in an else in the try-except
# block, but the samples-array might not have the correct shape
if variable_labels is None:
__, num_variables = record.sample.shape
variable_labels = list(range(num_variables))
return cls(record, variable_labels, info, vartype)
def _samples_dicts_to_array(samples_dicts, labels):
"""Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels is they are None.
"""
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels
def data_struct_array(sample, **vectors):  # data_struct_array(sample, *, energy, **vectors):
    """Combine samples and per-sample data into a numpy structured array.

    Args:
        sample (array_like):
            Samples, in any form that can be converted into a numpy array.
        energy (array_like, required):
            Required keyword argument. Energies, in any form that can be
            converted into a numpy 1-dimensional array.
        **kwargs (array_like):
            Other per-sample data, in any form that can be converted into a
            numpy array.

    Returns:
        :obj:`~numpy.ndarray`: A numpy structured array with fields
        ['sample', 'energy', 'num_occurrences', **kwargs].
    """
    if len(sample):
        sample = np.asarray(sample, dtype=np.int8)
        if sample.ndim < 2:
            # a single flat sample becomes a 1 x num_variables matrix
            sample = np.expand_dims(sample, 0)
    else:
        # no samples at all
        sample = np.zeros((0, 0), dtype=np.int8)

    num_samples, num_variables = sample.shape

    # every sample occurs once unless the caller says otherwise
    vectors.setdefault('num_occurrences', [1] * num_samples)

    datavectors = {}
    datatypes = [('sample', np.dtype(np.int8), (num_variables,))]

    for field, values in vectors.items():
        values = np.asarray(values, float if field == 'energy' else None)
        datavectors[field] = values
        if values.ndim < 1 or values.shape[0] != num_samples:
            raise ValueError(
                ('{} and sample have a mismatched shape {}, {}. They must have the same size '
                 'in the first axis.').format(field, values.shape, sample.shape))
        datatypes.append((field, values.dtype, values.shape[1:]))

    if 'energy' not in datavectors:
        # consistent error with the one thrown in python3
        raise TypeError('data_struct_array() needs keyword-only argument energy')
    elif datavectors['energy'].shape != (num_samples,):
        raise ValueError('energy should be a vector of length {}'.format(num_samples))

    data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
    data['sample'] = sample
    for field, values in datavectors.items():
        data[field] = values

    return data
| |
import unittest
import warnings
import numpy as np
import six.moves.cPickle as pickle
import chainer
from chainer import cuda
from chainer import links as L
from chainer import testing
from chainer.testing import attr
class SimpleLink(chainer.Link):
    """A one-parameter link with recognisable fill values.

    The parameter ``p`` has its data filled with zeros and its gradient
    filled with ones, so tests can identify the arrays by content.
    """

    def __init__(self, shape):
        super(SimpleLink, self).__init__(p=shape)
        param = self.p
        param.data.fill(0)
        param.grad.fill(1)
class TestFunctionSetBase(unittest.TestCase):
    """Shared fixture and helper assertions for the FunctionSet tests.

    FunctionSet is deprecated, so DeprecationWarning is silenced for the
    duration of every test.
    """

    def setUp(self):
        # Enter a catch_warnings context manually; tearDown exits it.
        self.warn = warnings.catch_warnings()
        self.warn.__enter__()
        warnings.filterwarnings(action='ignore', category=DeprecationWarning)

    def tearDown(self):
        self.warn.__exit__()

    def _check_setter(self, fs, gpu, attribute):
        # Round-trip the attribute through its setter and verify the arrays
        # survive unchanged.
        before = getattr(fs, attribute)
        setattr(fs, attribute, before)
        after = getattr(fs, attribute)
        if gpu:
            # copy device arrays back to host before comparing
            before = tuple(arr.get() for arr in before)
            after = tuple(arr.get() for arr in after)
        self.assertEqual(len(before), len(after))
        for expected, actual in zip(before, after):
            np.testing.assert_array_equal(expected, actual)

    def _check_setter_invalid(self, fs, diff, xp, attribute):
        # Assigning a tuple of the wrong length must raise AssertionError.
        current = getattr(fs, attribute)
        if diff > 0:
            values = current + (xp.empty((1,)),) * diff  # too many entries
        else:
            values = current[:diff]  # too few entries
        with self.assertRaises(AssertionError):
            setattr(fs, attribute, values)

    def _assert_all_is(self, a1, a2):
        # Identity-based comparison: same objects, order ignored.
        self.assertEqual(sorted(map(id, a1)), sorted(map(id, a2)))
class TestNestedFunctionSet(TestFunctionSetBase):
"""Tests for a FunctionSet (fs2) that contains another FunctionSet (fs1).

The hierarchy holds exactly two parameters overall: fs1.a.p and fs2.b.p,
so parameter/gradient aggregation across the nesting can be checked.
"""
def setUp(self):
super(TestNestedFunctionSet, self).setUp()
self.fs1 = chainer.FunctionSet(
a=SimpleLink((1, 2)))
self.fs2 = chainer.FunctionSet(
fs1=self.fs1,
b=SimpleLink((3, 4)))
def test_collect_parameters(self):
# Parameters/gradients of the nested set must be collected recursively.
params = self.fs2.parameters
grads = self.fs2.gradients
self.assertEqual(len(params), 2)
self.assertEqual(len(grads), 2)
self._assert_all_is(params, [self.fs1.a.p.data, self.fs2.b.p.data])
self._assert_all_is(grads, [self.fs1.a.p.grad, self.fs2.b.p.grad])
def test_pickle_cpu(self):
# A pickle round trip must preserve all parameter data, nested included.
fs2_serialized = pickle.dumps(self.fs2)
fs2_loaded = pickle.loads(fs2_serialized)
self.assertTrue((self.fs2.b.p.data == fs2_loaded.b.p.data).all())
self.assertTrue(
(self.fs2.fs1.a.p.data == fs2_loaded.fs1.a.p.data).all())
@attr.gpu
def test_pickle_gpu(self):
# Same round trip with parameters living on the GPU; compare on CPU.
self.fs2.to_gpu()
fs2_serialized = pickle.dumps(self.fs2)
fs2_loaded = pickle.loads(fs2_serialized)
fs2_loaded.to_cpu()
self.fs2.to_cpu()
self.assertTrue((self.fs2.b.p.data == fs2_loaded.b.p.data).all())
self.assertTrue(
(self.fs2.fs1.a.p.data == fs2_loaded.fs1.a.p.data).all())
def check_getter(self, fs, gpu, attribute):
# The 'parameters'/'gradients' getters must expose exactly the two
# underlying arrays (compared by identity).
params = getattr(fs, attribute)
self.assertEqual(len(params), 2)
if attribute == 'parameters':
self._assert_all_is(params, [self.fs1.a.p.data, self.fs2.b.p.data])
elif attribute == 'gradients':
self._assert_all_is(params, [self.fs1.a.p.grad, self.fs2.b.p.grad])
else:
raise ValueError(
'attribute should be parameters or gradients')
def test_parameters_getter_cpu(self):
self.check_getter(self.fs2, False, 'parameters')
@attr.gpu
def test_parameters_getter_gpu(self):
self.fs2.to_gpu()
self.check_getter(self.fs2, True, 'parameters')
def test_parameters_setter_cpu(self):
self._check_setter(self.fs2, False, 'parameters')
@attr.gpu
def test_parameters_setter_gpu(self):
self.fs2.to_gpu()
self._check_setter(self.fs2, True, 'parameters')
def test_parameters_setter_invalid_cpu(self):
# diff=+1: one surplus array in the assigned tuple must be rejected.
self._check_setter_invalid(self.fs2, 1, np, 'parameters')
@attr.gpu
def test_parameters_setter_invalid_gpu(self):
self.fs2.to_gpu()
self._check_setter_invalid(self.fs2, 1, cuda.cupy, 'parameters')
def test_parameters_setter_invalid_2_cpu(self):
# diff=-1: one missing array must be rejected as well.
self._check_setter_invalid(self.fs2, -1, np, 'parameters')
@attr.gpu
def test_parameters_setter_invalid_2_gpu(self):
self.fs2.to_gpu()
self._check_setter_invalid(self.fs2, -1, cuda.cupy, 'parameters')
def test_gradients_getter_cpu(self):
self.check_getter(self.fs2, False, 'gradients')
@attr.gpu
def test_gradients_getter_gpu(self):
self.fs2.to_gpu()
self.check_getter(self.fs2, True, 'gradients')
def test_gradients_setter_cpu(self):
self._check_setter(self.fs2, False, 'gradients')
@attr.gpu
def test_gradients_setter_gpu(self):
self.fs2.to_gpu()
self._check_setter(self.fs2, True, 'gradients')
def test_gradients_setter_invalid_cpu(self):
self._check_setter_invalid(self.fs2, 1, np, 'gradients')
@attr.gpu
def test_gradients_setter_invalid_gpu(self):
self.fs2.to_gpu()
self._check_setter_invalid(self.fs2, 1, cuda.cupy, 'gradients')
def test_gradients_setter_invalid_2_cpu(self):
self._check_setter_invalid(self.fs2, -1, np, 'gradients')
@attr.gpu
def test_gradients_setter_invalid_2_gpu(self):
self.fs2.to_gpu()
self._check_setter_invalid(self.fs2, -1, cuda.cupy, 'gradients')
class TestFunctionSet(TestFunctionSetBase):
def setUp(self):
super(TestFunctionSet, self).setUp()
self.fs = chainer.FunctionSet(
a=L.Linear(3, 2),
b=L.Linear(3, 2)
)
self.aW = self.fs.a.W.data
self.ab = self.fs.a.b.data
self.bW = self.fs.b.W.data
self.bb = self.fs.b.b.data
self.agW = self.fs.a.W.grad
self.agb = self.fs.a.b.grad
self.bgW = self.fs.b.W.grad
self.bgb = self.fs.b.b.grad
def check_equal_fs(self, fs1, fs2):
self.assertTrue((fs1.a.W.data == fs2.a.W.data).all())
self.assertTrue((fs1.a.b.data == fs2.a.b.data).all())
self.assertTrue((fs1.b.W.data == fs2.b.W.data).all())
self.assertTrue((fs1.b.b.data == fs2.b.b.data).all())
def test_pickle_cpu(self):
s = pickle.dumps(self.fs)
fs2 = pickle.loads(s)
self.check_equal_fs(self.fs, fs2)
@attr.gpu
def test_pickle_gpu(self):
self.fs.to_gpu()
s = pickle.dumps(self.fs)
fs2 = pickle.loads(s)
self.fs.to_cpu()
fs2.to_cpu()
self.check_equal_fs(self.fs, fs2)
def check_copy_parameters_from(self, src_id, dst_id):
aW = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
ab = np.random.uniform(-1, 1, (2,)).astype(np.float32)
bW = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
bb = np.random.uniform(-1, 1, (2,)).astype(np.float32)
fs = self.fs.copy()
fs.a.W.data = aW
fs.a.b.data = ab
fs.b.W.data = bW
fs.b.b.data = bb
if src_id >= 0:
fs.to_gpu(src_id)
if dst_id >= 0:
self.fs.to_gpu(dst_id)
self.fs.copy_parameters_from(fs.parameters)
self.fs.to_cpu()
self.assertTrue((self.fs.a.W.data == aW).all())
self.assertTrue((self.fs.a.b.data == ab).all())
self.assertTrue((self.fs.b.W.data == bW).all())
self.assertTrue((self.fs.b.b.data == bb).all())
def test_copy_parameters_from_cpu_to_cpu(self):
self.check_copy_parameters_from(-1, -1)
@attr.gpu
def test_copy_parameters_from_cpu_to_gpu(self):
self.check_copy_parameters_from(-1, cuda.Device().id)
@attr.gpu
def test_copy_parameters_from_gpu_to_cpu(self):
self.check_copy_parameters_from(cuda.Device().id, -1)
@attr.gpu
def test_copy_parameters_from_gpu_to_gpu(self):
device_id = cuda.Device().id
self.check_copy_parameters_from(device_id, device_id)
@attr.multi_gpu(2)
def test_copy_parameters_from_multigpu(self):
self.check_copy_parameters_from(0, 1)
def test_getitem(self):
self.assertIs(self.fs['a'], self.fs.a)
def test_getitem_notfoud(self):
with self.assertRaises(AttributeError):
self.fs['not_found']
def check_getter(self, fs, gpu, attribute):
params = getattr(fs, attribute)
self.assertEqual(len(params), 4)
if attribute == 'parameters':
self._assert_all_is(
params,
[self.fs.a.W.data, self.fs.a.b.data,
self.fs.b.W.data, self.fs.b.b.data])
elif attribute == 'gradients':
self._assert_all_is(
params,
[self.fs.a.W.grad, self.fs.a.b.grad,
self.fs.b.W.grad, self.fs.b.b.grad])
def test_parameters_getter_cpu(self):
self.check_getter(self.fs, False, 'parameters')
@attr.gpu
def test_parameters_getter_gpu(self):
self.fs.to_gpu()
self.check_getter(self.fs, True, 'parameters')
def test_parameters_setter_cpu(self):
    # Delegates to the shared setter round-trip helper.
    self._check_setter(self.fs, False, 'parameters')
@attr.gpu
def test_parameters_setter_gpu(self):
    # Setter round-trip after moving the FunctionSet to the GPU.
    self.fs.to_gpu()
    self._check_setter(self.fs, True, 'parameters')
def test_parameters_setter_invalid_cpu(self):
    # Invalid-assignment case selected by the helper's second argument.
    self._check_setter_invalid(self.fs, 1, np, 'parameters')
@attr.gpu
def test_parameters_setter_invalid_gpu(self):
    # Same invalid case, but with cupy arrays on the GPU.
    self._check_setter_invalid(self.fs, 1, cuda.cupy, 'parameters')
def test_parameters_setter_invalid_2_cpu(self):
    # Second invalid-assignment variant (helper argument -1).
    self._check_setter_invalid(self.fs, -1, np, 'parameters')
@attr.gpu
def test_parameters_setter_invalid_2_gpu(self):
    # Second invalid variant with cupy arrays.
    self._check_setter_invalid(self.fs, -1, cuda.cupy, 'parameters')
def test_gradients_getter_cpu(self):
    # Aggregate 'gradients' getter on the CPU.
    self.check_getter(self.fs, False, 'gradients')
@attr.gpu
def test_gradients_getter_gpu(self):
    # Same check after moving the FunctionSet to the GPU.
    self.fs.to_gpu()
    self.check_getter(self.fs, True, 'gradients')
def test_gradients_setter_cpu(self):
    # Setter round-trip for 'gradients' on the CPU.
    self._check_setter(self.fs, False, 'gradients')
@attr.gpu
def test_gradients_setter_gpu(self):
    # Setter round-trip for 'gradients' on the GPU.
    self.fs.to_gpu()
    self._check_setter(self.fs, True, 'gradients')
def test_gradients_setter_invalid_cpu(self):
    # Invalid-assignment case for 'gradients' with numpy arrays.
    self._check_setter_invalid(self.fs, 1, np, 'gradients')
@attr.gpu
def test_gradients_setter_invalid_gpu(self):
    # Invalid-assignment case for 'gradients' with cupy arrays.
    self._check_setter_invalid(self.fs, 1, cuda.cupy, 'gradients')
def test_gradients_setter_invalid_2_cpu(self):
    # Second invalid-assignment variant (helper argument -1).
    self._check_setter_invalid(self.fs, -1, np, 'gradients')
@attr.gpu
def test_gradients_setter_invalid_2_gpu(self):
    # Second invalid variant with cupy arrays.
    self._check_setter_invalid(self.fs, -1, cuda.cupy, 'gradients')
testing.run_module(__name__, __file__)
| |
'''''''''''''''''''''''''''''''''''''''''''''''''''''
// Authored by Christopher Iliffe Sprague //
// Christopher.Iliffe.Sprague@gmail.com //
// +1 703 851 6842 //
// https://github.com/CISprague/Spacecraft_Testbed //
'''''''''''''''''''''''''''''''''''''''''''''''''''''
from jplephem.spk import SPK
from sgp4.ext import invjday
from sgp4.io import twoline2rv
from sgp4.earth_gravity import wgs84 as grav_const
import numpy as np
import sys
import os
# Specify the user's path to this directory
# (used below to locate the bundled ephemeris and TLE data files).
Directory = os.path.dirname(__file__)
# Celestial Body utilities
def Assign_Celestial_Body_Attributes(celestial_body):
    '''Attach the known facts for ``celestial_body`` as attributes.

    The body is looked up by its ``name`` in Celestial_Body_Attributes()
    and every fact in that record (mass, diameter, jplephem_index, ...)
    becomes an attribute of the instance.  Returns None.
    '''
    facts = Celestial_Body_Attributes()[celestial_body.name]
    for key, value in facts.items():
        setattr(celestial_body, key, value)
    return None
def Celestial_Body_Attributes():
    '''Returns all the attributes of a specified
    celestial body. All units are SI.

    Keyed by body name.  ``jplephem_index`` is the (centre, target) pair
    used to address the body in the DE430 SPK kernel; angles are in
    radians, lengths in metres, times in seconds, masses in kilograms.
    Negative rotation periods denote retrograde rotation; ``None`` means
    the quantity is undefined for that body (e.g. gas-giant surface
    pressure).
    '''
    cba = {
        # NOTE(review): the Sun's mass looks like it should be ~1.9895e+30
        # kg (e+24 here) -- verify against the source data before relying
        # on it for gravitational calculations.
        'Sun': {'jplephem_index': (0, 10),
                'mass': 1.989500e+24,
                'diameter': 1392e+6
                },
        'Earth': {'aphelion': 152100000000.0,
                  'density': 5514.0,
                  'diameter': 12756000.0,
                  'distance_from_sun': 149600000000.0,
                  'escape_velocity': 11200.0,
                  'global_magnetic_field': True,
                  'gravity': 9.8,
                  'jplephem_index': (3, 399),
                  'length_of_day': 86400.0,
                  'mass': 5.969999999999999e+24,
                  'mean_temperature': 15.0,
                  'number_of_moons': 1,
                  'obliquity_to_orbit': 0.40840722,
                  'orbital_eccentricity': 0.017,
                  'orbital_inclination': 0.0,
                  'orbital_period': 31553280.0,
                  'orbital_velocity': 29800.0,
                  'perihelion': 147100000000.0,
                  'ring_system': False,
                  'rotation_period': 86040.0,
                  'surface_pressure': 100000.0},
        'Jupiter': {'aphelion': 816600000000.0,
                    'density': 1326.0,
                    'diameter': 142984000.0,
                    'distance_from_sun': 778600000000.0,
                    'escape_velocity': 59500.0,
                    'global_magnetic_field': True,
                    'gravity': 23.1,
                    'jplephem_index': (0, 5),
                    'length_of_day': 35640.0,
                    'mass': 1.898e+27,
                    'mean_temperature': -110.0,
                    'number_of_moons': 67,
                    'obliquity_to_orbit': 0.054105230000000004,
                    'orbital_eccentricity': 0.049,
                    'orbital_inclination': 0.022689290000000004,
                    'orbital_period': 374198400.0,
                    'orbital_velocity': 13100.0,
                    'perihelion': 740500000000.0,
                    'ring_system': True,
                    'rotation_period': 35640.0,
                    'surface_pressure': None},
        'Mars': {'aphelion': 249200000000.0,
                 'density': 3933.0,
                 'diameter': 6792000.0,
                 'distance_from_sun': 227900000000.0,
                 'escape_velocity': 5000.0,
                 'global_magnetic_field': False,
                 'gravity': 3.7,
                 'jplephem_index': (0, 4),
                 'length_of_day': 88920.0,
                 'mass': 6.42e+23,
                 'mean_temperature': -65.0,
                 'number_of_moons': 2,
                 'obliquity_to_orbit': 0.43982316000000005,
                 'orbital_eccentricity': 0.094,
                 'orbital_inclination': 0.03316127,
                 'orbital_period': 59356800.0,
                 'orbital_velocity': 24100.0,
                 'perihelion': 206600000000.0,
                 'ring_system': False,
                 'rotation_period': 88560.0,
                 'surface_pressure': 1000.0},
        'Mercury': {'aphelion': 69800000000.0,
                    'density': 5427.0,
                    'diameter': 4879000.0,
                    'distance_from_sun': 57900000000.0,
                    'escape_velocity': 4300.0,
                    'global_magnetic_field': True,
                    'gravity': 3.7,
                    'jplephem_index': (1, 199),
                    'length_of_day': 15201360.000000002,
                    'mass': 3.3e+23,
                    'mean_temperature': 167.0,
                    'number_of_moons': 0,
                    'obliquity_to_orbit': 0.00017453300000000002,
                    'orbital_eccentricity': 0.205,
                    'orbital_inclination': 0.1221731,
                    'orbital_period': 7603200.0,
                    'orbital_velocity': 47400.0,
                    'perihelion': 46000000000.0,
                    'ring_system': False,
                    'rotation_period': 5067360.0,
                    'surface_pressure': 0.0},
        'Moon': {'aphelion': 406000000.0,
                 'density': 3340.0,
                 'diameter': 3475000.0,
                 # for the Moon this is the distance from the Earth
                 'distance_from_sun': 384000000.0,
                 'escape_velocity': 2400.0,
                 'global_magnetic_field': False,
                 'gravity': 1.6,
                 'jplephem_index': (3, 301),
                 'length_of_day': 2551320.0,
                 'mass': 7.3e+22,
                 'mean_temperature': -20.0,
                 'number_of_moons': 0,
                 'obliquity_to_orbit': 0.11693711000000001,
                 'orbital_eccentricity': 0.055,
                 'orbital_inclination': 0.08901183,
                 'orbital_period': 2358720.0,
                 'orbital_velocity': 1000.0,
                 'perihelion': 363000000.0,
                 'ring_system': False,
                 'rotation_period': 2360520.0,
                 'surface_pressure': 0.0},
        'Neptune': {'aphelion': 4545700000000.0,
                    'density': 1638.0,
                    'diameter': 49528000.0,
                    'distance_from_sun': 4495100000000.0,
                    'escape_velocity': 23500.0,
                    'global_magnetic_field': True,
                    'gravity': 11.0,
                    'jplephem_index': (0, 8),
                    'length_of_day': 57960.00000000001,
                    'mass': 1.0199999999999999e+26,
                    'mean_temperature': -200.0,
                    'number_of_moons': 14,
                    'obliquity_to_orbit': 0.49392839000000005,
                    'orbital_eccentricity': 0.011,
                    'orbital_inclination': 0.03141594,
                    'orbital_period': 5166720000.0,
                    'orbital_velocity': 5400.0,
                    'perihelion': 4444500000000.0,
                    'ring_system': True,
                    'rotation_period': 57960.00000000001,
                    'surface_pressure': None},
        'Pluto': {'aphelion': 7375900000000.0,
                  'density': 2095.0,
                  'diameter': 2370000.0,
                  'distance_from_sun': 5906400000000.0,
                  'escape_velocity': 1300.0,
                  'global_magnetic_field': True,
                  'gravity': 0.7,
                  'jplephem_index': (0, 9),
                  'length_of_day': 551880.0,
                  'mass': 1.46e+22,
                  'mean_temperature': -225.0,
                  'number_of_moons': 5,
                  'obliquity_to_orbit': 2.13802925,
                  'orbital_eccentricity': 0.244,
                  'orbital_inclination': 0.30019676,
                  'orbital_period': 7824384000.0,
                  'orbital_velocity': 4700.0,
                  'perihelion': 4436800000000.0,
                  'ring_system': False,
                  'rotation_period': -551880.0,
                  'surface_pressure': 0.0},
        'Saturn': {'aphelion': 1514500000000.0,
                   'density': 687.0,
                   'diameter': 120536000.0,
                   'distance_from_sun': 1433500000000.0,
                   'escape_velocity': 35500.0,
                   'global_magnetic_field': True,
                   'gravity': 9.0,
                   'jplephem_index': (0, 6),
                   'length_of_day': 38520.0,
                   'mass': 5.68e+26,
                   'mean_temperature': -140.0,
                   'number_of_moons': 62,
                   'obliquity_to_orbit': 0.46600311000000005,
                   'orbital_eccentricity': 0.057,
                   'orbital_inclination': 0.043633250000000005,
                   'orbital_period': 928540800.0,
                   'orbital_velocity': 9700.0,
                   'perihelion': 1352600000000.0,
                   'ring_system': True,
                   'rotation_period': 38520.0,
                   'surface_pressure': None},
        'Uranus': {'aphelion': 3003600000000.0,
                   'density': 1271.0,
                   'diameter': 51118000.0,
                   'distance_from_sun': 2872500000000.0,
                   'escape_velocity': 21300.0,
                   'global_magnetic_field': True,
                   'gravity': 8.7,
                   'jplephem_index': (0, 7),
                   'length_of_day': 61920.0,
                   'mass': 8.68e+25,
                   'mean_temperature': -195.0,
                   'number_of_moons': 27,
                   'obliquity_to_orbit': 1.70693274,
                   'orbital_eccentricity': 0.046,
                   'orbital_inclination': 0.013962640000000002,
                   'orbital_period': 2642889600.0,
                   'orbital_velocity': 6800.0,
                   'perihelion': 2741300000000.0,
                   'ring_system': True,
                   'rotation_period': -61920.0,
                   'surface_pressure': None},
        'Venus': {'aphelion': 108900000000.0,
                  'density': 5243.0,
                  'diameter': 12104000.0,
                  'distance_from_sun': 108200000000.0,
                  'escape_velocity': 10400.0,
                  'global_magnetic_field': False,
                  'gravity': 8.9,
                  'jplephem_index': (2, 299),
                  'length_of_day': 10087200.0,
                  'mass': 4.87e+24,
                  'mean_temperature': 464.0,
                  'number_of_moons': 0,
                  'obliquity_to_orbit': 3.0962154200000005,
                  'orbital_eccentricity': 0.007,
                  'orbital_inclination': 0.05934122,
                  'orbital_period': 19414080.0,
                  'orbital_velocity': 35000.0,
                  'perihelion': 107500000000.0,
                  'ring_system': False,
                  'rotation_period': -20997000.0,
                  'surface_pressure': 9200000.0}}
    return cba
def Position_and_Velocity_Celestial_Body(celestial_body, time):
    '''Return the barycentric position [m] and velocity [m/s] of a
    celestial body at Julian date ``time`` as a (2, 3) numpy array
    (row 0 = position, row 1 = velocity).

    The body is located in the DE430 SPK kernel via its
    ``jplephem_index`` (centre, target) pair; when the centre is not the
    solar-system barycentre (index 0) the centre's own barycentric
    ephemeris is added to obtain barycentric coordinates.
    '''
    # The JPL ephemeris index of the celestial body
    jpli = celestial_body.jplephem_index
    # Path to ephemeris file
    path_ephem = Directory + '/Information/Celestial_Bodies/Ephemerides/de430.bsp'
    # The ephemeris kernel.
    # BUG FIX: the kernel was opened on every call and never closed,
    # leaking a file handle per call; close it deterministically.
    kernel = SPK.open(path_ephem)
    try:
        # The position and velocity
        pv = np.vstack(kernel[jpli].compute_and_differentiate(time))
        # If the ephemeris was wrt to its local barycentre
        if not jpli[0] == 0:
            # Compute barycentric position rather
            pv = np.add(pv, np.vstack(
                kernel[0, jpli[0]].compute_and_differentiate(time)))
    finally:
        kernel.close()
    # Convert km to m
    pv[0, :] = np.multiply(pv[0, :], 1e3)
    # Convert km/day to m/s (1000 / 86400 ~= 0.0115741)
    pv[1, :] = np.multiply(pv[1, :], 0.0115741)
    # Return a (2,3) numpy array
    return pv
# Satellite Utilities
def Satellite_Dictionary(celestial_body_name):
    '''Returns a dictionary of Satellite class instances
    related to the celestial body.

    Layout: {type_collection: {satellite: {object_name: (line1, line2)}}}
    built by scanning the on-disk Satellites directory for .tle files.
    '''
    # Root folder holding this body's satellite collections.
    base = Directory + '/Information/Celestial_Bodies/' + \
        celestial_body_name + '/Satellites'
    sat_dict = {}
    # One sub-folder per satellite type collection.
    for type_col in os.listdir(base):
        collection_path = base + '/' + type_col
        sat_dict[type_col] = {}
        # One sub-folder per satellite inside the collection.
        for sat in os.listdir(collection_path):
            sat_path = collection_path + '/' + sat
            sat_dict[type_col][sat] = {}
            # Pick up every TLE file the satellite folder contains.
            for f_name in os.listdir(sat_path):
                if not f_name.endswith('.tle'):
                    continue
                tle = Read_TLE(sat_path + '/' + f_name)
                # One entry per object described in the TLE file.
                for obj, lines in tle.items():
                    sat_dict[type_col][sat][obj] = (
                        lines['line1'], lines['line2'])
    return sat_dict
def Read_TLE(file_path):
    '''Parse a three-line element file into a dictionary.

    Each record is a header line followed by the two TLE lines; the
    result maps the normalised header (spaces/hyphens replaced by
    underscores, title-cased) to ``{'line1': ..., 'line2': ...}``.
    Duplicate headers get numeric suffixes: 'Name', 'Name_1', 'Name_2'.

    Raises ValueError when ``file_path`` does not end in '.tle'.
    '''
    # Make sure it is a TLE file
    if not file_path.endswith('.tle'):
        raise ValueError(
            'The supplied file must be a two line element (.tle).')
    collection = {}
    # Times each normalised header has been seen so far.
    # BUG FIX: the original kept a single global counter which was reset
    # whenever a *new* header appeared, so a sequence like A, A, B, A
    # produced 'A_1' twice and silently overwrote the earlier record.
    header_counts = {}
    # Cycles 0 (header), 1 (line1), 2 (line2) for each record.
    counter = 0
    # BUG FIX: the file was opened but never closed; use a context manager.
    with open(file_path) as TLE:
        for line in TLE:
            # Strip the trailing newline / carriage return.
            line = line.rstrip('\n').rstrip('\r')
            if counter == 0:
                # Header line: normalise the orbital body name.
                header = line.strip()
                header = header.replace(' ', '_')
                header = header.replace('-', '_')
                header = header.title()
                if header in header_counts:
                    # Repeated name: append a per-header suffix.
                    header_counts[header] += 1
                    header = header + '_' + str(header_counts[header])
                else:
                    header_counts[header] = 0
                collection[header] = {}
                counter += 1
            elif counter == 1:
                collection[header]['line1'] = line
                counter += 1
            elif counter == 2:
                collection[header]['line2'] = line
                # Reset the counter for the next record.
                counter -= 2
    return collection
def Position_and_Velocity_Satellite(satellite, time):
    '''Return the barycentric position [m] and velocity [m/s] of a
    satellite at Julian date ``time`` by propagating its TLE with SGP4
    and adding the attracting body's barycentric ephemeris.

    NOTE(review): assumes ``satellite`` carries ``line1``/``line2`` TLE
    strings and an ``attracting_body`` exposing ``Position_and_Velocity``
    -- confirm against the Satellite class definition.
    '''
    # The first line of the TLE
    line1 = satellite.line1
    # The second line
    line2 = satellite.line2
    # Fictitious SGP4 satellite object
    sat = twoline2rv(line1, line2, grav_const)
    # Convert Julian date to conventional (year, month, day, h, min, s)
    time_conv = invjday(time)
    # The position and velocity of the satellite
    # (presumably km and km/s relative to the attracting body -- per SGP4)
    pv = np.asarray(sat.propagate(*time_conv))
    # Convert km to m and km/s to m/s
    pv = np.multiply(pv, 1e3)
    # The attracting body ephemeris
    PV = satellite.attracting_body.Position_and_Velocity(time)
    # Convert from geocentric to barycentric frame
    pv = np.add(pv, PV)
    return pv
| |
from __future__ import print_function
from __future__ import division
import os
import sys
import time
from collections import deque
from json import dumps
from platform import python_version
from pprint import pformat
from signal import signal, SIGTERM, SIGUSR1, SIGTSTP, SIGCONT
from subprocess import Popen
from threading import Event, Thread
from syslog import syslog, LOG_ERR, LOG_INFO, LOG_WARNING
from traceback import extract_tb, format_tb, format_stack
import py3status.docstrings as docstrings
from py3status.command import CommandServer
from py3status.constants import COLOR_NAMES
from py3status.events import Events
from py3status.helpers import print_stderr
from py3status.i3status import I3status
from py3status.parse_config import process_config
from py3status.module import Module
from py3status.profiling import profile
from py3status.version import version
# Map py3status log level names to syslog priorities.
LOG_LEVELS = {'error': LOG_ERR, 'warning': LOG_WARNING, 'info': LOG_INFO, }
# Map py3status log level names to notify-send urgency levels.
DBUS_LEVELS = {'error': 'critical', 'warning': 'normal', 'info': 'low', }
# Config sections that hold py3status internals rather than user modules.
CONFIG_SPECIAL_SECTIONS = [
    '.group_extras',
    '.module_groups',
    'general',
    'i3s_modules',
    'on_click',
    'order',
    'py3_modules',
    'py3status',
]
class Runner(Thread):
    """
    A simple helper to run a module in a Thread so it is non-locking.

    The daemon thread is started immediately on construction; any error
    raised by ``module.run()`` is reported through the wrapper instead
    of killing the thread silently.
    """

    def __init__(self, module, py3_wrapper, module_name):
        Thread.__init__(self)
        self.daemon = True
        self.module = module
        self.module_name = module_name
        self.py3_wrapper = py3_wrapper
        # run the module straight away
        self.start()

    def run(self):
        try:
            self.module.run()
        except Exception:
            # FIX: was a bare ``except:`` which also caught SystemExit /
            # KeyboardInterrupt and misreported them as module errors.
            self.py3_wrapper.report_exception('Runner')
        # the module is no longer running so notify the timeout logic
        if self.module_name:
            self.py3_wrapper.timeout_finished.append(self.module_name)
class NoneSetting:
    """
    Sentinel type representing "no value set" in the config.

    Instances are falsy (zero length) and print as ``None`` so that
    module_test output reads naturally.
    """
    # this attribute is used to identify that this is a none setting
    none_setting = True

    def __len__(self):
        # zero length makes the sentinel behave as False in conditions
        return 0

    def __repr__(self):
        # this is for output via module_test
        return 'None'
class Task:
    """
    A simple task that can be run by the scheduler.

    Subclasses must override :meth:`run`.
    """

    def run(self):
        # FIX: was ``raise NotImplemented()`` -- NotImplemented is a
        # comparison sentinel, not an exception, and calling it raises
        # TypeError instead of the intended NotImplementedError.
        raise NotImplementedError()
class CheckI3StatusThread(Task):
    """
    Periodic task that watches the i3status thread: while it is alive
    the task reschedules itself; if it has died the user is notified.
    """

    def __init__(self, i3status_thread, py3_wrapper):
        self.i3status_thread = i3status_thread
        self.timeout_queue_add = py3_wrapper.timeout_queue_add
        self.notify_user = py3_wrapper.notify_user

    def run(self):
        # check i3status thread
        if self.i3status_thread.is_alive():
            # still healthy: check again in 5 seconds
            self.timeout_queue_add(self, int(time.time()) + 5)
            return
        # the thread died; surface its error (or a default) to the user
        err = self.i3status_thread.error or 'I3status died horribly.'
        self.notify_user(err)
class ModuleRunner(Task):
    """
    Scheduler task that kicks off a py3status Module.
    """

    def __init__(self, module):
        self.module = module

    def run(self):
        # delegate straight to the module's own start-up logic
        self.module.start_module()
class Common:
    """
    This class is used to hold core functionality so that it can be shared more
    easily. This allow us to run the module tests through the same code as
    when we are running for real.
    """

    def __init__(self, py3_wrapper):
        # parent wrapper supplying config, logging and notifications
        self.py3_wrapper = py3_wrapper
        # shared sentinel meaning "no value configured"
        self.none_setting = NoneSetting()
        self.config = py3_wrapper.config

    def get_config_attribute(self, name, attribute):
        """
        Look for the attribute in the config. Start with the named module and
        then walk up through any containing group and then try the general
        section of the config.

        Returns the resolved value, or ``self.none_setting`` when unset.
        Color attributes are normalised: names resolved via COLOR_NAMES
        and '#rgb' expanded to '#rrggbb'.
        """
        # A user can set a param to None in the config to prevent a param
        # being used. This is important when modules do something like
        #
        # color = self.py3.COLOR_MUTED or self.py3.COLOR_BAD
        config = self.config['py3_config']
        # 1) the module's own section
        param = config[name].get(attribute, self.none_setting)
        # 2) any group containing the module
        if hasattr(param, 'none_setting') and name in config['.module_groups']:
            for module in config['.module_groups'][name]:
                if attribute in config.get(module, {}):
                    param = config[module].get(attribute)
                    break
        if hasattr(param, 'none_setting'):
            # 3) check py3status config section
            param = config['py3status'].get(attribute, self.none_setting)
        if hasattr(param, 'none_setting'):
            # 4) check py3status general section
            param = config['general'].get(attribute, self.none_setting)
        if param and (attribute == 'color' or attribute.startswith('color_')):
            if param[0] != '#':
                # named color
                param = COLOR_NAMES.get(param.lower(), self.none_setting)
            elif len(param) == 4:
                # This is a color like #123 convert it to #112233
                param = (
                    '#' + param[1] + param[1] + param[2] +
                    param[2] + param[3] + param[3]
                )
        return param

    def report_exception(self, msg, notify_user=True, level='error',
                         error_frame=None):
        """
        Report details of an exception to the user.
        This should only be called within an except: block Details of the
        exception are reported eg filename, line number and exception type.

        Because stack trace information outside of py3status or it's modules is
        not helpful in actually finding and fixing the error, we try to locate
        the first place that the exception affected our code.

        Alternatively if the error occurs in a module via a Py3 call that
        catches and reports the error then we receive an error_frame and use
        that as the source of the error.

        NOTE: msg should not end in a '.' for consistency.
        """
        # Get list of paths that our stack trace should be found in.
        py3_paths = [os.path.dirname(__file__)]
        user_paths = self.config.get('include_paths', [])
        py3_paths += [os.path.abspath(path) + '/' for path in user_paths]
        traceback = None
        try:
            # We need to make sure to delete tb even if things go wrong.
            exc_type, exc_obj, tb = sys.exc_info()
            stack = extract_tb(tb)
            error_str = '{}: {}\n'.format(exc_type.__name__, exc_obj)
            traceback = [error_str]
            if error_frame:
                # The error occurred in a py3status module so the traceback
                # should be made to appear correct. We caught the exception
                # but make it look as though we did not.
                traceback += format_stack(error_frame, 1) + format_tb(tb)
                filename = os.path.basename(error_frame.f_code.co_filename)
                line_no = error_frame.f_lineno
            else:
                # This is a none module based error
                traceback += format_tb(tb)
                # Find first relevant trace in the stack.
                # it should be in py3status or one of it's modules.
                found = False
                for item in reversed(stack):
                    filename = item[0]
                    for path in py3_paths:
                        if filename.startswith(path):
                            # Found a good trace
                            filename = os.path.basename(item[0])
                            line_no = item[1]
                            found = True
                            break
                    if found:
                        break
            # all done! create our message.
            msg = '{} ({}) {} line {}.'.format(
                msg, exc_type.__name__, filename, line_no)
        except:
            # something went wrong report what we can.
            msg = '{}.'.format(msg)
        finally:
            # delete tb!  (break the reference cycle with the frame)
            del tb
        # log the exception and notify user
        self.py3_wrapper.log(msg, 'warning')
        if traceback:
            # if debug is not in the config then we are at an early stage of
            # running py3status and logging is not yet available so output the
            # error to STDERR so it can be seen
            if 'debug' not in self.config:
                print_stderr('\n'.join(traceback))
            elif self.config.get('log_file'):
                self.py3_wrapper.log(''.join(['Traceback\n'] + traceback))
        if notify_user:
            self.py3_wrapper.notify_user(msg, level=level)
class Py3statusWrapper:
"""
This is the py3status wrapper.
"""
def __init__(self, options):
    """
    Useful variables we'll need.

    ``options`` is the parsed command line options object.
    """
    # runtime configuration (filled in by setup())
    self.config = {}
    self.i3bar_running = True
    self.last_refresh_ts = time.time()
    self.lock = Event()
    # loaded py3status modules keyed by instance name
    self.modules = {}
    # hashes of messages already shown to the user (rate limiting)
    self.notified_messages = set()
    self.options = options
    self.output_modules = {}
    self.py3_modules = []
    self.running = True
    self.update_queue = deque()
    self.update_request = Event()
    # shared code
    self.common = Common(self)
    self.get_config_attribute = self.common.get_config_attribute
    self.report_exception = self.common.report_exception
    # these are used to schedule module updates
    self.timeout_add_queue = deque()
    self.timeout_due = None
    self.timeout_finished = deque()
    self.timeout_keys = []
    self.timeout_missed = {}
    self.timeout_queue = {}
    self.timeout_queue_lookup = {}
    self.timeout_running = set()
    self.timeout_update_due = deque()
def timeout_queue_add(self, item, cache_time=0):
"""
Add a item to be run at a future time.
This must be a Module, I3statusModule or a Task
"""
# add the info to the add queue. We do this so that actually adding
# the module is done in the core thread.
self.timeout_add_queue.append((item, cache_time))
# if the timeout_add_queue is not due to be processed until after this
# update request is due then trigger an update now.
if self.timeout_due is None or cache_time < self.timeout_due:
self.update_request.set()
def timeout_process_add_queue(self, module, cache_time):
    """
    Add a module to the timeout_queue if it is scheduled in the future or
    if it is due for an update immediately just trigger that.

    the timeout_queue is a dict with the scheduled time as the key and the
    value is a list of module instance names due to be updated at that
    point. An ordered list of keys is kept to allow easy checking of when
    updates are due. A list is also kept of which modules are in the
    update_queue to save having to search for modules in it unless needed.
    """
    # If already set to update do nothing
    if module in self.timeout_update_due:
        return
    # remove if already in the queue
    key = self.timeout_queue_lookup.get(module)
    if key:
        queue_item = self.timeout_queue[key]
        queue_item.remove(module)
        if not queue_item:
            # last entry for that time slot: drop the slot entirely
            del self.timeout_queue[key]
            self.timeout_keys.remove(key)
    if cache_time == 0:
        # if cache_time is 0 we can just trigger the module update
        self.timeout_update_due.append(module)
        self.timeout_queue_lookup[module] = None
    else:
        # add the module to the timeout queue
        if cache_time not in self.timeout_keys:
            self.timeout_queue[cache_time] = set([module])
            self.timeout_keys.append(cache_time)
            # sort keys so earliest is first
            self.timeout_keys.sort()
            # when is next timeout due?
            try:
                self.timeout_due = self.timeout_keys[0]
            except IndexError:
                self.timeout_due = None
        else:
            # slot already exists: just join it
            self.timeout_queue[cache_time].add(module)
        # note that the module is in the timeout_queue
        self.timeout_queue_lookup[module] = cache_time
def timeout_queue_process(self):
    """
    Check the timeout_queue and set any due modules to update.

    Returns the number of seconds until the next scheduled timeout, or
    None when nothing is queued.
    """
    # process any items that need adding to the queue
    while self.timeout_add_queue:
        self.timeout_process_add_queue(*self.timeout_add_queue.popleft())
    now = time.time()
    due_timeouts = []
    # find any due timeouts (keys are kept sorted, earliest first)
    for timeout in self.timeout_keys:
        if timeout > now:
            break
        due_timeouts.append(timeout)
    if due_timeouts:
        # process them
        for timeout in due_timeouts:
            modules = self.timeout_queue[timeout]
            # remove from the queue
            del self.timeout_queue[timeout]
            self.timeout_keys.remove(timeout)
            for module in modules:
                # module no longer in queue
                del self.timeout_queue_lookup[module]
                # tell module to update
                self.timeout_update_due.append(module)
        # when is next timeout due?
        try:
            self.timeout_due = self.timeout_keys[0]
        except IndexError:
            self.timeout_due = None
    # process any finished modules.
    # Now that the module has finished running it may have been marked to
    # be triggered again. This is most likely to happen when events are
    # being processed and the events are arriving much faster than the
    # module can handle them. It is important as a module may handle
    # events but not trigger the module update. If during the event the
    # module is due to update the update is not actioned but it needs to be
    # once the events have finished or else the module will no longer
    # continue to update.
    while self.timeout_finished:
        module_name = self.timeout_finished.popleft()
        self.timeout_running.discard(module_name)
        if module_name in self.timeout_missed:
            module = self.timeout_missed.pop(module_name)
            self.timeout_update_due.append(module)
    # run any modules that are due
    while self.timeout_update_due:
        module = self.timeout_update_due.popleft()
        module_name = getattr(module, 'module_full_name', None)
        # if the module is running then we do not want to trigger it but
        # instead wait till it has finished running and then trigger
        if module_name and module_name in self.timeout_running:
            self.timeout_missed[module_name] = module
        else:
            self.timeout_running.add(module_name)
            # each module runs in its own short-lived thread
            Runner(module, self, module_name)
    # we return how long till we next need to process the timeout_queue
    if self.timeout_due is not None:
        return self.timeout_due - time.time()
def get_config(self):
    """
    Create the py3status config based on command line options we received.
    Returns a plain dict of settings.
    """
    # user home, used to build the default include paths
    home_path = os.path.expanduser('~')
    xdg_home = os.environ.get(
        'XDG_CONFIG_HOME', '{}/.config'.format(home_path))
    # defaults, including the search paths for user modules
    config = {
        'minimum_interval': 0.1,  # minimum module update interval
        'include_paths': [
            '{}/.i3/py3status/'.format(home_path),
            '{}/i3status/py3status'.format(xdg_home),
            '{}/i3/py3status'.format(xdg_home),
        ],
        'version': version,
    }
    # override configuration and helper variables from the CLI options
    options = self.options
    config['cache_timeout'] = options.cache_timeout
    config['debug'] = options.debug
    config['dbus_notify'] = options.dbus_notify
    config['gevent'] = options.gevent
    if options.include_paths:
        config['include_paths'] = options.include_paths
    # FIXME we allow giving interval as a float and then make it an int!
    config['interval'] = int(options.interval)
    config['log_file'] = options.log_file
    config['standalone'] = options.standalone
    config['i3status_config_path'] = options.i3status_conf
    if options.cli_command:
        config['cli_command'] = options.cli_command
    return config
def gevent_monkey_patch_report(self):
    """
    Report effective gevent monkey patching on the logs.

    Patching worked when the stdlib socket class has been replaced by
    gevent's; otherwise (or when gevent is missing) the user is told.
    """
    try:
        import gevent.socket
        import socket
    except ImportError:
        self.notify_user(
            'gevent is not installed, monkey patching failed.')
    else:
        if gevent.socket.socket is socket.socket:
            self.log('gevent monkey patching is active')
        else:
            self.notify_user('gevent monkey patching failed.')
def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in self.config['include_paths']:
include_path = os.path.abspath(include_path) + '/'
if not os.path.isdir(include_path):
continue
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith('.py'):
continue
module_name = f_name[:-3]
# do not overwrite modules if already found
if module_name in user_modules:
pass
user_modules[module_name] = (include_path, f_name)
return user_modules
def get_user_configured_modules(self):
"""
Get a dict of all available and configured py3status modules
in the user's i3status.conf.
"""
user_modules = {}
if not self.py3_modules:
return user_modules
for module_name, module_info in self.get_user_modules().items():
for module in self.py3_modules:
if module_name == module.split(' ')[0]:
include_path, f_name = module_info
user_modules[module_name] = (include_path, f_name)
return user_modules
def load_modules(self, modules_list, user_modules):
    """
    Load the given modules from the list (contains instance name) with
    respect to the user provided modules dict.

    modules_list: ['weather_yahoo paris', 'net_rate']
    user_modules: {
        'weather_yahoo': ('/etc/py3status.d/', 'weather_yahoo.py')
    }

    Failures are reported to the user but do not abort loading of the
    remaining modules.
    """
    for module in modules_list:
        # ignore already provided modules (prevents double inclusion)
        if module in self.modules:
            continue
        try:
            my_m = Module(module, user_modules, self)
            # only handle modules with available methods
            if my_m.methods:
                self.modules[module] = my_m
            elif self.config['debug']:
                self.log(
                    'ignoring module "{}" (no methods found)'.format(
                        module))
        except Exception:
            # keep going: a broken module must not take py3status down
            err = sys.exc_info()[1]
            msg = 'Loading module "{}" failed ({}).'.format(module, err)
            self.report_exception(msg, level='warning')
def setup(self):
    """
    Setup py3status and spawn i3status/events/modules threads.

    Order matters here: signals first, then config, logging, i3status,
    the event and command threads, and finally the user modules.
    """
    # SIGTSTP will be received from i3bar indicating that all output should
    # stop and we should consider py3status suspended.  It is however
    # important that any processes using i3 ipc should continue to receive
    # those events otherwise it can lead to a stall in i3.
    signal(SIGTSTP, self.i3bar_stop)
    # SIGCONT indicates output should be resumed.
    signal(SIGCONT, self.i3bar_start)
    # update configuration
    self.config.update(self.get_config())
    if self.config.get('cli_command'):
        # command mode: run the command and exit instead of starting up
        self.handle_cli_command(self.config)
        sys.exit()
    # logging functionality now available
    # log py3status and python versions
    self.log('=' * 8)
    self.log('Starting py3status version {} python {}'.format(
        self.config['version'], python_version())
    )
    try:
        # if running from git then log the branch and last commit
        # we do this by looking in the .git directory
        git_path = os.path.join(os.path.dirname(__file__), '..', '.git')
        # branch
        with open(os.path.join(git_path, 'HEAD'), 'r') as f:
            out = f.readline()
        branch = '/'.join(out.strip().split('/')[2:])
        self.log('git branch: {}'.format(branch))
        # last commit
        log_path = os.path.join(git_path, 'logs', 'refs', 'heads', branch)
        with open(log_path, 'r') as f:
            out = f.readlines()[-1]
        sha = out.split(' ')[1][:7]
        msg = ':'.join(out.strip().split('\t')[-1].split(':')[1:])
        self.log('git commit: {}{}'.format(sha, msg))
    except:
        # not a git checkout (or unreadable); purely informational
        pass
    if self.config['debug']:
        self.log(
            'py3status started with config {}'.format(self.config))
    if self.config['gevent']:
        self.gevent_monkey_patch_report()
    # read i3status.conf
    config_path = self.config['i3status_config_path']
    self.config['py3_config'] = process_config(config_path, self)
    # setup i3status thread
    self.i3status_thread = I3status(self)
    # If standalone or no i3status modules then use the mock i3status
    # else start i3status thread.
    i3s_modules = self.config['py3_config']['i3s_modules']
    if self.config['standalone'] or not i3s_modules:
        self.i3status_thread.mock()
        i3s_mode = 'mocked'
    else:
        i3s_mode = 'started'
        self.i3status_thread.start()
        # wait for i3status to be ready, polling for early death
        while not self.i3status_thread.ready:
            if not self.i3status_thread.is_alive():
                # i3status is having a bad day, so tell the user what went
                # wrong and do the best we can with just py3status modules.
                err = self.i3status_thread.error
                self.notify_user(err)
                self.i3status_thread.mock()
                i3s_mode = 'mocked'
                break
            time.sleep(0.1)
    if self.config['debug']:
        self.log('i3status thread {} with config {}'.format(
            i3s_mode, self.config['py3_config']))
    # add i3status thread monitoring task
    if i3s_mode == 'started':
        task = CheckI3StatusThread(self.i3status_thread, self)
        self.timeout_queue_add(task)
    # setup input events thread
    self.events_thread = Events(self)
    self.events_thread.daemon = True
    self.events_thread.start()
    if self.config['debug']:
        self.log('events thread started')
    # initialise the command server
    self.commands_thread = CommandServer(self)
    self.commands_thread.daemon = True
    self.commands_thread.start()
    if self.config['debug']:
        self.log('commands thread started')
    # suppress modules' output wrt issue #20
    if not self.config['debug']:
        sys.stdout = open('/dev/null', 'w')
        sys.stderr = open('/dev/null', 'w')
    # get the list of py3status configured modules
    self.py3_modules = self.config['py3_config']['py3_modules']
    # get a dict of all user provided modules
    user_modules = self.get_user_configured_modules()
    if self.config['debug']:
        self.log('user_modules={}'.format(user_modules))
    if self.py3_modules:
        # load and spawn i3status.conf configured modules threads
        self.load_modules(self.py3_modules, user_modules)
def notify_user(self, msg, level='error', rate_limit=None, module_name=''):
"""
Display notification to user via i3-nagbar or send-notify
We also make sure to log anything to keep trace of it.
NOTE: Message should end with a '.' for consistency.
"""
dbus = self.config.get('dbus_notify')
if dbus:
# force msg to be a string
msg = u'{}'.format(msg)
else:
msg = u'py3status: {}'.format(msg)
if level != 'info' and module_name == '':
fix_msg = u'{} Please try to fix this and reload i3wm (Mod+Shift+R)'
msg = fix_msg.format(msg)
# Rate limiting. If rate limiting then we need to calculate the time
# period for which the message should not be repeated. We just use
# A simple chunked time model where a message cannot be repeated in a
# given time period. Messages can be repeated more frequently but must
# be in different time periods.
limit_key = ''
if rate_limit:
try:
limit_key = time.time() // rate_limit
except TypeError:
pass
# We use a hash to see if the message is being repeated. This is crude
# and imperfect but should work for our needs.
msg_hash = hash(u'{}#{}#{}'.format(module_name, limit_key, msg))
if msg_hash in self.notified_messages:
return
elif module_name:
log_msg = 'Module `%s` sent a notification. "%s"' % (module_name, msg)
self.log(log_msg, level)
else:
self.log(msg, level)
self.notified_messages.add(msg_hash)
try:
if dbus:
# fix any html entities
msg = msg.replace('&', '&')
msg = msg.replace('<', '<')
msg = msg.replace('>', '>')
cmd = ['notify-send', '-u', DBUS_LEVELS.get(level, 'normal'),
'-t', '10000', 'py3status', msg]
else:
py3_config = self.config.get('py3_config', {})
nagbar_font = py3_config.get('py3status', {}).get('nagbar_font')
if nagbar_font:
cmd = ['i3-nagbar', '-f', nagbar_font, '-m', msg, '-t', level]
else:
cmd = ['i3-nagbar', '-m', msg, '-t', level]
Popen(cmd,
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'))
except Exception as err:
self.log('notify_user error: %s' % err)
def stop(self):
"""
Set the Event lock, this will break all threads' loops.
"""
self.running = False
# stop the command server
try:
self.commands_thread.kill()
except:
pass
try:
self.lock.set()
if self.config['debug']:
self.log('lock set, exiting')
# run kill() method on all py3status modules
for module in self.modules.values():
module.kill()
except:
pass
def refresh_modules(self, module_string=None, exact=True):
"""
Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes.
"""
if not module_string:
if time.time() > (self.last_refresh_ts + 0.1):
self.last_refresh_ts = time.time()
else:
# rate limiting
return
update_i3status = False
for name, module in self.output_modules.items():
if (module_string is None or
(exact and name == module_string) or
(not exact and name.startswith(module_string))):
if module['type'] == 'py3status':
if self.config['debug']:
self.log('refresh py3status module {}'.format(name))
module['module'].force_update()
else:
if self.config['debug']:
self.log('refresh i3status module {}'.format(name))
update_i3status = True
if update_i3status:
self.i3status_thread.refresh_i3status()
    def sig_handler(self, signum, frame):
        """
        SIGUSR1 was received, the user asks for an immediate refresh of the bar

        :param signum: signal number (unused, required by the signal API)
        :param frame: current stack frame (unused, required by the signal API)
        """
        self.log('received USR1')
        self.refresh_modules()
    def terminate(self, signum, frame):
        """
        Received request to terminate (SIGTERM), exit nicely.

        Raising KeyboardInterrupt reuses the Ctrl-C shutdown code path,
        presumably handled by the caller of run() -- TODO confirm.
        """
        raise KeyboardInterrupt()
def purge_module(self, module_name):
"""
A module has been removed e.g. a module that had an error.
We need to find any containers and remove the module from them.
"""
containers = self.config['py3_config']['.module_groups']
containers_to_update = set()
if module_name in containers:
containers_to_update.update(set(containers[module_name]))
for container in containers_to_update:
try:
self.modules[container].module_class.items.remove(module_name)
except ValueError:
pass
def notify_update(self, update, urgent=False):
"""
Name or list of names of modules that have updated.
"""
if not isinstance(update, list):
update = [update]
self.update_queue.extend(update)
# find containers that use the modules that updated
containers = self.config['py3_config']['.module_groups']
containers_to_update = set()
for item in update:
if item in containers:
containers_to_update.update(set(containers[item]))
# force containers to update
for container in containers_to_update:
container_module = self.output_modules.get(container)
if container_module:
# If the container registered a urgent_function then call it
# if this update is urgent.
if urgent and container_module.get('urgent_function'):
container_module['urgent_function'](update)
# If a container has registered a content_function we use that
# to see if the container needs to be updated.
# We only need to update containers if their active content has
# changed.
if container_module.get('content_function'):
if set(update) & container_module['content_function']():
container_module['module'].force_update()
else:
# we don't know so just update.
container_module['module'].force_update()
# we need to update the output
if self.update_queue:
self.update_request.set()
    def log(self, msg, level='info'):
        """
        log this information to syslog or user provided logfile.

        :param msg: message to log; dicts/lists/sets/tuples are pretty printed
        :param level: either a level name ('info', 'error', ...) or an
            actual syslog level value
        """
        if not self.config.get('log_file'):
            # If level was given as a str then convert to actual level
            level = LOG_LEVELS.get(level, level)
            syslog(level, u'{}'.format(msg))
        else:
            # Binary mode so fs encoding setting is not an issue
            with open(self.config['log_file'], 'ab') as f:
                log_time = time.strftime("%Y-%m-%d %H:%M:%S")
                # nice formatting of data structures using pretty print
                if isinstance(msg, (dict, list, set, tuple)):
                    msg = pformat(msg)
                # if multiline then start the data output on a fresh line
                # to aid readability.
                if '\n' in msg:
                    msg = u'\n' + msg
                out = u'{} {} {}\n'.format(log_time, level.upper(), msg)
                try:
                    # Encode unicode strings to bytes
                    f.write(out.encode('utf-8'))
                except (AttributeError, UnicodeDecodeError):
                    # Write any byte strings straight to log
                    f.write(out)
def create_output_modules(self):
"""
Setup our output modules to allow easy updating of py3modules and
i3status modules allows the same module to be used multiple times.
"""
py3_config = self.config['py3_config']
i3modules = self.i3status_thread.i3modules
output_modules = self.output_modules
# position in the bar of the modules
positions = {}
for index, name in enumerate(py3_config['order']):
if name not in positions:
positions[name] = []
positions[name].append(index)
# py3status modules
for name in self.modules:
if name not in output_modules:
output_modules[name] = {}
output_modules[name]['position'] = positions.get(name, [])
output_modules[name]['module'] = self.modules[name]
output_modules[name]['type'] = 'py3status'
output_modules[name]['color'] = self.mappings_color.get(name)
# i3status modules
for name in i3modules:
if name not in output_modules:
output_modules[name] = {}
output_modules[name]['position'] = positions.get(name, [])
output_modules[name]['module'] = i3modules[name]
output_modules[name]['type'] = 'i3status'
output_modules[name]['color'] = self.mappings_color.get(name)
self.output_modules = output_modules
def create_mappings(self, config):
"""
Create any mappings needed for global substitutions eg. colors
"""
mappings = {}
for name, cfg in config.items():
# Ignore special config sections.
if name in CONFIG_SPECIAL_SECTIONS:
continue
color = self.get_config_attribute(name, 'color')
if hasattr(color, 'none_setting'):
color = None
mappings[name] = color
# Store mappings for later use.
self.mappings_color = mappings
def process_module_output(self, module):
"""
Process the output for a module and return a json string representing it.
Color processing occurs here.
"""
outputs = module['module'].get_latest()
color = module['color']
if color:
for output in outputs:
# Color: substitute the config defined color
if 'color' not in output:
output['color'] = color
# Create the json string output.
return ','.join([dumps(x) for x in outputs])
    def i3bar_stop(self, signum, frame):
        """Handle the i3bar stop signal: suspend i3status and sleep modules."""
        self.i3bar_running = False
        # i3status should be stopped
        self.i3status_thread.suspend_i3status()
        self.sleep_modules()
    def i3bar_start(self, signum, frame):
        """Handle the i3bar continue signal: resume module updates."""
        self.i3bar_running = True
        self.wake_modules()
def sleep_modules(self):
# Put all py3modules to sleep so they stop updating
for module in self.output_modules.values():
if module['type'] == 'py3status':
module['module'].sleep()
def wake_modules(self):
# Wake up all py3modules.
for module in self.output_modules.values():
if module['type'] == 'py3status':
module['module'].wake()
    @profile
    def run(self):
        """
        Main py3status loop, continuously read from i3status and modules
        and output it to i3bar for displaying.
        """
        # SIGUSR1 forces a refresh of the bar both for py3status and i3status,
        # this mimics the USR1 signal handling of i3status (see man i3status)
        signal(SIGUSR1, self.sig_handler)
        signal(SIGTERM, self.terminate)

        # initialize usage variables
        py3_config = self.config['py3_config']

        # prepare the color mappings
        self.create_mappings(py3_config)

        # self.output_modules needs to have been created before modules are
        # started. This is so that modules can do things like register their
        # content_function.
        self.create_output_modules()

        # start up all our modules
        for module in self.modules.values():
            task = ModuleRunner(module)
            self.timeout_queue_add(task)

        # this will be our output set to the correct length for the number of
        # items in the bar
        output = [None] * len(py3_config['order'])

        # write via the real stdout; sys.stdout may have been redirected to
        # /dev/null earlier to suppress module output
        write = sys.__stdout__.write
        flush = sys.__stdout__.flush

        # start our output: the i3bar protocol header followed by the
        # opening of the infinite JSON array
        header = {
            'version': 1,
            'click_events': True,
            'stop_signal': SIGTSTP
        }
        write(dumps(header))
        write('\n[[]\n')

        update_due = None
        # main loop
        while True:
            # process the timeout_queue and get interval till next update due
            update_due = self.timeout_queue_process()

            # wait until an update is requested
            if self.update_request.wait(timeout=update_due):
                # event was set so clear it
                self.update_request.clear()

            # do not write while i3bar is hidden (see i3bar_stop/i3bar_start)
            while not self.i3bar_running:
                time.sleep(0.1)

            # check if an update is needed
            if self.update_queue:
                while (len(self.update_queue)):
                    module_name = self.update_queue.popleft()
                    module = self.output_modules[module_name]
                    out = self.process_module_output(module)

                    for index in module['position']:
                        # store the output as json
                        output[index] = out

                # build output string
                out = ','.join([x for x in output if x])
                # dump the line to stdout
                write(',[{}]\n'.format(out))
                flush()
def handle_cli_command(self, config):
"""Handle a command from the CLI.
"""
cmd = config['cli_command']
# aliases
if cmd[0] in ['mod', 'module', 'modules']:
cmd[0] = 'modules'
# allowed cli commands
if cmd[:2] in (['modules', 'list'], ['modules', 'details']):
docstrings.show_modules(config, cmd[1:])
# docstring formatting and checking
elif cmd[:2] in (['docstring', 'check'], ['docstring', 'update']):
if cmd[1] == 'check':
show_diff = len(cmd) > 2 and cmd[2] == 'diff'
if show_diff:
mods = cmd[3:]
else:
mods = cmd[2:]
docstrings.check_docstrings(show_diff, config, mods)
if cmd[1] == 'update':
if len(cmd) < 3:
print_stderr('Error: you must specify what to update')
sys.exit(1)
if cmd[2] == 'modules':
docstrings.update_docstrings()
else:
docstrings.update_readme_for_modules(cmd[2:])
elif cmd[:2] in (['modules', 'enable'], ['modules', 'disable']):
# TODO: to be implemented
pass
else:
print_stderr('Error: unknown command')
sys.exit(1)
| |
#!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
log = None
# classes
class Agent():
    """A robot moving around the ring, identified by its IP address."""

    def __init__(self, ip, cw=True, node=None, state='initial'):
        # cw: True = clockwise, False = counterclockwise
        self.ip = ip
        self.cw = cw
        self.state = state
        self.node = node

    def __repr__(self):
        return 'Agent: ip {}, direction CW: {}, state: {}, node: {}'.format(
            self.ip, self.cw, self.state, self.node)
class Node():
    """A node of the ring, holding the ips of the agents currently on it."""

    def __init__(self, label):
        assert isinstance(label, int), 'Node constructor accepts numeric label only'
        self.label = label
        # list of agent ips in the current node
        self.agents = []

    def add_agent(self, agent_ip):
        # record that an agent has moved onto this node
        self.agents.append(agent_ip)

    def __repr__(self):
        body = ' | '.join(str(app.agents[ip]) for ip in self.agents)
        return '<Node {}: [{}]>'.format(self.label, body)
class Ring():
    """The ring topology the agents move on."""

    def __init__(self, n_nodes):
        self._nodes = [Node(i) for i in range(n_nodes)]
        self.n_nodes = n_nodes

    def get_node(self, label):
        """Return the node with the given label."""
        return self._nodes[label]

    def next(self, agent):
        """Return next node (in the agent's direction of travel)."""
        i = 1 if agent.cw else -1
        return self._nodes[(agent.node + i) % self.n_nodes]

    def prev(self, agent):
        """Return prev node (opposite to the agent's direction of travel)."""
        i = -1 if agent.cw else 1
        return self._nodes[(agent.node + i) % self.n_nodes]

    def blocked(self, agent):
        """Check if the next node is blocked.

        The malicious agent is blocked by any occupant of the next node;
        honest agents are only blocked by the malicious agent.
        """
        next_node = self.next(agent)
        if agent.ip == app.malicious_ip:
            return len(next_node.agents) > 0
        return app.malicious_ip in next_node.agents

    def random_place_agents(self, fixed=True):
        """Place the agents in the ring.

        By default a fixed, reproducible placement is used (previously the
        random branch below was dead code behind an unconditional return).
        Pass ``fixed=False`` for a random placement with at most one agent
        per node, randomizing direction when the ring is unoriented.

        Direction convention: True = clockwise, False = counterclockwise.
        """
        if fixed:
            # (ip, node label, clockwise?)
            placements = [
                (app.agents_ips[0], 3, False),
                (app.agents_ips[1], 6, False),
                (app.agents_ips[2], 5, True),
                (app.malicious_ip, 1, False),
            ]
            for ip, label, cw in placements:
                agent = app.agents[ip]
                agent.node = label
                agent.cw = cw
                self.get_node(label).add_agent(ip)
            return
        # at most 1 agent per node, randomize direction in case of
        # unoriented ring
        for agent, node in zip(app.agents.values(),
                               random.sample(self._nodes, len(app.agents))):
            agent.cw = True if config.oriented else random.choice([True, False])
            agent.node = node.label
            self.get_node(node.label).add_agent(agent.ip)

    def dump(self):
        """Return a JSON-serializable snapshot: node label -> agent tuples."""
        ring = dict()
        for node in self._nodes:
            ring[str(node.label)] = [
                (app.agents[a].ip, str(app.agents[a].cw),
                 app.agents[a].state, app.agents[a].node)
                for a in node.agents
            ]
        return ring

    def __repr__(self):
        return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
    '''Wrapper around the Flask class used to store additional information.'''

    def __init__(self, *args, **kwargs):
        super(MTFGRServer, self).__init__(*args, **kwargs)
        # the ring topology the agents move on
        self.ring = Ring(config.n_nodes)
        self.agents_ips = config.agents_ips
        # ip -> Agent; populated by _reset()
        self.agents = dict()
        self.malicious_ip = config.malicious_ip
        # whether the agents share a common ring orientation
        self.oriented = config.oriented
        # set to True once /start has been requested
        self.started = False
# Singleton instance of the web application; it also carries the shared
# ring/agent state used by the view functions below.
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
    """Reset the global state by re-reading the config module.

    A plain ``import config`` is a no-op here because Python caches
    imported modules, so ``importlib.reload`` is used to actually pick up
    changes made to the config file on disk.
    """
    import importlib
    global log
    importlib.reload(config)
    app.ring = Ring(config.n_nodes)
    app.agents = {ip: Agent(ip) for ip in config.agents_ips}
    app.malicious_ip = config.malicious_ip
    app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
    app.oriented = config.oriented
    app.started = False
    app.ring.random_place_agents()
    if log is not None:
        # avoid leaking a file descriptor on repeated resets
        log.close()
    log = open('/tmp/ev3.log', 'a')
    log.write('\n\nIIIIIIIIIINNNNNNNNNIIIIIIIIIIITTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\n')
# views
def _communicate_start():
    """Instruct each bot to start."""
    port = 31337
    targets = app.agents_ips[::-1] + [app.malicious_ip]
    for ip in targets:
        # NOTE(review): the payload send is commented out, so presumably
        # the TCP connection itself acts as the start signal -- confirm.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((ip, port))
        # sock.sendall(b'Go!\n')
        sock.close()
@app.route('/start')
def start():
    """Mark the experiment as started and signal every bot to go."""
    app.started = True
    try:
        _communicate_start()
    except Exception:
        # NOTE(review): connection failures are silently ignored; bots that
        # could not be reached never receive the start signal.
        pass
    return redirect(url_for('index'))
@app.route('/reset')
def reset():
    """Reset the whole simulation state, then go back to the index page."""
    _reset()
    return redirect(url_for('index'))
@app.route('/status')
def global_status():
    """Get the whole ring status.

    JSON keys are node labels, values the agent tuples on that node.
    """
    return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
    """Get the list of agents in the current node.

    Returns the states of the other agents sharing the node and whether
    the requesting agent is blocked.
    """
    agent = app.agents[agent_ip]
    # include the blocked flag in the reply
    return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
                   blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
    """Update an agent's state and advance it along the ring.

    Query parameters:
        turned: '1' if the agent reversed its direction
        state: the agent's new state string
        stopped: '1' if the agent has stopped and must not advance

    Returns JSON with the agent's blocked status.
    """
    global log
    turned = request.args.get('turned') == '1'
    state = request.args.get('state')
    stopped = request.args.get('stopped') == '1'

    # logging
    sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
    log.write(sss)
    log.write('[Status pre]\n')
    log.write(str(app.ring.dump()))

    agent = app.agents[agent_ip]
    agent.state = state
    # flip the stored direction if the agent reports it turned around
    agent.cw = agent.cw if not turned else not agent.cw
    blocked = app.ring.blocked(agent)
    if not blocked and not stopped:
        # advance to the next node if not blocked
        node = app.ring.get_node(agent.node)
        next_node = app.ring.next(agent)
        agent.node = next_node.label
        node.agents.remove(agent_ip)
        next_node.add_agent(agent_ip)

    log.write('\n[Status post]\n')
    log.write(str(app.ring.dump()))
    return jsonify(blocked=blocked)
@app.route('/')
def index():
    """Render the control page."""
    return render_template('base.html', started=app.started)
def main():
    """Run the development server, reachable from the local network."""
    app.run(host='0.0.0.0', debug=config.debug)
if __name__ == '__main__':
    main()
| |
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
import dns.exception
import dns.tokenizer
Token = dns.tokenizer.Token
class TokenizerTestCase(unittest.TestCase):
    """Tests for dns.tokenizer.Tokenizer.

    Uses the modern TestCase assertion names: the ``failUnless*`` aliases
    used previously are deprecated and were removed in Python 3.12.
    """

    def testStr(self):
        tok = dns.tokenizer.Tokenizer('foo')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.IDENTIFIER, 'foo'))

    def testUnicode(self):
        tok = dns.tokenizer.Tokenizer(u'foo')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.IDENTIFIER, 'foo'))

    def testQuotedString1(self):
        tok = dns.tokenizer.Tokenizer(r'"foo"')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.QUOTED_STRING, 'foo'))

    def testQuotedString2(self):
        tok = dns.tokenizer.Tokenizer(r'""')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.QUOTED_STRING, ''))

    def testQuotedString3(self):
        tok = dns.tokenizer.Tokenizer(r'"\"foo\""')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.QUOTED_STRING, '"foo"'))

    def testQuotedString4(self):
        tok = dns.tokenizer.Tokenizer(r'"foo\010bar"')
        token = tok.get()
        self.assertEqual(token, Token(dns.tokenizer.QUOTED_STRING, 'foo\x0abar'))

    def testQuotedString5(self):
        # unterminated quoted string
        def bad():
            tok = dns.tokenizer.Tokenizer(r'"foo')
            tok.get()
        self.assertRaises(dns.exception.UnexpectedEnd, bad)

    def testQuotedString6(self):
        # truncated decimal escape
        def bad():
            tok = dns.tokenizer.Tokenizer(r'"foo\01')
            tok.get()
        self.assertRaises(dns.exception.SyntaxError, bad)

    def testQuotedString7(self):
        # newline inside a quoted string is not allowed
        def bad():
            tok = dns.tokenizer.Tokenizer('"foo\nbar"')
            tok.get()
        self.assertRaises(dns.exception.SyntaxError, bad)

    def testEmpty1(self):
        tok = dns.tokenizer.Tokenizer('')
        token = tok.get()
        self.assertTrue(token.is_eof())

    def testEmpty2(self):
        # EOF is sticky: repeated gets keep returning EOF
        tok = dns.tokenizer.Tokenizer('')
        token1 = tok.get()
        token2 = tok.get()
        self.assertTrue(token1.is_eof())
        self.assertTrue(token2.is_eof())

    def testEOL(self):
        tok = dns.tokenizer.Tokenizer('\n')
        token1 = tok.get()
        token2 = tok.get()
        self.assertTrue(token1.is_eol())
        self.assertTrue(token2.is_eof())

    def testWS1(self):
        # leading whitespace is skipped by default
        tok = dns.tokenizer.Tokenizer(' \n')
        token1 = tok.get()
        self.assertTrue(token1.is_eol())

    def testWS2(self):
        # leading whitespace is returned when requested
        tok = dns.tokenizer.Tokenizer(' \n')
        token1 = tok.get(want_leading=True)
        self.assertTrue(token1.is_whitespace())

    def testComment1(self):
        # comments are skipped by default
        tok = dns.tokenizer.Tokenizer(' ;foo\n')
        token1 = tok.get()
        self.assertTrue(token1.is_eol())

    def testComment2(self):
        # comments are returned when requested
        tok = dns.tokenizer.Tokenizer(' ;foo\n')
        token1 = tok.get(want_comment=True)
        token2 = tok.get()
        self.assertEqual(token1, Token(dns.tokenizer.COMMENT, 'foo'))
        self.assertTrue(token2.is_eol())

    def testComment3(self):
        tok = dns.tokenizer.Tokenizer(' ;foo bar\n')
        token1 = tok.get(want_comment=True)
        token2 = tok.get()
        self.assertEqual(token1, Token(dns.tokenizer.COMMENT, 'foo bar'))
        self.assertTrue(token2.is_eol())

    def testMultiline1(self):
        # newlines inside parentheses do not produce EOL tokens
        tok = dns.tokenizer.Tokenizer('( foo\n\n bar\n)')
        tokens = list(iter(tok))
        self.assertEqual(tokens, [Token(dns.tokenizer.IDENTIFIER, 'foo'),
                                  Token(dns.tokenizer.IDENTIFIER, 'bar')])

    def testMultiline2(self):
        tok = dns.tokenizer.Tokenizer('( foo\n\n bar\n)\n')
        tokens = list(iter(tok))
        self.assertEqual(tokens, [Token(dns.tokenizer.IDENTIFIER, 'foo'),
                                  Token(dns.tokenizer.IDENTIFIER, 'bar'),
                                  Token(dns.tokenizer.EOL, '\n')])

    def testMultiline3(self):
        # unbalanced closing parenthesis
        def bad():
            tok = dns.tokenizer.Tokenizer('foo)')
            list(iter(tok))
        self.assertRaises(dns.exception.SyntaxError, bad)

    def testMultiline4(self):
        # unbalanced opening parenthesis
        def bad():
            tok = dns.tokenizer.Tokenizer('((foo)')
            list(iter(tok))
        self.assertRaises(dns.exception.SyntaxError, bad)

    def testUnget1(self):
        tok = dns.tokenizer.Tokenizer('foo')
        t1 = tok.get()
        tok.unget(t1)
        t2 = tok.get()
        self.assertEqual(t1, t2)
        self.assertEqual(t1.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t1.value, 'foo')

    def testUnget2(self):
        # only one token of pushback is supported
        def bad():
            tok = dns.tokenizer.Tokenizer('foo')
            t1 = tok.get()
            tok.unget(t1)
            tok.unget(t1)
        self.assertRaises(dns.tokenizer.UngetBufferFull, bad)

    def testGetEOL1(self):
        tok = dns.tokenizer.Tokenizer('\n')
        t = tok.get_eol()
        self.assertEqual(t, '\n')

    def testGetEOL2(self):
        tok = dns.tokenizer.Tokenizer('')
        t = tok.get_eol()
        self.assertEqual(t, '')

    def testEscapedDelimiter1(self):
        # escapes are preserved verbatim unless unescape() is called
        tok = dns.tokenizer.Tokenizer(r'ch\ ld')
        t = tok.get()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, r'ch\ ld')

    def testEscapedDelimiter2(self):
        tok = dns.tokenizer.Tokenizer(r'ch\032ld')
        t = tok.get()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, r'ch\032ld')

    def testEscapedDelimiter3(self):
        tok = dns.tokenizer.Tokenizer(r'ch\ild')
        t = tok.get()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, r'ch\ild')

    def testEscapedDelimiter1u(self):
        tok = dns.tokenizer.Tokenizer(r'ch\ ld')
        t = tok.get().unescape()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, 'ch ld')

    def testEscapedDelimiter2u(self):
        # \032 is a decimal escape for the space character
        tok = dns.tokenizer.Tokenizer(r'ch\032ld')
        t = tok.get().unescape()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, 'ch ld')

    def testEscapedDelimiter3u(self):
        tok = dns.tokenizer.Tokenizer(r'ch\ild')
        t = tok.get().unescape()
        self.assertEqual(t.ttype, dns.tokenizer.IDENTIFIER)
        self.assertEqual(t.value, 'child')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [28.782590300914933, 59.705365170024862, -27.718282299434122, -58.259294515838988, -31.917671479189778, -2.9037098736051519],
'yp': [16.928446771378731, -47.207678883071537, -85.002181500872581, 2.4214659685864035, 64.64877835951134, 25.70898778359512],
'min_JK': 18.0859375,
'max_JK': 95.0390625}
cm_data = [[ 0.30593816, 0.00266902, 0.0061051 ],
[ 0.31042433, 0.00290942, 0.01120144],
[ 0.31487482, 0.00316892, 0.01705532],
[ 0.31929522, 0.0034243 , 0.02377678],
[ 0.32368145, 0.00369108, 0.03138252],
[ 0.32803605, 0.00395683, 0.03996463],
[ 0.33235797, 0.00422464, 0.04889124],
[ 0.33664671, 0.00449568, 0.05769627],
[ 0.34090418, 0.00475769, 0.06646696],
[ 0.34512721, 0.00502718, 0.07518485],
[ 0.34931976, 0.00527747, 0.08394564],
[ 0.35347816, 0.00552999, 0.09270339],
[ 0.35760415, 0.00577067, 0.10150609],
[ 0.36169739, 0.00599854, 0.11036215],
[ 0.36575644, 0.00622388, 0.11924992],
[ 0.36978375, 0.00641944, 0.12823988],
[ 0.37377653, 0.00660885, 0.13727486],
[ 0.37773556, 0.00677996, 0.14638528],
[ 0.38166083, 0.00692606, 0.15558655],
[ 0.38555095, 0.00706076, 0.16484709],
[ 0.38940658, 0.00716518, 0.17420925],
[ 0.39322665, 0.00724735, 0.18365416],
[ 0.39701037, 0.00731401, 0.19316726],
[ 0.40075782, 0.00734338, 0.20279477],
[ 0.40446776, 0.0073515 , 0.21250246],
[ 0.40813946, 0.00734193, 0.22228406],
[ 0.41177227, 0.00729445, 0.23218125],
[ 0.41536508, 0.00722533, 0.24216243],
[ 0.41891698, 0.00713925, 0.25222092],
[ 0.42242671, 0.00702265, 0.2623854 ],
[ 0.42589305, 0.00688324, 0.27264414],
[ 0.42931493, 0.00673171, 0.28298143],
[ 0.43269091, 0.00656683, 0.29340481],
[ 0.43601883, 0.00637817, 0.30393878],
[ 0.4392978 , 0.00618789, 0.31455025],
[ 0.44252616, 0.00600087, 0.32523894],
[ 0.44570162, 0.00581503, 0.33601771],
[ 0.44882183, 0.00563345, 0.34689141],
[ 0.45188553, 0.00547601, 0.35783771],
[ 0.45489058, 0.00535163, 0.36885498],
[ 0.45783445, 0.00526797, 0.37994531],
[ 0.46071339, 0.00522487, 0.39112549],
[ 0.46352625, 0.00525017, 0.40236761],
[ 0.46627033, 0.00535829, 0.4136684 ],
[ 0.46894281, 0.00556525, 0.42502414],
[ 0.47154041, 0.00588686, 0.43643389],
[ 0.47405858, 0.00633428, 0.44790711],
[ 0.47649567, 0.00694065, 0.45941943],
[ 0.47884828, 0.00772963, 0.4709651 ],
[ 0.48111287, 0.008727 , 0.48253784],
[ 0.48328575, 0.00996081, 0.49413082],
[ 0.48536308, 0.01146143, 0.50573662],
[ 0.48733949, 0.01325656, 0.51735676],
[ 0.48921171, 0.01538581, 0.5289754 ],
[ 0.49097605, 0.01788916, 0.5405796 ],
[ 0.49262816, 0.02080706, 0.55215963],
[ 0.49416356, 0.02418258, 0.5637052 ],
[ 0.49557768, 0.02806141, 0.57520544],
[ 0.49686584, 0.03249183, 0.58664893],
[ 0.49802326, 0.03752463, 0.59802371],
[ 0.49904511, 0.04311441, 0.60931731],
[ 0.49992649, 0.0489259 , 0.62051677],
[ 0.50066244, 0.05493755, 0.63160869],
[ 0.50124802, 0.0611424 , 0.64257928],
[ 0.50167805, 0.06753394, 0.65341504],
[ 0.50194745, 0.07410605, 0.66410175],
[ 0.50205186, 0.08085257, 0.67462323],
[ 0.50198648, 0.08776732, 0.68496474],
[ 0.50174664, 0.09484406, 0.69511141],
[ 0.50132778, 0.10207645, 0.70504838],
[ 0.50072556, 0.10945796, 0.71476088],
[ 0.4999358 , 0.11698182, 0.72423428],
[ 0.4989546 , 0.124641 , 0.7334542 ],
[ 0.49777787, 0.13242856, 0.74240728],
[ 0.49640237, 0.14033686, 0.75107968],
[ 0.49482544, 0.14835776, 0.75945791],
[ 0.49304454, 0.15648302, 0.76752951],
[ 0.49105756, 0.16470409, 0.7752828 ],
[ 0.48886287, 0.17301214, 0.78270692],
[ 0.48645935, 0.18139813, 0.78979192],
[ 0.48384638, 0.18985275, 0.79652888],
[ 0.48102392, 0.19836655, 0.80290995],
[ 0.47799249, 0.20692994, 0.80892843],
[ 0.47475321, 0.21553323, 0.8145788 ],
[ 0.47130781, 0.2241667 , 0.81985683],
[ 0.46765864, 0.23282061, 0.82475954],
[ 0.46380866, 0.24148532, 0.82928527],
[ 0.45976146, 0.25015129, 0.83343365],
[ 0.45552028, 0.25880983, 0.83720573],
[ 0.45109112, 0.26745087, 0.84060357],
[ 0.44647941, 0.27606548, 0.84363073],
[ 0.44169113, 0.28464506, 0.846292 ],
[ 0.43673279, 0.29318136, 0.84859337],
[ 0.43161138, 0.30166653, 0.8505419 ],
[ 0.42633438, 0.31009317, 0.85214573],
[ 0.42090971, 0.3184543 , 0.85341395],
[ 0.41534564, 0.32674346, 0.85435652],
[ 0.40965092, 0.3349546 , 0.85498422],
[ 0.40383518, 0.34308189, 0.85530874],
[ 0.39790721, 0.35112065, 0.85534204],
[ 0.39187644, 0.35906651, 0.85509668],
[ 0.38575248, 0.36691561, 0.85458563],
[ 0.37954506, 0.37466461, 0.85382216],
[ 0.37326401, 0.3823107 , 0.85281975],
[ 0.36691922, 0.38985158, 0.85159201],
[ 0.3605206 , 0.3972854 , 0.85015258],
[ 0.35407804, 0.4046108 , 0.84851507],
[ 0.34760137, 0.41182688, 0.84669298],
[ 0.34110068, 0.418933 , 0.84469981],
[ 0.33458695, 0.42592852, 0.84254941],
[ 0.32806815, 0.43281442, 0.84025373],
[ 0.32155363, 0.43959135, 0.83782518],
[ 0.31505256, 0.44626026, 0.83527582],
[ 0.30857391, 0.45282241, 0.83261726],
[ 0.30212647, 0.45927932, 0.82986069],
[ 0.29571876, 0.46563275, 0.82701682],
[ 0.28935907, 0.47188468, 0.82409585],
[ 0.28305542, 0.47803728, 0.8211075 ],
[ 0.27681555, 0.48409289, 0.81806095],
[ 0.2706469 , 0.490054 , 0.81496488],
[ 0.26455658, 0.49592324, 0.81182741],
[ 0.25855138, 0.50170333, 0.80865617],
[ 0.25263771, 0.50739708, 0.80545824],
[ 0.24682158, 0.51300741, 0.80224018],
[ 0.24110863, 0.51853725, 0.79900805],
[ 0.23550404, 0.5239896 , 0.79576739],
[ 0.23001251, 0.5293675 , 0.79252325],
[ 0.22463828, 0.53467399, 0.78928019],
[ 0.21938504, 0.53991213, 0.78604229],
[ 0.21425593, 0.54508498, 0.78281317],
[ 0.20925351, 0.55019557, 0.77959598],
[ 0.20437973, 0.55524693, 0.77639345],
[ 0.1996359 , 0.56024206, 0.77320787],
[ 0.19502264, 0.56518393, 0.77004111],
[ 0.1905399 , 0.57007544, 0.76689464],
[ 0.18618694, 0.57491948, 0.76376952],
[ 0.18196229, 0.57971887, 0.76066644],
[ 0.17786378, 0.58447638, 0.75758571],
[ 0.17388852, 0.58919471, 0.75452729],
[ 0.17003294, 0.5938765 , 0.75149077],
[ 0.16629282, 0.59852432, 0.74847543],
[ 0.16266329, 0.60314065, 0.74548018],
[ 0.15913894, 0.60772792, 0.74250365],
[ 0.15571382, 0.61228846, 0.73954414],
[ 0.15238157, 0.61682451, 0.73659967],
[ 0.14913546, 0.62133823, 0.73366794],
[ 0.14596854, 0.62583169, 0.73074641],
[ 0.14287371, 0.63030686, 0.72783226],
[ 0.13984385, 0.63476561, 0.72492243],
[ 0.136872 , 0.63920971, 0.72201358],
[ 0.13395143, 0.64364082, 0.7191022 ],
[ 0.13107621, 0.64806046, 0.71618473],
[ 0.12824075, 0.65247006, 0.71325724],
[ 0.12543969, 0.65687103, 0.71031523],
[ 0.12266916, 0.66126459, 0.70735437],
[ 0.11992657, 0.66565184, 0.70437015],
[ 0.11721077, 0.67003376, 0.70135794],
[ 0.11452237, 0.6744112 , 0.69831295],
[ 0.11186399, 0.6787849 , 0.69523031],
[ 0.10924059, 0.68315548, 0.69210502],
[ 0.10665982, 0.68752341, 0.68893201],
[ 0.1041323 , 0.69188907, 0.68570614],
[ 0.10167206, 0.69625269, 0.6824222 ],
[ 0.09929685, 0.70061438, 0.67907498],
[ 0.09702849, 0.70497413, 0.67565922],
[ 0.09489316, 0.70933179, 0.67216965],
[ 0.09292157, 0.71368713, 0.66860104],
[ 0.09114857, 0.71803979, 0.66494786],
[ 0.08961373, 0.7223893 , 0.6612048 ],
[ 0.0883611 , 0.72673501, 0.65736696],
[ 0.08743755, 0.73107618, 0.65342925],
[ 0.0868919 , 0.73541198, 0.64938667],
[ 0.08677335, 0.73974145, 0.6452343 ],
[ 0.0871296 , 0.74406354, 0.64096731],
[ 0.08800467, 0.74837709, 0.63658099],
[ 0.08943689, 0.75268084, 0.63207074],
[ 0.09145705, 0.75697343, 0.62743205],
[ 0.09408706, 0.76125342, 0.62266056],
[ 0.09733931, 0.76551925, 0.61775204],
[ 0.10121671, 0.76976931, 0.61270239],
[ 0.10571346, 0.77400187, 0.60750764],
[ 0.11081631, 0.77821515, 0.60216396],
[ 0.11650607, 0.78240726, 0.59666767],
[ 0.12275934, 0.78657624, 0.59101523],
[ 0.12955007, 0.79072007, 0.58520325],
[ 0.13685091, 0.79483663, 0.57922847],
[ 0.14463436, 0.79892374, 0.5730878 ],
[ 0.15287364, 0.80297914, 0.56677828],
[ 0.16154321, 0.80700049, 0.56029714],
[ 0.17061924, 0.8109854 , 0.55364172],
[ 0.18007975, 0.81493137, 0.54680956],
[ 0.18990469, 0.81883585, 0.53979837],
[ 0.20007598, 0.82269619, 0.53260603],
[ 0.21057737, 0.82650969, 0.52523062],
[ 0.22139437, 0.83027354, 0.51767044],
[ 0.23251411, 0.83398486, 0.50992404],
[ 0.24392516, 0.83764066, 0.50199022],
[ 0.25561743, 0.8412379 , 0.4938681 ],
[ 0.26758204, 0.84477341, 0.48555716],
[ 0.2798111 , 0.84824393, 0.47705727],
[ 0.29229769, 0.8516461 , 0.46836882],
[ 0.30503911, 0.85497633, 0.45948804],
[ 0.31803459, 0.85823075, 0.45041063],
[ 0.3312734 , 0.86140581, 0.4411472 ],
[ 0.34475123, 0.86449765, 0.43170106],
[ 0.35846408, 0.86750233, 0.42207673],
[ 0.37240817, 0.87041578, 0.41228026],
[ 0.38658826, 0.87323309, 0.40230911],
[ 0.40100945, 0.87594898, 0.39216307],
[ 0.41565394, 0.87856011, 0.3818722 ],
[ 0.4305174 , 0.88106203, 0.37145274],
[ 0.44560988, 0.88344846, 0.36090778],
[ 0.46093466, 0.88571342, 0.35025332],
[ 0.4764647 , 0.88785471, 0.33954538],
[ 0.49220945, 0.88986546, 0.32880446],
[ 0.50817213, 0.89173917, 0.31806706],
[ 0.52431221, 0.89347602, 0.30742727],
[ 0.54066232, 0.89506473, 0.29691207],
[ 0.55716338, 0.89650912, 0.28666067],
[ 0.57382757, 0.89780123, 0.27675452],
[ 0.59059978, 0.89894578, 0.26736171],
[ 0.60747495, 0.89993864, 0.25862194],
[ 0.62437986, 0.90079066, 0.25075587],
[ 0.64129372, 0.90150303, 0.2439532 ],
[ 0.65813362, 0.9020926 , 0.23845876],
[ 0.67483995, 0.90257348, 0.23448842],
[ 0.69134066, 0.90296535, 0.232238 ],
[ 0.7075586 , 0.90329222, 0.231855 ],
[ 0.72342267, 0.90357961, 0.23341551],
[ 0.73886245, 0.90385588, 0.23691527],
[ 0.75384169, 0.90414168, 0.2422736 ],
[ 0.76828873, 0.90447092, 0.24933693],
[ 0.78221676, 0.90485188, 0.25792794],
[ 0.79558008, 0.90531303, 0.26782053],
[ 0.80842177, 0.90585258, 0.27883421],
[ 0.8207006 , 0.90649867, 0.29072839],
[ 0.83248598, 0.90723837, 0.30337614],
[ 0.84378474, 0.90808079, 0.31661371],
[ 0.85460273, 0.90903543, 0.33028474],
[ 0.86499688, 0.91008983, 0.3443259 ],
[ 0.87499154, 0.91124343, 0.35865065],
[ 0.88461129, 0.91249469, 0.3731879 ],
[ 0.89387855, 0.91384221, 0.38787579],
[ 0.90281044, 0.9152864 , 0.4026516 ],
[ 0.9114428 , 0.91681782, 0.41750661],
[ 0.91979668, 0.91843305, 0.43241222],
[ 0.9278919 , 0.92012856, 0.44734592],
[ 0.93574701, 0.9219008 , 0.46229025],
[ 0.94337929, 0.92374622, 0.47723183],
[ 0.95080035, 0.92566398, 0.49214548],
[ 0.95802315, 0.92765211, 0.50701436],
[ 0.96506661, 0.92970447, 0.52184916],
[ 0.97194366, 0.93181788, 0.53664677],
[ 0.97866617, 0.93398929, 0.55140545],
[ 0.98524501, 0.93621585, 0.56612453],
[ 0.99169013, 0.93849488, 0.58080418]]
# Build the matplotlib colormap object from the 256-entry RGB table above.
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np
    try:
        # Prefer the full viscm viewer when it is installed.
        from viscm import viscm
        viscm(test_cm)
    except ImportError:
        # Fallback: render a simple horizontal gradient with the colormap.
        print("viscm not found, falling back on simple display")
        plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                   cmap=test_cm)
    plt.show()
| |
# Copyright 2012 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models used by aeta."""
from __future__ import with_statement
__author__ = 'schuppe@google.com (Robert Schuppenies)'
# Classes defined here are only data containers - pylint:disable-msg=R0903
import logging
import unittest
from google.appengine.api import files
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
try:
import json
except ImportError:
import simplejson as json
from aeta import task_deferred as deferred
from aeta import utils
# Public API of this module.
__all__ = ['TestBatch', 'RunTestUnitTask', 'get_ctx_options']

# The maximum size of a JSON object in a JsonHolder. Since memcache and the
# datastore both have a maximum of 1MB, the object must be a bit smaller than
# 1MB.
_MAX_JSON_BYTES = 1024 * 1000

# How long to wait between attempts to delete dead blobs (in seconds).
_DELETE_BLOB_TIME_SECS = 10 * 60
class JsonHolder(ndb.Model):
    """Base model for entities that carry a potentially large JSON payload.

    Payloads up to _MAX_JSON_BYTES are stored inline in the ``data``
    property; anything larger is written to the Blobstore and referenced
    through ``blob_key``.

    Attributes:
      data: The JSON text, or None if it was never set or lives in the
          Blobstore.
      blob_key: BlobKey of the JSON text in the Blobstore, or None if the
          payload is stored inline (or was never set).
    """

    data = ndb.TextProperty(default=None)
    blob_key = ndb.BlobKeyProperty(default=None)

    def set_json(self, json_obj, conf):
        """Sets the JSON value of this object.

        When the payload goes to the Blobstore, a deferred task is enqueued
        to eventually delete the blob.  Note that the caller still has to
        put() the model afterwards to actually persist the change.

        Args:
          json_obj: The JSON-convertible object to store.
          conf: The configuration to use.

        Raises:
          ValueError: If this object does not have a key yet.
        """
        if not self.key:
            raise ValueError("Set the object's key before calling set_json().")
        serialized = json.dumps(json_obj)
        if len(serialized) <= _MAX_JSON_BYTES:
            self.data = serialized
            return
        # Too large for an inline property: spill to the Blobstore instead.
        self.data = None
        blob_file = files.blobstore.create()
        with files.open(blob_file, 'a') as blob_handle:
            blob_handle.write(serialized)
        files.finalize(blob_file)
        self.blob_key = files.blobstore.get_blob_key(blob_file)
        deferred.defer(_delete_blob_if_done, self.key, self.blob_key, conf,
                       _queue=conf.test_queue,
                       _countdown=_DELETE_BLOB_TIME_SECS)

    def get_json(self):
        """Gets the JSON value of this object.

        Returns:
          The JSON object that was set, or None if nothing was stored.
        """
        if self.data is not None:
            return json.loads(self.data)
        if self.blob_key:
            info = blobstore.BlobInfo.get(self.blob_key)
            if info:
                blob_handle = info.open()
                payload = blob_handle.read()
                blob_handle.close()
                return json.loads(payload)
        return None
class TestBatch(JsonHolder):
    """A collection of tests to be run at once.

    JSON data (see set_info) is of the following form:
    {'num_units': how many test units this batch consists of,
     'load_errors': a list of [object name, error string] for load errors,
     'test_unit_methods': a mapping from test unit fullname to a list of
                          method fullnames in that test object
    }
    or None if the batch has not been initialized.

    Attributes:
      fullname: The name of the object the tests are being run for. This
          should be a period-separated name of a Python test package,
          module, class, or method, or the empty string if all tests are
          being run.
      num_units: How many testing units this batch consists of. There will
          be one RunTestUnitTask per unit. None while the number of units
          is not yet known.
    """

    fullname = ndb.StringProperty()
    num_units = ndb.IntegerProperty(default=None)

    def get_tasks(self, conf):
        """Gets all RunTestUnitTasks associated with this TestBatch.

        Args:
          conf: The configuration to use.

        Returns:
          A list of RunTestUnitTasks belonging to this batch, or None if
          the tasks have not yet been initialized (num_units is unset).
        """
        if self.num_units is None:
            return None
        task_keys = [RunTestUnitTask.get_key(self.key, unit_index)
                     for unit_index in range(self.num_units)]
        return ndb.get_multi(task_keys, **get_ctx_options(conf))

    def set_info(self, load_errors, test_unit_methods, conf):
        """Sets batch information, retrievable as JSON via get_json().

        Also sets num_units to the number of entries in test_unit_methods.

        Args:
          load_errors: A list of (object name, error string) pairs for load
              errors.
          test_unit_methods: A mapping from test unit fullname to a list of
              method fullnames in that object.
          conf: The configuration to use.
        """
        utils.check_type(load_errors, 'load_errors', list)
        utils.check_type(test_unit_methods, 'test_unit_methods', dict)
        self.num_units = len(test_unit_methods)
        self.set_json({
            'num_units': self.num_units,
            'load_errors': load_errors,
            'test_unit_methods': test_unit_methods,
        }, conf)
class RunTestUnitTask(JsonHolder):
    """The state of a task that runs a single TestSuite in a batch of them.

    When creating a new RunTestUnitTask, always set its key to the return
    value of get_key; this enables easy lookup of tasks given their batch.
    JSON data is of the form written by set_test_result(), or None while
    the test has not finished running.

    Attributes:
      fullname: The full name of the TestSuite being run.
    """

    fullname = ndb.StringProperty()

    @classmethod
    def get_key(cls, batch_key, index):
        """Gets the key associated with a RunTestUnitTask.

        Args:
          batch_key: The key of the TestBatch this task is part of.
          index: The integer index assigned to the task, in the range
              [0, batch.num_units).

        Returns:
          An ndb.Key instance corresponding to the RunTestUnitTask.
        """
        utils.check_type(batch_key, 'batch_key', ndb.Key)
        utils.check_type(index, 'index', int)
        return ndb.Key(cls, str(index), parent=batch_key)

    def set_test_result(self, load_errors, testresult, output, conf):
        """Sets test result information, retrievable via get_json().

        Args:
          load_errors: A list of (object name, error string) pairs for load
              errors.
          testresult: The unittest.TestResult for this test run.
          output: The output of print statements in the test.
          conf: The configuration to use.
        """
        utils.check_type(load_errors, 'load_errors', list)
        utils.check_type(testresult, 'testresult', unittest.TestResult)
        utils.check_type(output, 'output', basestring)
        errors = [(case.fullname, exc_text)
                  for (case, exc_text) in testresult.errors]
        failures = [(case.fullname, exc_text)
                    for (case, exc_text) in testresult.failures]
        self.set_json({
            'fullname': self.fullname,
            'load_errors': load_errors,
            'errors': errors,
            'failures': failures,
            'output': output,
        }, conf)
def get_ctx_options(conf):
    """Gets the appropriate NDB context options for storing test information.

    The returned dictionary is passed as keyword arguments to methods such
    as ndb.Key.get(), ndb.Key.put(), ndb.get_multi(), and ndb.put_multi()
    to control which storage layers (cache/memcache/datastore) are used.

    Args:
      conf: The configuration to use. Its ``storage`` attribute specifies
          how results should be stored.

    Returns:
      A dictionary of keyword arguments.
    """
    options_by_method = {
        'datastore': {},
        'memcache': {'use_memcache': True, 'use_datastore': False},
        'immediate': {'use_cache': True, 'use_memcache': False,
                      'use_datastore': False},
    }
    method = conf.storage
    if method in options_by_method:
        return options_by_method[method]
    # Unknown setting: warn and degrade gracefully to memcache-only.
    logging.warning('[aeta] Unknown run method %s. Falling back to memcache.',
                    method)
    return {'use_memcache': True, 'use_datastore': False}
def _delete_blob_if_done(obj_key, blob_key, conf):
    """Deletes a blob once the JsonHolder that references it is gone.

    While the holder entity still exists, the deletion is re-scheduled to
    run again later instead.

    Args:
      obj_key: The ndb.Key of a JsonHolder.
      blob_key: The BlobKey to delete.
      conf: The configuration to use.
    """
    if obj_key.get(**get_ctx_options(conf)):
        # Holder still exists; check again after the usual delay.
        deferred.defer(_delete_blob_if_done, obj_key, blob_key, conf,
                       _queue=conf.test_queue,
                       _countdown=_DELETE_BLOB_TIME_SECS)
        return
    info = blobstore.BlobInfo.get(blob_key)
    if info:
        info.delete()
| |
"""The tests for the MQTT binary sensor platform."""
import copy
from datetime import datetime, timedelta
import json
import pytest
from homeassistant.components import binary_sensor, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message, async_fire_time_changed
# Minimal MQTT binary_sensor configuration shared by the help_test_* helpers.
DEFAULT_CONFIG = {
    binary_sensor.DOMAIN: {
        "platform": "mqtt",
        "name": "test",
        "state_topic": "test-topic",
    }
}
async def test_setting_sensor_value_expires_availability_topic(
    hass, mqtt_mock, legacy_patchable_time, caplog
):
    """Test the expiration of the value."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "expire_after": 4,
            "force_update": True,
            "availability_topic": "availability-topic",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE

    async_fire_mqtt_message(hass, "availability-topic", "online")

    # Even with the availability topic reporting online, the entity stays
    # unavailable because expire_after is defined (> 0) and no state arrived.
    assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE

    await expires_helper(hass, mqtt_mock, caplog)
async def test_setting_sensor_value_expires(
    hass, mqtt_mock, legacy_patchable_time, caplog
):
    """Test the expiration of the value."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "expire_after": 4,
            "force_update": True,
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    # State should be unavailable since expire_after is defined and > 0.
    assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE

    await expires_helper(hass, mqtt_mock, caplog)
async def expires_helper(hass, mqtt_mock, caplog):
    """Run the basic expiry code.

    Assumes ``binary_sensor.test`` was set up with ``expire_after: 4`` so a
    value becomes unavailable 4 s after the last message, and that each new
    message resets the expiry timer.
    """
    now = datetime(2017, 1, 1, 1, tzinfo=dt_util.UTC)
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        async_fire_mqtt_message(hass, "test-topic", "ON")
        await hass.async_block_till_done()

    # Value was set correctly.
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_ON

    # Time jump +3s
    now = now + timedelta(seconds=3)
    async_fire_time_changed(hass, now)
    await hass.async_block_till_done()

    # Value is not yet expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_ON

    # Next message resets timer
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        async_fire_mqtt_message(hass, "test-topic", "OFF")
        await hass.async_block_till_done()

    # Value was updated correctly.
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_OFF

    # Time jump +3s
    now = now + timedelta(seconds=3)
    async_fire_time_changed(hass, now)
    await hass.async_block_till_done()

    # Value is not yet expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_OFF

    # Time jump +2s (5 s total since the OFF message: past expire_after)
    now = now + timedelta(seconds=2)
    async_fire_time_changed(hass, now)
    await hass.async_block_till_done()

    # Value is expired now
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_UNAVAILABLE
async def test_setting_sensor_value_via_mqtt_message(hass, mqtt_mock):
    """Test the setting of the value via MQTT."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_OFF

    # Matching payloads toggle the entity accordingly.
    for payload, expected in (("ON", STATE_ON), ("OFF", STATE_OFF)):
        async_fire_mqtt_message(hass, "test-topic", payload)
        assert hass.states.get("binary_sensor.test").state == expected
async def test_invalid_sensor_value_via_mqtt_message(hass, mqtt_mock, caplog):
    """Test the setting of the value via MQTT."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_OFF

    # A payload matching neither payload_on nor payload_off is rejected.
    async_fire_mqtt_message(hass, "test-topic", "0N")
    assert hass.states.get("binary_sensor.test").state == STATE_OFF
    assert "No matching payload found for entity" in caplog.text

    caplog.clear()
    assert "No matching payload found for entity" not in caplog.text

    async_fire_mqtt_message(hass, "test-topic", "ON")
    assert hass.states.get("binary_sensor.test").state == STATE_ON

    # An invalid payload leaves the previous state untouched.
    async_fire_mqtt_message(hass, "test-topic", "0FF")
    assert hass.states.get("binary_sensor.test").state == STATE_ON
    assert "No matching payload found for entity" in caplog.text
async def test_setting_sensor_value_via_mqtt_message_and_template(hass, mqtt_mock):
    """Test the setting of the value via MQTT."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
            # The template renders the opposite of the current state, so each
            # incoming message (even an empty one) toggles the entity.
            "value_template": '{%if is_state(entity_id,"on")-%}OFF'
            "{%-else-%}ON{%-endif%}",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_OFF

    async_fire_mqtt_message(hass, "test-topic", "")
    assert hass.states.get("binary_sensor.test").state == STATE_ON

    async_fire_mqtt_message(hass, "test-topic", "")
    assert hass.states.get("binary_sensor.test").state == STATE_OFF
async def test_setting_sensor_value_via_mqtt_message_and_template2(
    hass, mqtt_mock, caplog
):
    """Test the setting of the value via MQTT."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
            "value_template": "{{value | upper}}",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_OFF

    # Lower-case payloads are upper-cased by the template before matching.
    for payload, expected in (("on", STATE_ON), ("off", STATE_OFF)):
        async_fire_mqtt_message(hass, "test-topic", payload)
        assert hass.states.get("binary_sensor.test").state == expected

    # A rendered value matching neither payload is rejected and logged.
    async_fire_mqtt_message(hass, "test-topic", "illegal")
    assert hass.states.get("binary_sensor.test").state == STATE_OFF
    assert "template output: 'ILLEGAL'" in caplog.text
async def test_setting_sensor_value_via_mqtt_message_empty_template(
    hass, mqtt_mock, caplog
):
    """Test the setting of the value via MQTT."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
            "value_template": '{%if value == "ABC"%}ON{%endif%}',
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    assert hass.states.get("binary_sensor.test").state == STATE_OFF

    # A payload for which the template renders nothing is ignored and logged.
    async_fire_mqtt_message(hass, "test-topic", "DEF")
    assert hass.states.get("binary_sensor.test").state == STATE_OFF
    assert "Empty template output" in caplog.text

    async_fire_mqtt_message(hass, "test-topic", "ABC")
    assert hass.states.get("binary_sensor.test").state == STATE_ON
async def test_valid_device_class(hass, mqtt_mock):
    """Test the setting of a valid sensor class."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "device_class": "motion",
            "state_topic": "test-topic",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    # A recognized device_class is exposed as a state attribute.
    state = hass.states.get("binary_sensor.test")
    assert state.attributes.get("device_class") == "motion"
async def test_invalid_device_class(hass, mqtt_mock):
    """Test the setting of an invalid sensor class."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "device_class": "abc123",
            "state_topic": "test-topic",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    # Config validation rejects the bogus device_class, so no entity exists.
    assert hass.states.get("binary_sensor.test") is None
# The next four tests delegate to shared helpers from .test_common, which
# exercise the generic MQTT availability behaviour against DEFAULT_CONFIG.


async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Test availability after MQTT disconnection."""
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    await help_test_availability_without_topic(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    await help_test_default_availability_payload(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    await help_test_custom_availability_payload(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )
async def test_force_update_disabled(hass, mqtt_mock):
    """Test force update option."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    state_events = []

    @ha.callback
    def record_event(event):
        """Collect state-changed events for counting."""
        state_events.append(event)

    hass.bus.async_listen(EVENT_STATE_CHANGED, record_event)

    # First message changes the state and fires one event.
    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert len(state_events) == 1

    # Without force_update a repeated identical payload fires no new event.
    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert len(state_events) == 1
async def test_force_update_enabled(hass, mqtt_mock):
    """Test force update option."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
            "force_update": True,
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    state_events = []

    @ha.callback
    def record_event(event):
        """Collect state-changed events for counting."""
        state_events.append(event)

    hass.bus.async_listen(EVENT_STATE_CHANGED, record_event)

    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert len(state_events) == 1

    # With force_update a repeated identical payload fires another event.
    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert len(state_events) == 2
async def test_off_delay(hass, mqtt_mock):
    """Test off_delay option."""
    config = {
        binary_sensor.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "test-topic",
            "payload_on": "ON",
            "payload_off": "OFF",
            "off_delay": 30,
            "force_update": True,
        }
    }
    assert await async_setup_component(hass, binary_sensor.DOMAIN, config)
    await hass.async_block_till_done()

    state_events = []

    @ha.callback
    def record_event(event):
        """Collect state-changed events for counting."""
        state_events.append(event)

    hass.bus.async_listen(EVENT_STATE_CHANGED, record_event)

    # First ON message turns the sensor on.
    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test").state == STATE_ON
    assert len(state_events) == 1

    # force_update makes a repeated ON fire another state event.
    async_fire_mqtt_message(hass, "test-topic", "ON")
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test").state == STATE_ON
    assert len(state_events) == 2

    # After off_delay seconds the sensor switches off by itself.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=30))
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test").state == STATE_OFF
    assert len(state_events) == 3
# The next five tests delegate to shared helpers from .test_common, which
# exercise generic MQTT JSON-attribute and discovery-attribute behaviour
# against DEFAULT_CONFIG.


async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass, mqtt_mock):
    """Test unique id option only creates one sensor per unique_id."""
    # Two sensors deliberately share the same unique_id.
    sensor_1 = {
        "platform": "mqtt",
        "name": "Test 1",
        "state_topic": "test-topic",
        "unique_id": "TOTALLY_UNIQUE",
    }
    sensor_2 = {
        "platform": "mqtt",
        "name": "Test 2",
        "state_topic": "test-topic",
        "unique_id": "TOTALLY_UNIQUE",
    }
    config = {binary_sensor.DOMAIN: [sensor_1, sensor_2]}
    await help_test_unique_id(hass, mqtt_mock, binary_sensor.DOMAIN, config)
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog):
    """Test removal of discovered binary_sensor."""
    data = json.dumps(DEFAULT_CONFIG[binary_sensor.DOMAIN])
    await help_test_discovery_removal(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data
    )


async def test_discovery_update_binary_sensor(hass, mqtt_mock, caplog):
    """Test update of discovered binary_sensor."""
    # Two discovery payloads differing only in name trigger an entity update.
    config1 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])
    config2 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])
    config1["name"] = "Beer"
    config2["name"] = "Milk"
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    await help_test_discovery_update(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data1, data2
    )
async def test_expiration_on_discovery_and_discovery_update_of_binary_sensor(
    hass, mqtt_mock, legacy_patchable_time, caplog
):
    """Test that binary_sensor with expire_after set behaves correctly on discovery and discovery update."""
    entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
    await async_start(hass, "homeassistant", entry)

    config = {
        "name": "Test",
        "state_topic": "test-topic",
        "expire_after": 4,
        "force_update": True,
    }
    config_msg = json.dumps(config)

    # Set time and publish config message to create binary_sensor via discovery with 4 s expiry
    now = datetime(2017, 1, 1, 1, tzinfo=dt_util.UTC)
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        async_fire_mqtt_message(
            hass, "homeassistant/binary_sensor/bla/config", config_msg
        )
        await hass.async_block_till_done()

    # Test that binary_sensor is not available
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_UNAVAILABLE

    # Publish state message
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_mqtt_message(hass, "test-topic", "ON")
        await hass.async_block_till_done()

    # Test that binary_sensor has correct state
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_ON

    # Advance +3 seconds
    now = now + timedelta(seconds=3)
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        await hass.async_block_till_done()

    # binary_sensor is not yet expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_ON

    # Resend config message to update discovery; this must NOT reset the
    # expiry timer of the already-received state.
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        async_fire_mqtt_message(
            hass, "homeassistant/binary_sensor/bla/config", config_msg
        )
        await hass.async_block_till_done()

    # Test that binary_sensor has not expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_ON

    # Add +2 seconds (5 s total: past the 4 s expire_after)
    now = now + timedelta(seconds=2)
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_time_changed(hass, now)
        await hass.async_block_till_done()

    # Test that binary_sensor has expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_UNAVAILABLE

    # Resend config message to update discovery
    with patch(("homeassistant.helpers.event.dt_util.utcnow"), return_value=now):
        async_fire_mqtt_message(
            hass, "homeassistant/binary_sensor/bla/config", config_msg
        )
        await hass.async_block_till_done()

    # Test that binary_sensor is still expired
    state = hass.states.get("binary_sensor.test")
    assert state.state == STATE_UNAVAILABLE
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # data1 is invalid (negative off_delay); data2 is a valid replacement.
    data1 = '{ "name": "Beer",' ' "off_delay": -1 }'
    data2 = '{ "name": "Milk",' ' "state_topic": "test_topic" }'
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data1, data2
    )


# The remaining tests delegate to shared helpers from .test_common covering
# device-registry and entity-registry integration with DEFAULT_CONFIG.


async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT binary sensor device registry integration."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT binary sensor device registry integration."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )


async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
    )
| |
#! /usr/bin/env python
import os
import sys
import math
import argparse
import warnings
from glob import glob
from functools import wraps
import numpy as np
from qtpy import QtWidgets, QtCore
from planetaryimage import PDS3Image
from ginga.BaseImage import BaseImage
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from .histogram import HistogramWidget, HistogramModel
from .channels_dialog import ChannelsDialog, ChannelsDialogModel
try:
from . import label
except ImportError:
from pdsview import label
# Reuse an already-running Qt application if one exists; otherwise create
# one, since a QApplication must exist before any widgets are instantiated.
app = QtWidgets.QApplication.instance()
if not app:
    app = QtWidgets.QApplication(sys.argv)
class ImageStamp(BaseImage):
    """A ginga BaseImage object that will be displayed in PDSViewer.

    Parameters
    ----------
    filepath : string
        A file and its relative path from the current working directory
    name : string
        The name used to identify the image (may include a channel suffix)
    pds_image : planetaryimage object
        A planetaryimage object for the file
    data_np : array
        The image data for this stamp

    Attributes
    ----------
    image_name : string
        The name given for the image
    file_name : string
        The basename of filepath
    pds_image : planetaryimage object
        A planetaryimage object
    label : list
        The image's label, one text line per element
    cuts : tuple (int, int)
        The min and max pixel value scaling
    sarr : np array
        The color map of the array in an array
    zoom : float
        Zoom level of the image
    rotation : float
        The degrees the image is rotated
    transforms : tuple (bool, bool, bool)
        Whether the image is flipped across the x-axis, y-axis, or x/y is
        switched
    not_been_displayed : bool
        Whether the image has been displayed already
    """

    def __init__(self, filepath, name, pds_image, data_np, metadata=None,
                 logger=None):
        BaseImage.__init__(self, data_np=data_np, metadata=metadata,
                           logger=logger)
        self.set_data(data_np)
        # Read the PDS label: every line up to and including the 'END'
        # marker.  (The enumerate() index in the original code was unused,
        # so a plain iteration suffices.)
        label_array = []
        with open(filepath, 'rb') as f:
            for line in f:
                line = line.decode().rstrip()
                label_array.append(line)
                if line.strip() == 'END':
                    break
        self.data = data_np
        self.image_name = name
        self.file_name = os.path.basename(filepath)
        self.pds_image = pds_image
        self.label = label_array
        # Per-image display state; populated lazily by the viewer.
        self.cuts = None
        self.sarr = None
        self.zoom = None
        self.rotation = None
        self.transforms = None
        self.not_been_displayed = True

    def __repr__(self):
        return self.image_name
class ImageSet(object):
"""A set of ginga images to be displayed and methods to control the images.
Parameters
----------
filepaths: list
A list of filepaths to pass through ImageStamp
Attribute
---------
images : list
A list of ginga images with attributes set in ImageStamp that can be
displayed in PDSViewer
current_image : ImageStamp object
The currently displayed image
current_image_index : int
Index value of the current image
file_dict : dictionary
dictionary of images list, makes accessing images by name easier
channel : int
Which channel in the image the view should be in
next_prev_enabled : bool
Whether the next and previous buttons should be enabled
"""
    def __init__(self, filepaths):
        """Build ImageStamp channel lists for filepaths and pick the first image."""
        # Remove any duplicate filepaths and sort the list alpha-numerically.
        filepaths = sorted(list(set(filepaths)))
        # Views (widgets) registered for change notifications via register().
        self._views = set()
        # Create image objects with attributes set in ImageStamp
        # These objects contain the data ginga will use to display the image
        self.images = []
        self.create_image_set(filepaths)
        self._current_image_index = 0
        self._channel = 0
        # self._last_channel = None
        # Cursor position and pixel readout, exposed through properties.
        self._x_value = 0
        self._y_value = 0
        self._pixel_value = (0, )
        self.use_default_text = True
        self.rgb = []
        # With an empty filepath list there is nothing to display.
        if self.images:
            self.current_image = self.images[self.current_image_index]
        else:
            self.current_image = None
    def register(self, view):
        """Register a view with the model"""
        # Registered views are notified through methods such as
        # display_image() and set_x_value_text() (see the setters below).
        self._views.add(view)

    def unregister(self, view):
        """Unregister a view with the model"""
        self._views.remove(view)
def create_image_set(self, filepaths):
rgb = ['R', 'G', 'B']
for filepath in filepaths:
try:
channels = []
pds_image = PDS3Image.open(filepath)
bands = pds_image.label['IMAGE']['BANDS']
if bands == 3:
for n in range(bands):
name = os.path.basename(filepath) + '(%s)' % (rgb[n])
data = pds_image.image[:, :, n]
image = ImageStamp(
filepath=filepath, name=name, data_np=data,
pds_image=pds_image)
# self.file_dict[image.image_name] = image
channels.append(image)
self.images.append(channels)
else:
name = os.path.basename(filepath)
data = pds_image.image
image = ImageStamp(
filepath=filepath, name=name, data_np=data,
pds_image=pds_image)
self.images.append([image])
# self.file_dict[image.image_name] = image
except:
warnings.warn(filepath + " cannnot be opened")
    @property
    def next_prev_enabled(self):
        """Set whether the next and previous buttons are enabled."""
        # Cycling only makes sense when there is more than one image.
        return len(self.images) > 1

    @property
    def current_image_index(self):
        # Index of the image currently displayed (setter wraps the value).
        return self._current_image_index
    @current_image_index.setter
    def current_image_index(self, index):
        # Wrap any out-of-range index so next/previous cycle endlessly.
        # NOTE(review): these loops never terminate if self.images is empty;
        # presumably callers only reach here with a non-empty list — confirm.
        while index >= len(self.images):
            index -= len(self.images)
        while index < 0:
            index += len(self.images)
        self._current_image_index = index
        self.current_image = self.images[index]
        # Switching images always resets the view to the first channel.
        self._channel = 0
        # Notify every registered view to redraw with the new image.
        for view in self._views:
            view.display_image()
    @property
    def channel(self):
        # Index of the band displayed within the current image.
        return self._channel

    # @property
    # def last_channel(self):
    #     return self._last_channel

    @channel.setter
    def channel(self, new_channel):
        number_channels = len(self.current_image)
        # Single band images have no other channels to cycle through.
        if number_channels == 1:
            return
        # NOTE(review): _previous_channel is stored but never read in this
        # file — confirm whether it is still needed.
        self._previous_channel = self._channel
        self._channel = new_channel
        # Wrap forward past the last channel; a decrement below zero relies
        # on Python negative indexing when the channel is used as an index.
        if self._channel == number_channels:
            self._channel = 0
        for view in self._views:
            view.display_image()
    @property
    def x_value(self):
        # Cursor x coordinate as a whole pixel index.
        return self._x_value

    @x_value.setter
    def x_value(self, new_x_value):
        # Round to the nearest whole pixel before storing.
        self._x_value = int(round(new_x_value, 0))
        for view in self._views:
            view.set_x_value_text()

    @property
    def x_value_text(self):
        """Label text for the current x coordinate."""
        return 'X: %d' % (self.x_value)

    @property
    def y_value(self):
        # Cursor y coordinate as a whole pixel index.
        return self._y_value

    @y_value.setter
    def y_value(self, new_y_value):
        # Round to the nearest whole pixel before storing.
        self._y_value = int(round(new_y_value, 0))
        for view in self._views:
            view.set_y_value_text()

    @property
    def y_value_text(self):
        """Label text for the current y coordinate."""
        return 'Y: %d' % (self.y_value)
    @property
    def pixel_value(self):
        # Rounded copy for display; 1-tuple for gray, 3-tuple for RGB.
        return tuple([float(round(value, 3)) for value in self._pixel_value])

    @pixel_value.setter
    def pixel_value(self, new_pixel_value):
        # Normalize a scalar or any sequence type to a tuple of floats.
        if isinstance(new_pixel_value, (tuple, list, np.ndarray)):
            _new_pixel_value = [float(pixel) for pixel in new_pixel_value]
        else:
            _new_pixel_value = [float(new_pixel_value)]
        self._pixel_value = tuple(_new_pixel_value)
        for view in self._views:
            view.set_pixel_value_text()

    @property
    def pixel_value_text(self):
        """Label text for the current pixel value(s)."""
        current_image = self.current_image[self.channel]
        if current_image.ndim == 3:
            # RGB composite currently displayed: show one value per band.
            return 'R: %.3f G: %.3f B: %.3f' % (self.pixel_value)
        else:
            return 'Value: %.3f' % (self.pixel_value)
    def append(self, new_files, dipslay_first_new_image):
        """Append a new image to the images list if it is pds compatible

        Parameters
        ----------
        new_files : list
            File paths to attempt to open and append
        dipslay_first_new_image : int
            Index of the first newly added image (parameter name is kept
            as-is, misspelling included, for caller compatibility)
        """
        self.create_image_set(new_files)
        # If nothing was appended (no PDS compatible files) keep the
        # currently displayed image.
        if dipslay_first_new_image == len(self.images):
            return
        self.current_image_index = dipslay_first_new_image
        self.current_image = self.images[self.current_image_index]
@property
def bands_are_composite(self):
r_band = self.rgb[0]
# Use logic that if a=b and a=c then b=c
return all([r_band.data.shape == band.data.shape for band in self.rgb])
def _create_rgb_image_wrapper(func):
@wraps(func)
def wrapper(self):
if not self.bands_are_composite:
raise ValueError(
(
'The bands must all be the same shape in order to' +
'make a composite image'
)
)
else:
return func(self)
return wrapper
@_create_rgb_image_wrapper
def create_rgb_image(self):
rgb_image = np.stack(
[band.data for band in self.rgb],
axis=-1
)
return rgb_image
    def ROI_data(self, left, bottom, right, top):
        """Calculate the data in the Region of Interest

        Parameters
        ----------
        left : float
            The x coordinate value of the left side of the Region of Interest
        bottom : float
            The y coordinate value of the bottom side of the Region of Interest
        right : float
            The x coordinate value of the right side of the Region of Interest
        top : float
            The y coordinate value of the top side of the Region of Interest

        Returns
        -------
        data : array
            The data within the Region of Interest
        """
        # Snap fractional edge coordinates up to whole pixel indices.
        left = int(math.ceil(left))
        bottom = int(math.ceil(bottom))
        right = int(math.ceil(right))
        top = int(math.ceil(top))
        data = self.current_image[self.channel].cutout_data(
            left, bottom, right, top)
        return data
def ROI_pixels(self, left, bottom, right, top):
"""Calculate the number of pixels in the Region of Interest
Parameters
----------
See ROI_data
Returns
-------
pixels : int
The number of pixels in the Region of Interest
"""
pixels = (right - left) * (top - bottom)
return pixels
def ROI_std_dev(
self, left=None, bottom=None, right=None, top=None, data=None):
"""Calculate the standard deviation in the Region of Interest
Note
----
If data is not provided, the left, bottom, right, and top parameters
must be provided. Otherwise it will result in a TypeError.
Parameters
----------
left : Optional[float]
The x coordinate value of the left side of the Region of Interest
bottom : Optional[float]
The y coordinate value of the bottom side of the Region of Interest
right : Optional[float]
The x coordinate value of the right side of the Region of Interest
bottom : Optional[float]
The y coordinate value of the top side of the Region of Interest
data : Optional[array]
The data within the Region of Interest
Returns
-------
std_dev : float
The standard deviation of the pixels in the Region of Interest
"""
if data is None:
data = self.ROI_data(left, bottom, right, top)
std_dev = round(np.std(data), 6)
return std_dev
def ROI_mean(
self, left=None, bottom=None, right=None, top=None, data=None):
"""Calculate the mean of the Region of Interest
Parameters
----------
See ROI_std_dev
Note
----
See ROI_std_dev
Returns
-------
mean : float
The mean pixel value of the Region of Interest
"""
if data is None:
data = self.ROI_data(left, bottom, right, top)
mean = round(np.mean(data), 4)
return mean
def ROI_median(
self, left=None, bottom=None, right=None, top=None, data=None):
"""Find the median of the Region of Interest
Parameters
----------
See ROI_std_dev
Note
----
See ROI_std_dev
Returns
-------
median : float
The median pixel value of the Region of Interest
"""
if data is None:
data = self.ROI_data(left, bottom, right, top)
median = np.median(data)
return median
def ROI_min(
self, left=None, bottom=None, right=None, top=None, data=None):
"""Find the minimum pixel value of the Region of Interest
Parameters
----------
See ROI_std_dev
Note
----
See ROI_std_dev
Returns
-------
minimum : float
The minimum pixel value of the Region of Interest
"""
if data is None:
data = self.ROI_data(left, bottom, right, top)
minimum = np.nanmin(data)
return minimum
def ROI_max(
self, left=None, bottom=None, right=None, top=None, data=None):
"""Find the maximum pixel value of the Region of Interest
Parameters
----------
See ROI_std_dev
Note
----
See ROI_std_dev
Returns
-------
maximum : float
The maximum pixel value of the Region of Interest
"""
if data is None:
data = self.ROI_data(left, bottom, right, top)
maximum = np.nanmax(data)
return maximum
class PDSController(object):
    """Mediate between the ImageSet model and the PDSViewer view.

    Parameters
    ----------
    model : ImageSet
        The model holding the images and display state
    view : PDSViewer
        The window displaying the images
    """

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def next_image(self):
        # The model's index setter wraps around and notifies the views.
        self.model.current_image_index += 1

    def previous_image(self):
        self.model.current_image_index -= 1

    def next_channel(self):
        # The model's channel setter wraps around and notifies the views.
        self.model.channel += 1

    def previous_channel(self):
        self.model.channel -= 1

    def new_x_value(self, new_x):
        self.model.x_value = new_x

    def new_y_value(self, new_y):
        self.model.y_value = new_y

    def new_pixel_value(self, new_pixel_value):
        self.model.pixel_value = new_pixel_value

    def _populate_rgb(self, image_index):
        # Gather 3 bands starting with the given image, walking forward
        # through the image list and wrapping back to the start when the
        # end is reached, until exactly 3 bands are collected.
        rgb = []
        number_of_images = len(self.model.images)
        while len(rgb) < 3:
            for band in self.model.images[image_index]:
                rgb.append(band)
                if len(rgb) == 3:
                    break
            at_end_of_image_list = image_index == (number_of_images - 1)
            image_index = 0 if at_end_of_image_list else image_index + 1
        return rgb

    def update_rgb(self):
        """Update the rgb list to have the 3 channels or the next 3 images"""
        self.model.rgb = []
        current_image = self.model.current_image
        image_is_not_rgb = len(current_image) < 3
        if image_is_not_rgb:
            # Borrow bands from subsequent images to fill the 3 slots.
            self.model.rgb = self._populate_rgb(self.model.current_image_index)
        else:
            for band in current_image:
                self.model.rgb.append(band)
class PDSViewer(QtWidgets.QMainWindow):
"""A display of a single image with the option to view other images
Parameters
----------
image_set: list
A list of ginga objects with attributes set in ImageStamp"""
    def __init__(self, image_set):
        super(PDSViewer, self).__init__()
        self.image_set = image_set
        # Register for display_image()/set_*_text() callbacks from the model.
        self.image_set.register(self)
        self.controller = PDSController(self.image_set, self)
        # Set the sub window names here. This implementation will help prevent
        # the main window from spawning duplicate children. Even if the
        # duplication prevention is not set up for a window, this will be a
        # handy reference list of windows(or dialogs in most cases) that can
        # be spawned out of this window.
        self._label_window = None
        self._label_window_pos = None
        self.channels_window = None
        self.channels_window_is_open = False
        self.channels_window_pos = None
        # Ginga canvas: autocuts/zoom, mouse callbacks and rectangle drawing.
        self.view_canvas = ImageViewCanvas(render='widget')
        self.view_canvas.set_autocut_params('zscale')
        self.view_canvas.enable_autozoom('override')
        self.view_canvas.enable_autocuts('override')
        self.view_canvas.set_callback('drag-drop', self.drop_file)
        self.view_canvas.set_bg(0.5, 0.5, 0.5)
        self.view_canvas.ui_setActive(True)
        self.view_canvas.get_bindings().enable_all(True)
        # Activate left mouse click to display values
        self.view_canvas.set_callback('cursor-down', self.display_values)
        # Activate click and drag to update values
        self.view_canvas.set_callback('cursor-move', self.display_values)
        # Right-button draw events create the Region of Interest rectangle.
        self.view_canvas.set_callback('draw-down', self.start_ROI)
        self.view_canvas.set_callback('draw-up', self.stop_ROI)
        self.view_canvas.enable_draw(True)
        self.view_canvas.set_drawtype('rectangle')
        main_layout = QtWidgets.QGridLayout()
        # self.open_label is need as an attribute to determine whether the user
        # should be able to open the label window. The other side of this
        # toggle is found in load_file().
        open_file = QtWidgets.QPushButton("Open File")
        open_file.clicked.connect(self.open_file)
        self.next_image_btn = QtWidgets.QPushButton("Next")
        self.next_image_btn.clicked.connect(self.next_image)
        self.next_image_btn.setEnabled(image_set.next_prev_enabled)
        self.previous_image_btn = QtWidgets.QPushButton("Previous")
        self.previous_image_btn.clicked.connect(self.previous_image)
        self.previous_image_btn.setEnabled(image_set.next_prev_enabled)
        self.open_label = QtWidgets.QPushButton("Label")
        self.open_label.clicked.connect(self.display_label)
        quit_button = QtWidgets.QPushButton("Quit")
        quit_button.clicked.connect(self.quit)
        self.rgb_check_box = QtWidgets.QCheckBox("RGB")
        self.rgb_check_box.stateChanged.connect(self.switch_rgb)
        self.next_channel_btn = QtWidgets.QPushButton('CH +')
        self.next_channel_btn.clicked.connect(self.next_channel)
        self.previous_channel_btn = QtWidgets.QPushButton('CH -')
        self.previous_channel_btn.clicked.connect(self.previous_channel)
        self.restore_defaults = QtWidgets.QPushButton("Restore Defaults")
        self.restore_defaults.clicked.connect(self.restore)
        self.channels_button = QtWidgets.QPushButton("Channels")
        self.channels_button.clicked.connect(self.channels_dialog)
        # Set Text so the size of the boxes are at an appropriate size
        self.x_value_lbl = QtWidgets.QLabel('X: #####')
        self.y_value_lbl = QtWidgets.QLabel('Y: #####')
        self.pixel_value_lbl = QtWidgets.QLabel(
            'R: ######, G: ###### B: ######')
        self.pixels = QtWidgets.QLabel('#Pixels: #######')
        self.std_dev = QtWidgets.QLabel(
            'Std Dev: R: ######### G: ######### B: #########')
        self.mean = QtWidgets.QLabel(
            'Mean: R: ######## G: ######## B: ########')
        self.median = QtWidgets.QLabel(
            'Median: R: ######## G: ######## B: ########')
        self.min = QtWidgets.QLabel('Min: R: ### G: ### B: ###')
        self.max = QtWidgets.QLabel('Max: R: ### G: ### B: ###')
        main_layout.setHorizontalSpacing(10)
        # Set format for each information box to be the same
        for info_box in (self.x_value_lbl, self.y_value_lbl,
                         self.pixel_value_lbl, self.pixels, self.std_dev,
                         self.mean, self.median, self.min, self.max):
            info_box.setFrameShape(QtWidgets.QFrame.Panel)
            info_box.setFrameShadow(QtWidgets.QFrame.Sunken)
            info_box.setLineWidth(3)
            info_box.setMidLineWidth(1)
            info_box.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
        # Pin each pair of stats boxes to the first box's size hint so the
        # grid does not resize as the displayed values change.
        for main_box, second_box in ((self.std_dev, self.pixels),
                                     (self.mean, self.median),
                                     (self.min, self.max)):
            main_box.setMinimumSize(main_box.sizeHint())
            main_box.setMaximumSize(main_box.sizeHint())
            second_box.setMinimumSize(main_box.sizeHint())
            second_box.setMaximumSize(main_box.sizeHint())
        self.histogram = HistogramModel(self.view_canvas, bins=100)
        self.histogram_widget = HistogramWidget(self.histogram)
        # Match button/label widths to the histogram's width.
        min_width = self.histogram_widget.histogram.width()
        for widget in (open_file, self.next_image_btn, self.previous_image_btn,
                       self.channels_button, self.open_label,
                       self.restore_defaults, self.rgb_check_box,
                       self.x_value_lbl, self.y_value_lbl, quit_button,
                       self.next_channel_btn, self.previous_channel_btn,
                       self.pixel_value_lbl):
            widget.setMinimumWidth(min_width)
            widget.setMaximumWidth(min_width)
        fixed_size = self.pixel_value_lbl.sizeHint().width()
        self.x_value_lbl.setMinimumWidth(fixed_size / 2)
        self.x_value_lbl.setMaximumWidth(fixed_size / 2)
        self.y_value_lbl.setMinimumWidth(fixed_size / 2)
        self.y_value_lbl.setMaximumWidth(fixed_size / 2)
        column_spacing_x_y = 5
        self.pixel_value_lbl.setMinimumWidth(fixed_size + column_spacing_x_y)
        self.pixel_value_lbl.setMaximumWidth(fixed_size + column_spacing_x_y)
        # Grid placement: controls and stats on the left, canvas at right.
        main_layout.addWidget(open_file, 0, 0)
        main_layout.addWidget(quit_button, 0, 1)
        main_layout.addWidget(self.pixels, 0, 2)
        main_layout.addWidget(self.mean, 0, 3)
        main_layout.addWidget(self.min, 0, 4)
        main_layout.addWidget(self.previous_image_btn, 1, 0)
        main_layout.addWidget(self.next_image_btn, 1, 1)
        main_layout.addWidget(self.std_dev, 1, 2)
        main_layout.addWidget(self.median, 1, 3)
        main_layout.addWidget(self.max, 1, 4)
        main_layout.addWidget(self.previous_channel_btn, 2, 0)
        main_layout.addWidget(self.next_channel_btn, 2, 1)
        main_layout.addWidget(self.channels_button, 3, 0)
        main_layout.addWidget(self.open_label, 3, 1)
        main_layout.addWidget(self.restore_defaults, 4, 0)
        main_layout.addWidget(self.rgb_check_box, 4, 1)
        main_layout.addWidget(self.histogram_widget, 5, 0, 2, 2)
        x_y_layout = QtWidgets.QGridLayout()
        x_y_layout.setHorizontalSpacing(column_spacing_x_y)
        x_y_layout.addWidget(self.x_value_lbl, 0, 0)
        x_y_layout.addWidget(self.y_value_lbl, 0, 1)
        main_layout.addLayout(x_y_layout, 7, 0)
        main_layout.addWidget(self.pixel_value_lbl, 8, 0, 1, 2)
        main_layout.addWidget(self.view_canvas.get_widget(), 2, 2, 9, 4)
        main_layout.setRowStretch(9, 1)
        main_layout.setColumnStretch(5, 1)
        vw = QtWidgets.QWidget()
        self.setCentralWidget(vw)
        vw.setLayout(main_layout)
        self.view_canvas.set_desired_size(100, 100)
        if self.image_set.current_image:
            self.display_image()
        self._reset_display_values()
    @property
    def current_image(self):
        # The ImageStamp for the active channel of the active image.
        return self.image_set.current_image[self.image_set.channel]

    def display_image(self):
        """Display the current image and refresh all dependent widgets."""
        self.controller.update_rgb()
        self._set_rgb_state()
        self._update_channels_image()
        self.view_canvas.set_image(self.current_image)
        if self.current_image.not_been_displayed:
            # First time shown: start from the default view settings.
            self.restore()
        else:
            # Shown before: re-apply the settings saved for this image.
            self.apply_parameters(self.current_image, self.view_canvas)
            self.view_canvas.delayed_redraw()
        self.current_image.not_been_displayed = False
        self.histogram.set_data()
        self._disable_next_previous()
        self._reset_ROI()
        self._update_label()
        self.setWindowTitle(self.current_image.image_name)

    def _refresh_ROI_text(self):
        # Recompute the statistics for the ROI currently on the canvas.
        self.stop_ROI(self.view_canvas, None, None, None)

    def _reset_ROI(self):
        if len(self.view_canvas.objects) > 1:
            # An ROI rectangle exists; refresh its statistics for the new
            # image/channel.
            self._refresh_ROI_text()
            self.view_canvas.update_canvas()
        else:
            # No ROI drawn: treat the whole image as the ROI.
            self.set_ROI_text(
                0, 0, self.current_image.width, self.current_image.height)
    def _update_channels_image(self):
        # Keep the channels dialog (if open) in sync with the new image.
        if self.channels_window:
            self.channels_window.change_image()

    def _set_rgb_state(self):
        # Re-apply the RGB checkbox so the new image shows in the same mode.
        state = self.rgb_check_box.checkState()
        self.switch_rgb(state)

    def _disable_next_previous(self):
        # Channel cycling only applies to 3-band images.
        if len(self.image_set.current_image) < 3:
            self.next_channel_btn.setEnabled(False)
            self.previous_channel_btn.setEnabled(False)

    def _renew_display_values(self):
        # Re-sample the pixel under the last cursor position (e.g. after a
        # channel switch changed the data under the cursor).
        try:
            data_x = self.image_set.x_value
            data_y = self.image_set.y_value
            self.display_values(self.view_canvas, None, data_x, data_y)
        except ValueError:
            pass

    def _reset_display_values(self):
        # Show placeholder text until the user clicks in the image.
        self.x_value_lbl.setText('X: ????')
        self.y_value_lbl.setText('Y: ????')
        if self.current_image.ndim == 3:
            self.pixel_value_lbl.setText('R: ???? G: ???? B: ????')
        elif self.current_image.ndim == 2:
            self.pixel_value_lbl.setText('Value: ????')
    def _change_wrapper(image_was_changed):
        # Decorator factory for the navigation handlers below: save the
        # current view parameters before switching, then either reset the
        # value labels (new image) or re-sample them (new channel).
        # To be more explicit later
        channel_was_changed = not image_was_changed

        def decorator(func):
            @wraps(func)
            def wrapper(self):
                self.save_parameters()
                result = func(self)
                if image_was_changed:
                    self._reset_display_values()
                elif channel_was_changed:
                    self._renew_display_values()
                return result
            return wrapper
        return decorator

    @_change_wrapper(True)
    def next_image(self):
        self.controller.next_image()

    @_change_wrapper(True)
    def previous_image(self):
        self.controller.previous_image()

    @_change_wrapper(False)
    def next_channel(self):
        self.controller.next_channel()

    @_change_wrapper(False)
    def previous_channel(self):
        self.controller.previous_channel()
    def display_rgb_image(self):
        # Show the composite built from the currently selected bands.
        rgb_image = self.image_set.create_rgb_image()
        self.current_image.set_data(rgb_image)
        # Channel cycling is meaningless while the composite is shown.
        self.next_channel_btn.setEnabled(False)
        self.previous_channel_btn.setEnabled(False)

    def _undo_display_rgb_image(self):
        # Restore the single-band data for the current channel.
        self.current_image.set_data(self.current_image.data)
        if len(self.image_set.current_image) == 3:
            self.next_channel_btn.setEnabled(True)
            self.previous_channel_btn.setEnabled(True)
        if self.channels_window:
            self.channels_window.rgb_check_box.setCheckState(
                QtCore.Qt.Unchecked)

    def switch_rgb(self, state):
        """Display rgb image when rgb box is checked, single band otherwise"""
        if state == QtCore.Qt.Checked:
            if self.channels_window:
                # Let the channels dialog drive the composite display.
                self.channels_window.rgb_check_box.setCheckState(
                    QtCore.Qt.Checked)
            else:
                if self.image_set.bands_are_composite:
                    self.display_rgb_image()
                else:
                    print("Images must be the same size")
                    print("Use the channels button to select the bands")
                    self.rgb_check_box.setCheckState(QtCore.Qt.Unchecked)
        elif state == QtCore.Qt.Unchecked:
            self._undo_display_rgb_image()
        # Refresh ROI statistics and histogram for the new display data.
        if len(self.view_canvas.objects) >= 1:
            self._refresh_ROI_text()
        if self.view_canvas.get_image() is not None:
            self.histogram.set_data()
    def _point_is_in_image(self, point):
        # Allow a half-pixel margin around the image bounds.
        data_x, data_y = point
        height, width = self.current_image.shape[:2]
        in_width = data_x >= -0.5 and data_x <= (width + 0.5)
        in_height = data_y >= -0.5 and data_y <= (height + 0.5)
        return in_width and in_height

    def _set_point_in_image(self, point):
        data_x, data_y = point
        image = self.view_canvas.get_image()
        self.controller.new_x_value(data_x)
        self.controller.new_y_value(data_y)
        # Sample the pixel at the model's rounded coordinates.
        x, y = self.image_set.x_value, self.image_set.y_value
        self.controller.new_pixel_value(image.get_data_xy(x, y))

    def _set_point_out_of_image(self):
        # Outside the image the pixel value is reported as zero.
        x, y = self.view_canvas.get_last_data_xy()
        self.controller.new_x_value(x)
        self.controller.new_y_value(y)
        if self.current_image.ndim == 3:
            self.controller.new_pixel_value((0, 0, 0))
        elif self.current_image.ndim == 2:
            self.controller.new_pixel_value(0)

    def set_x_value_text(self):
        # Callback invoked by the model when x_value changes.
        self.x_value_lbl.setText(self.image_set.x_value_text)

    def set_y_value_text(self):
        # Callback invoked by the model when y_value changes.
        self.y_value_lbl.setText(self.image_set.y_value_text)

    def set_pixel_value_text(self):
        # Callback invoked by the model when pixel_value changes.
        self.pixel_value_lbl.setText(self.image_set.pixel_value_text)

    def display_values(self, view_canvas, button, data_x, data_y):
        "Display the x, y, and pixel value when the mouse is pressed and moved"
        point = (data_x, data_y)
        if self._point_is_in_image(point):
            self._set_point_in_image(point)
        else:
            self._set_point_out_of_image()
    def display_label(self):
        """Display the label over the image"""
        # Utilizing the sub window variables to check if the label window has
        # been opened before. If not, the window is initialized.
        if self._label_window is None:
            self._label_window = label.LabelView(self)
        self._label_window.is_open = True
        self._label_window.show()
        self._label_window.activateWindow()

    def _update_label(self):
        # Update label
        self.image_label = self.current_image.label
        # This checks to see if the label window exists and is open. If so,
        # this resets the label field so that the label being displayed is the
        # label for the current product. The label does not reset its position.
        if self._label_window is not None:
            pos = self._label_window.pos()
            label_text = '\n'.join(self.image_label)
            self._label_window.label_contents.setText(label_text)
            if self._label_window.is_open:
                # Close and reopen at the same position so the window
                # refreshes with the new label text.
                self._label_window.cancel()
                self._label_window.move(pos)
                self._label_window.show()
                self._label_window.is_open = True
                self._label_window.activateWindow()
def open_file(self):
"""Open a new image file from a file explorer"""
file_name = QtWidgets.QFileDialog()
file_name.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
new_files = file_name.getOpenFileNames(self)[0]
if new_files:
if self.image_set.current_image:
self.save_parameters()
first_new_image = len(self.image_set.images)
self.image_set.append(new_files, first_new_image)
# If there are no new images, don't continue
if first_new_image == len(self.image_set.images):
warnings.warn("The image(s) chosen are not PDS compatible")
return
self.next_image_btn.setEnabled(
self.image_set.next_prev_enabled)
self.previous_image_btn.setEnabled(
self.image_set.next_prev_enabled)
self._display_image()
else:
# integrate with logger
print("No file selected!")
return
    def channels_dialog(self):
        """Display the channels dialog box"""
        if not self.channels_window:
            self.channels_window = ChannelsDialog(ChannelsDialogModel(self))
        self.channels_window_is_open = True
        # Reopen the dialog where the user last placed it.
        if self.channels_window_pos:
            self.channels_window.move(self.channels_window_pos)
        self.channels_window.show()

    def save_parameters(self):
        """Save the view parameters on the image"""
        # Stash the canvas state on the ImageStamp so apply_parameters()
        # can restore it when this image/channel is shown again.
        last_image = self.image_set.current_image[self.image_set.channel]
        last_image.sarr = self.view_canvas.get_rgbmap().get_sarr()
        last_image.zoom = self.view_canvas.get_zoom()
        last_image.rotation = self.view_canvas.get_rotation()
        last_image.transforms = self.view_canvas.get_transforms()
        last_image.cuts = self.view_canvas.get_cut_levels()
def apply_parameters(self, image, view):
"""Display image with the images parameters"""
if image.sarr is None:
pass
else:
view.get_rgbmap().set_sarr(image.sarr)
if image.zoom is None:
pass
else:
view.zoom_to(image.zoom)
if image.rotation is None:
pass
else:
view.rotate(image.rotation)
if image.transforms is None:
pass
else:
flip_x = image.transforms[0]
flip_y = image.transforms[1]
switch_xy = image.transforms[2]
view.transform(flip_x, flip_y, switch_xy)
if image.cuts is None:
pass
else:
loval, hival = image.cuts
view.cut_levels(loval, hival, True)
    def restore(self):
        """Restore image to the default settings"""
        self.view_canvas.get_rgbmap().reset_sarr()
        # Briefly enable autocuts so auto_levels computes fresh cut levels,
        # then return to override mode.
        self.view_canvas.enable_autocuts('on')
        self.view_canvas.auto_levels()
        self.view_canvas.enable_autocuts('override')
        self.view_canvas.rotate(0.0)
        # The default transform/rotation of the image will be image specific so
        # transform bools will change in the future
        self.view_canvas.transform(False, False, False)
        self.view_canvas.zoom_fit()
        self.histogram.restore()

    def start_ROI(self, view_canvas, button, data_x, data_y):
        """Ensure only one Region of Interest (ROI) exists at a time

        Note
        ----
        This method is called when the right mouse button is pressed. Even
        though the arguments are not used, they are necessary to catch the
        right mouse button press event.

        Parameters
        ----------
        view_canvas : ImageViewCanvas object
            The view that displays the image
        button : Qt.RightButton
            The right mouse button
        data_x : float
            The x-value of the location of the right button
        data_y : float
            The y-value of the location of the right button
        """
        # Objects beyond the first are drawn ROIs (see delete_ROI); remove
        # any existing one before a new rectangle is drawn.
        if len(view_canvas.objects) > 1:
            self.delete_ROI()
    def stop_ROI(self, view_canvas, button, data_x, data_y):
        """Create a Region of Interest (ROI)

        When drawing stops (release of the right mouse button), the ROI border
        snaps to inclusive pixel (see top_right_pixel_snap and
        bottom_left_pixel_snap). The ROI's information is set as an attributes
        of the current image (see calculate_ROI_info).

        Note
        ----
        This method is called when the right mouse button is released. Even
        though only the view_canvas argument is used, they are all necessary to
        catch the right mouse button release event.

        Parameters
        ----------
        See start_ROI parameters
        """
        # If there are no draw objects, stop
        current_image = self.image_set.current_image[self.image_set.channel]
        if len(view_canvas.objects) == 1:
            # Only the image itself is on the canvas: whole image is the ROI.
            self.set_ROI_text(0, 0, current_image.width, current_image.height)
            return
        draw_obj = view_canvas.objects[1]
        # Retrieve the left, right, top, & bottom x and y values
        roi = self.left_right_bottom_top(
            draw_obj.x1, draw_obj.x2, draw_obj.y1, draw_obj.y2)
        left_x, right_x, bot_y, top_y, x2_is_right, y2_is_top = roi
        # Single right click deletes any ROI & sets the whole image as the ROI
        if left_x == right_x and bot_y == top_y:
            self.set_ROI_text(0, 0, current_image.width, current_image.height)
            self.delete_ROI()
            return
        # Determine if the ROI is outside the image.
        max_height = current_image.height
        max_width = current_image.width
        top_y, top_in_image = self.top_right_pixel_snap(top_y, max_height)
        bot_y, bot_in_image = self.bottom_left_pixel_snap(bot_y, max_height)
        right_x, right_in_image = self.top_right_pixel_snap(right_x, max_width)
        left_x, left_in_image = self.bottom_left_pixel_snap(left_x, max_width)
        # If the entire ROI is outside the ROI, delete the ROI and set the ROI
        # to the whole image
        in_image = any(
            (left_in_image, right_in_image, top_in_image, bot_in_image)
        )
        if not in_image:
            self.set_ROI_text(0, 0, current_image.width, current_image.height)
            self.delete_ROI()
            return
        # Snap the ROI to the edge of the image if it is outside the image.
        # The snapped values are written back to whichever corner of the
        # rectangle (x1/y1 vs x2/y2) actually held that side.
        if y2_is_top:
            draw_obj.y2 = top_y
            draw_obj.y1 = bot_y
        else:
            draw_obj.y1 = top_y
            draw_obj.y2 = bot_y
        if x2_is_right:
            draw_obj.x2 = right_x
            draw_obj.x1 = left_x
        else:
            draw_obj.x1 = right_x
            draw_obj.x2 = left_x
        # Calculate the ROI information
        self.set_ROI_text(left_x, bot_y, right_x, top_y)
def top_right_pixel_snap(self, ROI_side, image_edge):
"""Snaps the top or right side of the ROI to the inclusive pixel
Parameters
----------
ROI_side : float
Either the ROI's top-y or right-x value
image_edge : float
The top or right edge of the image
Returns
-------
ROI_side : float
The x or y value of the right or top ROI side respectively
side_in_image : bool
True if the side is in the image, False otherwise
"""
# If the top/right ROI edge is outside the image, reset the top/right
# ROI side value to the edge
if ROI_side > image_edge:
ROI_side = image_edge + .5
side_in_image = True
# If the right side is to the left of the image or the top side is
# below the image, then the entire ROI is outside the image
elif ROI_side < 0.0:
side_in_image = False
# If the top/right ROI values is inside the image, snap it the edge
# of the inclusive pixel. If the value is already on the edge, pass
else:
if ROI_side - int(ROI_side) == .5:
pass
else:
ROI_side = round(ROI_side) + .5
side_in_image = True
return (ROI_side, side_in_image)
def bottom_left_pixel_snap(self, ROI_side, image_edge):
"""Snaps the bottom or left side of the ROI to the inclusive pixel
Parameters
----------
ROI_side : float
Either the ROI's bottom-y or left-x value
image_edge : float
The top or right edge of the image
Returns
-------
ROI_side : float
The x or y value of the left or bottom ROI side respectively
side_in_image : bool
True if the side is in the image, False otherwise
"""
# If the bottom/left ROI edge is outside the image, reset the
# bottom/left ROI side value to the bottom/left edge (-0.5)
if ROI_side < 0.0:
ROI_side = -0.5
side_in_image = True
# If the left side is to the right of the image or the bottom side is
# above the image, then the entire ROI is outside the image
elif ROI_side > image_edge:
side_in_image = False
# If the bottom/left ROI values is inside the image, snap it the edge
# of the inclusive pixel. If the value is already on the edge, pass
else:
if ROI_side - int(ROI_side) == .5:
pass
else:
ROI_side = round(ROI_side) - 0.5
side_in_image = True
return (ROI_side, side_in_image)
def left_right_bottom_top(self, x1, x2, y1, y2):
"""Determines the values for the left, right, bottom, and top vertices
Parameters
----------
x1 : float
The x-value of where the right cursor was clicked
x2 : float
The x-value of where the right cursor was released
y1 : float
The y-value of where the right cursor was clicked
y2 : float
The y-value of where the right cursor was released
Returns
-------
left_x : float
The x-value of the left side of the ROI
right_x : float
The x-value of the right side of the ROI
bot_y : float
The y-value of the bottom side of the ROI
top_y : float
The y-value of the top side of the ROI
x2_is_right : bool
True if the x2 input is the right side of the ROI, False otherwise
y2_is_top : bool
True if the y2 input is the top side of the ROI, False otherwise
"""
if x2 > x1:
right_x = x2
left_x = x1
x2_is_right = True
else:
right_x = x1
left_x = x2
x2_is_right = False
if y2 > y1:
top_y = y2
bot_y = y1
y2_is_top = True
else:
top_y = y1
bot_y = y2
y2_is_top = False
return (left_x, right_x, bot_y, top_y, x2_is_right, y2_is_top)
def delete_ROI(self):
"""Deletes the Region of Interest"""
try:
self.view_canvas.deleteObject(self.view_canvas.objects[1])
except:
return
def set_ROI_text(self, left, bottom, right, top):
"""Set the text of the ROI information boxes
When the image has three bands (colored), the ROI value boxes will
display the values for each band.
Parameters
----------
left : float
The x coordinate value of the left side of the ROI
bottom : float
The y coordinate value of the bottom side of the ROI
right : float
The x coordinate value of the right side of the ROI
bottom : float
The y coordinate value of the top side of the ROI
"""
data = self.image_set.ROI_data(
left, bottom, right, top)
# Calculate the number of pixels in the ROI
ROI_pixels = self.image_set.ROI_pixels(left, bottom, right, top)
self.pixels.setText('#Pixels: %d' % (ROI_pixels))
if data.ndim == 2:
# 2 band image is a gray scale image
self.set_ROI_gray_text(data)
elif data.ndim == 3:
# Three band image is a RGB colored image
try:
self.set_ROI_RGB_text(data)
except:
# If the ROI does not contain values for each band, treat the
# ROI like a gray scale image
self.set_ROI_gray_text(data)
    def set_ROI_gray_text(self, data):
        """Set the values for the ROI in the text boxes for a gray image

        Parameters
        ----------
        data : array
            The data from the Region of Interest
        """
        ROI_std_dev = self.image_set.ROI_std_dev(data=data)
        ROI_mean = self.image_set.ROI_mean(data=data)
        ROI_median = self.image_set.ROI_median(data=data)
        ROI_min = self.image_set.ROI_min(data=data)
        ROI_max = self.image_set.ROI_max(data=data)
        self.std_dev.setText('Std Dev: %.6f' % (ROI_std_dev))
        self.mean.setText('Mean: %.4f' % (ROI_mean))
        self.median.setText('Median: %.1f' % (ROI_median))
        self.min.setText('Min: %d' % (ROI_min))
        self.max.setText('Max: %d' % (ROI_max))

    def set_ROI_RGB_text(self, data):
        """Set the values for the ROI in the text boxes for a RGB image

        Parameters
        ----------
        data : array
            The data from the Region of Interest
        """
        calc = self.image_set
        # Compute each statistic per band (R, G, B = last-axis slices).
        ROI_stdev = [calc.ROI_std_dev(data=data[:, :, n]) for n in range(3)]
        ROI_mean = [calc.ROI_mean(data=data[:, :, n]) for n in range(3)]
        ROI_median = [calc.ROI_median(data=data[:, :, n]) for n in range(3)]
        ROI_max = [int(calc.ROI_max(data=data[:, :, n])) for n in range(3)]
        ROI_min = [int(calc.ROI_min(data=data[:, :, n])) for n in range(3)]
        self.std_dev.setText(
            'Std Dev: R: %.6f G: %.6f B: %.6f' % (tuple(ROI_stdev)))
        self.mean.setText(
            'Mean: R: %s G: %s B: %s' % (tuple(ROI_mean)))
        self.median.setText(
            'Median: R: %s G: %s B: %s' % (tuple(ROI_median)))
        self.max.setText(
            'Max: R: %s G: %s B: %s' % (tuple(ROI_max)))
        self.min.setText(
            'Min: R: %s G: %s B: %s' % (tuple(ROI_min)))
    def drop_file(self, pdsimage, paths):
        """This function is not yet supported"""
        # file_name = paths[0]
        # self.load_file(file_name)
        pass

    def quit(self, *args):
        """Close pdsview"""
        # Close child windows before the main window.
        if self._label_window is not None:
            self._label_window.cancel()
        if self.channels_window:
            self.channels_window.hide()
        self.close()
def pdsview(inlist=None):
    """Run pdsview from python shell or command line with arguments

    Parameters
    ----------
    inlist : list
        A list of file names/paths to display in the pdsview

    Examples
    --------
    From the command line:
    To view all images from current directory
    pdsview
    To view all images in a different directory
    pdsview path/to/different/directory/
    This is the same as:
    pdsview path/to/different/directory/*
    To view a specific image or types of images
    pdsview 1p*img
    To view images from multiple directories:
    pdsview * path/to/other/directory/
    From the (i)python command line:
    >>> from pdsview.pdsview import pdsview
    >>> pdsview()
    Displays all of the images from current directory
    >>> pdsview('path/to/different/directory')
    Displays all of the images in the different directory
    >>> pdsview ('1p*img')
    Displays all of the images that follow the glob pattern
    >>> pdsview ('a1.img, b*.img, example/path/x*img')
    You can display multiple images, globs, and paths in one window by
    separating each item by a comma
    >>> pdsview (['a1.img, b3.img, c1.img, d*img'])
    You can also pass in a list of files/globs
    """
    files = []
    if isinstance(inlist, list):
        if inlist:
            for item in inlist:
                files += arg_parser(item)
        else:
            # An empty list (what cli() passes when no positional arguments
            # are given) behaves like no argument at all: show everything in
            # the current directory.  Previously this fell through with an
            # empty file list, contradicting the documented behavior.
            files = glob('*')
    elif isinstance(inlist, str):
        # A single string may hold several comma separated names/globs.
        for name in inlist.split(','):
            files += arg_parser(name.strip())
    elif inlist is None:
        files = glob('*')
    image_set = ImageSet(files)
    w = PDSViewer(image_set)
    w.resize(780, 770)
    w.show()
    w.view_canvas.zoom_fit()
    app.setActiveWindow(w)
    sys.exit(app.exec_())
def arg_parser(args):
    """Expand one command-line argument into a list of file paths.

    A directory expands to its contents, a non-empty string is treated as a
    glob pattern, and an empty value means "everything here".
    """
    if os.path.isdir(args):
        return glob(os.path.join(args, '*'))
    if args:
        return glob(args)
    return glob('*')
def cli():
    """Give pdsview ability to run from command line"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'file', nargs='*',
        help="Input filename or glob for files with certain extensions"
    )
    parsed = parser.parse_args()
    pdsview(parsed.file)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General IAM utilities used by the Cloud SDK."""
import httplib
import json
from apitools.base.protorpclite import messages as apitools_messages
from apitools.base.py import encoding
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as gcloud_exceptions
from googlecloudsdk.core import apis as core_apis
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import yaml
# Message classes for the IAM v1 API, generated by apitools.
msgs = core_apis.GetMessagesModule('iam', 'v1')
# Enum describing who manages a service account key (user vs. system).
MANAGED_BY = (msgs.IamProjectsServiceAccountsKeysListRequest
              .KeyTypesValueValuesEnum)
# Private key output format accepted when *creating* a key.
CREATE_KEY_TYPES = (msgs.CreateServiceAccountKeyRequest
                    .PrivateKeyTypeValueValuesEnum)
# Private key output format on an existing ServiceAccountKey (p12 / json).
KEY_TYPES = (msgs.ServiceAccountKey.PrivateKeyTypeValueValuesEnum)
# Public key output format when fetching a key (pem / raw).
PUBLIC_KEY_TYPES = (
    msgs.IamProjectsServiceAccountsKeysGetRequest.PublicKeyTypeValueValuesEnum)
class IamEtagReadError(core_exceptions.Error):
  """Raised when a policy etag is badly formatted (e.g. not valid Base64)."""
def _AddRoleArgument(
    parser, help_text, completion_resource_arg, completion_resource_collection):
  """Add the required --role flag, wiring up remote completion when possible.

  Remote tab completion is only enabled when both the resource argument name
  and its collection are supplied.
  """
  def CompletionCallback(parsed_args):
    # Build the `gcloud beta iam list-grantable-roles` invocation used to
    # fetch role completion candidates for the parsed resource.
    resource_ref = resources.REGISTRY.Parse(
        getattr(parsed_args, completion_resource_arg),
        collection=completion_resource_collection)
    return ['beta', 'iam', 'list-grantable-roles', '--format=value(name)',
            resource_ref.SelfLink()]

  can_complete = bool(
      completion_resource_arg and completion_resource_collection)
  parser.add_argument(
      '--role', required=True,
      completion_resource='iam.roles' if can_complete else None,
      list_command_callback_fn=CompletionCallback if can_complete else None,
      help=help_text)
def AddArgsForAddIamPolicyBinding(
    parser, completion_resource_arg=None, completion_resource_collection=None):
  """Register the --role and --member flags for add-iam-policy-binding.

  Args:
    parser: An argparse.ArgumentParser-like object to register the flags on.
    completion_resource_arg: str, Name of the argument that holds the resource
      upon which the policy is applied to.
    completion_resource_collection: str, Collection of the resource.
      Both completion arguments are optional, but role tab completion is only
      possible when both are specified.

  Raises:
    ArgumentError if one of the arguments is already defined in the parser.
  """
  _AddRoleArgument(parser, 'Define the role of the member.',
                   completion_resource_arg, completion_resource_collection)
  member_help = 'The member to add to the binding.'
  parser.add_argument('--member', required=True, help=member_help)
def AddArgsForRemoveIamPolicyBinding(
    parser, completion_resource_arg=None, completion_resource_collection=None):
  """Register the --role and --member flags for remove-iam-policy-binding.

  Args:
    parser: An argparse.ArgumentParser-like object to register the flags on.
    completion_resource_arg: str, Name of the argument that holds the resource
      upon which the policy is applied to.
    completion_resource_collection: str, Collection of the resource.
      Both completion arguments are optional, but role tab completion is only
      possible when both are specified.

  Raises:
    ArgumentError if one of the arguments is already defined in the parser.
  """
  _AddRoleArgument(parser, 'The role to remove the member from.',
                   completion_resource_arg, completion_resource_collection)
  member_help = 'The member to remove from the binding.'
  parser.add_argument('--member', required=True, help=member_help)
def AddBindingToIamPolicy(messages, policy, member, role):
  """Add a member to an IAM policy under the given role.

  An IAM binding is a pair of role and member.  A server policy can contain
  several bindings with the same role, so membership is checked across *all*
  bindings for the role before anything is modified; only if the member is
  absent everywhere is it appended to the first matching binding (or a new
  binding is created).

  Args:
    messages: ToolResults API message classes generated by apitools.
      Required to create new bindings of the proper type.
    policy: IAM policy to which we want to add the bindings.
    member: The member to add to IAM policy.
    role: The role the member should have.
  """
  role_bindings = [b for b in policy.bindings if b.role == role]
  # Nothing to do if the member already holds the role in any binding.
  if any(member in b.members for b in role_bindings):
    return
  # Reuse the first existing binding for this role rather than creating a
  # duplicate binding with the same role.
  if role_bindings:
    role_bindings[0].members.append(member)
    return
  # No binding exists for this role; create one.  (The previous version
  # passed role through '{0}'.format(role), a redundant no-op for the string
  # roles used here.)
  policy.bindings.append(messages.Binding(members=[member], role=role))
def RemoveBindingFromIamPolicy(policy, member, role):
  """Remove a member from every binding with the given role.

  An IAM binding is a pair of role and member.  Because a server policy can
  contain duplicate bindings for a role, the member is scrubbed from every
  matching binding; bindings left with no members are dropped entirely.

  Args:
    policy: IAM policy from which we want to remove bindings.
    member: The member to remove from the IAM policy.
    role: The role the member should be removed from.
  """
  # Scrub the member from each binding that carries the role.
  matching = (b for b in policy.bindings if b.role == role)
  for binding in matching:
    if member in binding.members:
      binding.members.remove(member)
  # Drop now-empty bindings, mutating the list in place.
  policy.bindings[:] = [b for b in policy.bindings if b.members]
def ParsePolicyFile(policy_file_path, policy_message_type):
  """Build an IAM Policy protorpc.Message from a JSON or YAML file.

  JSON is attempted first; on failure the file is re-parsed as YAML.

  Args:
    policy_file_path: Path to the JSON or YAML IAM policy file.
    policy_message_type: Policy message type to convert JSON or YAML to.

  Returns:
    a protorpc.Message of type policy_message_type filled in from the JSON or
    YAML policy file.

  Raises:
    BadFileException if the JSON or YAML file is malformed.
  """
  try:
    policy = ParseJsonPolicyFile(policy_file_path, policy_message_type)
  except gcloud_exceptions.BadFileException:
    try:
      policy = ParseYamlPolicyFile(policy_file_path, policy_message_type)
    except gcloud_exceptions.BadFileException:
      raise gcloud_exceptions.BadFileException(
          'Policy file {0} is not a properly formatted JSON or YAML'
          ' policy file.'.format(policy_file_path))

  if not policy.etag:
    # Without an etag the replace is unconditional; make the user confirm.
    msg = ('The specified policy does not contain an "etag" field '
           'identifying a specific version to replace. Changing a '
           'policy without an "etag" can overwrite concurrent policy '
           'changes.')
    console_io.PromptContinue(
        message=msg, prompt_string='Replace existing policy',
        cancel_on_no=True)
  return policy
def ParseJsonPolicyFile(policy_file_path, policy_message_type):
  """Build an IAM Policy protorpc.Message from a JSON formatted file.

  Args:
    policy_file_path: Path to the JSON IAM policy file.
    policy_message_type: Policy message type to convert JSON to.

  Returns:
    a protorpc.Message of type policy_message_type filled in from the JSON
    policy file.

  Raises:
    BadFileException if the JSON file is malformed.
    IamEtagReadError if the etag is badly formatted.
  """
  try:
    with open(policy_file_path) as policy_file:
      policy_json = policy_file.read()
  except EnvironmentError:
    # EnvironmentError is the parent of IOError, OSError and WindowsError,
    # raised when the file does not exist or cannot be opened/read.
    raise core_exceptions.Error(
        'Unable to read policy file {0}'.format(policy_file_path))

  try:
    return encoding.JsonToMessage(policy_message_type, policy_json)
  except ValueError as e:
    # Badly formatted JSON.
    raise gcloud_exceptions.BadFileException(
        'Policy file {0} is not a properly formatted JSON policy file. {1}'
        .format(policy_file_path, str(e)))
  except apitools_messages.DecodeError as e:
    # The etag was not proper Base64.
    raise IamEtagReadError(
        'The etag of policy file {0} is not properly formatted. {1}'
        .format(policy_file_path, str(e)))
def ParseYamlPolicyFile(policy_file_path, policy_message_type):
  """Build an IAM Policy protorpc.Message from a YAML formatted file.

  Args:
    policy_file_path: Path to the YAML IAM policy file.
    policy_message_type: Policy message type to convert YAML to.

  Returns:
    a protorpc.Message of type policy_message_type filled in from the YAML
    policy file.

  Raises:
    BadFileException if the YAML file is malformed.
    IamEtagReadError if the etag is badly formatted.
  """
  try:
    with open(policy_file_path) as policy_file:
      parsed = yaml.safe_load(policy_file)
  except EnvironmentError:
    # EnvironmentError is the parent of IOError, OSError and WindowsError,
    # raised when the file does not exist or cannot be opened/read.
    raise core_exceptions.Error('Unable to read policy file {0}'.format(
        policy_file_path))
  except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
    # The file is not well-formed YAML at all.
    raise gcloud_exceptions.BadFileException(
        'Policy file {0} is not a properly formatted YAML policy file. {1}'
        .format(policy_file_path, str(e)))

  try:
    return encoding.PyValueToMessage(policy_message_type, parsed)
  except AttributeError as e:
    # Well-formed YAML that does not describe a policy message.
    raise gcloud_exceptions.BadFileException(
        'Policy file {0} is not a properly formatted YAML policy file. {1}'
        .format(policy_file_path, str(e)))
  except apitools_messages.DecodeError as e:
    # The etag was not proper Base64.
    raise IamEtagReadError(
        'The etag of policy file {0} is not properly formatted. {1}'
        .format(policy_file_path, str(e)))
def GetDetailedHelpForSetIamPolicy(collection, example_id, example_see_more=''):
  """Build the detailed_help dict for a set-iam-policy command.

  Args:
    collection: Name of the command collection (ex: "project", "dataset")
    example_id: Collection identifier to display in a sample command
      (ex: "my-project", '1234')
    example_see_more: Optional "See ... for details" message. If not
      specified, a default reference to the IAM managing-policies
      documentation is included.

  Returns:
    a dict with boilerplate help text for the set-iam-policy command
  """
  see_more = example_see_more or """
See https://cloud.google.com/iam/docs/managing-policies for details
of the policy file format and contents."""
  return {
      'brief': 'Set IAM policy for a {0}.'.format(collection),
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
The following command will read an IAM policy defined in a JSON file
'policy.json' and set it for a {0} with identifier '{1}'
$ {{command}} {1} policy.json
{2}""".format(collection, example_id, see_more),
  }
def GetDetailedHelpForAddIamPolicyBinding(collection, example_id):
  """Build the detailed_help dict for an add-iam-policy-binding command.

  Args:
    collection: Name of the command collection (ex: "project", "dataset")
    example_id: Collection identifier to display in a sample command
      (ex: "my-project", '1234')

  Returns:
    a dict with boilerplate help text for the add-iam-policy-binding command
  """
  return {
      'brief': 'Add IAM policy binding for a {0}.'.format(collection),
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
The following command will add an IAM policy binding for the role
of 'roles/editor' for the user 'test-user@gmail.com' on a {0} with
identifier '{1}'
$ {{command}} {1} --member='user:test-user@gmail.com' --role='roles/editor'
See https://cloud.google.com/iam/docs/managing-policies for details
of policy role and member types.
""".format(collection, example_id),
  }
def GetDetailedHelpForRemoveIamPolicyBinding(collection, example_id):
  """Build the detailed_help dict for a remove-iam-policy-binding command.

  Args:
    collection: Name of the command collection (ex: "project", "dataset")
    example_id: Collection identifier to display in a sample command
      (ex: "my-project", '1234')

  Returns:
    a dict with boilerplate help text for the remove-iam-policy-binding command
  """
  return {
      'brief': 'Remove IAM policy binding for a {0}.'.format(collection),
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
The following command will remove a IAM policy binding for the role
of 'roles/editor' for the user 'test-user@gmail.com' on {0} with
identifier '{1}'
$ {{command}} {1} --member='user:test-user@gmail.com' --role='roles/editor'
See https://cloud.google.com/iam/docs/managing-policies for details
of policy role and member types.
""".format(collection, example_id),
  }
def ManagedByFromString(managed_by):
  """Parses a string into a list of MANAGED_BY enum values.

  MANAGED_BY is an enum of who manages a service account key resource. IAM
  will rotate any SYSTEM_MANAGED keys by default.

  Args:
    managed_by: A string representation of a MANAGED_BY. Can be one of *user*,
      *system* or *any*.

  Returns:
    A list of KeyTypesValueValuesEnum (MANAGED_BY) values: one element for
    *user* or *system*, an empty list for *any* (no filtering), and
    [KEY_TYPE_UNSPECIFIED] for any unrecognized string.  (The previous
    docstring incorrectly described the return value as a single enum value.)
  """
  if managed_by == 'user':
    return [MANAGED_BY.USER_MANAGED]
  elif managed_by == 'system':
    return [MANAGED_BY.SYSTEM_MANAGED]
  elif managed_by == 'any':
    return []
  else:
    return [MANAGED_BY.KEY_TYPE_UNSPECIFIED]
def KeyTypeFromString(key_str):
  """Parses a string into a KeyType enum.

  Args:
    key_str: A string representation of a KeyType. Can be either *p12* or
      *json*.

  Returns:
    A PrivateKeyTypeValueValuesEnum value.
  """
  mapping = {
      'p12': KEY_TYPES.TYPE_PKCS12_FILE,
      'json': KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE,
  }
  # Anything unrecognized maps to the unspecified sentinel.
  return mapping.get(key_str, KEY_TYPES.TYPE_UNSPECIFIED)
def KeyTypeToString(key_type):
  """Get a string version of a KeyType enum.

  Args:
    key_type: An enum of either KEY_TYPES or CREATE_KEY_TYPES.

  Returns:
    The string representation of the key_type, such that
    parseKeyType(keyTypeToString(x)) is a no-op.
  """
  # Accept the value from either enum type; apitools generates a distinct
  # enum class per proto occurrence.
  if key_type in (KEY_TYPES.TYPE_PKCS12_FILE,
                  CREATE_KEY_TYPES.TYPE_PKCS12_FILE):
    return 'p12'
  if key_type in (KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE,
                  CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE):
    return 'json'
  return 'unspecified'
def KeyTypeToCreateKeyType(key_type):
  """Transforms between instances of KeyType enums.

  Transforms KeyTypes into CreateKeyTypes.

  Args:
    key_type: A ServiceAccountKey.PrivateKeyTypeValueValuesEnum value.

  Returns:
    A IamProjectsServiceAccountKeysCreateRequest.PrivateKeyTypeValueValuesEnum
    value.
  """
  # apitools generates a separate enum type for each occurrence of the enum
  # in the proto, and values from different occurrences do not compare
  # equal, so an explicit mapping is required.
  if key_type == KEY_TYPES.TYPE_PKCS12_FILE:
    return CREATE_KEY_TYPES.TYPE_PKCS12_FILE
  if key_type == KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
    return CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  return CREATE_KEY_TYPES.TYPE_UNSPECIFIED
def KeyTypeFromCreateKeyType(key_type):
  """The inverse of *toCreateKeyType*."""
  if key_type == CREATE_KEY_TYPES.TYPE_PKCS12_FILE:
    return KEY_TYPES.TYPE_PKCS12_FILE
  if key_type == CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
    return KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  return KEY_TYPES.TYPE_UNSPECIFIED
class IAMServiceAccountException(core_exceptions.Error):
  """An exception for IAM service account related errors."""

  def __init__(self, status_msg, address, key_id=None):
    # Enrich the status message with whatever identifies the failing
    # resource: a specific key, the service account itself, or nothing when
    # no address is known.
    if key_id:
      message = '{0}: key [{1}] for service account [{2}]'.format(
          status_msg, key_id, address)
    elif address:
      message = '{0}: service account [{1}]'.format(status_msg, address)
    else:
      message = status_msg
    super(IAMServiceAccountException, self).__init__(message)
def ConvertToServiceAccountException(http_error, address, key_id=None):
  """Convert HTTP error to IAM specific exception, based on the status code."""
  status = http_error.status_code
  if status == httplib.CONFLICT:
    # Conflicts are returned unchanged so the caller can retry.
    return http_error
  error_msg = None
  if status == httplib.NOT_FOUND:
    error_msg = 'Not found'
  elif status == httplib.FORBIDDEN:
    error_msg = 'Permission denied'
  elif status == httplib.BAD_REQUEST:
    # For an invalid request simply report the server's own error message;
    # the service account email (the 'address' variable) is not relevant and
    # should not appear in the message.
    content = json.loads(http_error.content)
    # 'error' holds the error message content in JSON form, as can be seen
    # by using the --log-http flag.
    error_msg = content.get('error', {}).get('message', 0)
    address = None
  if error_msg:
    return IAMServiceAccountException(error_msg, address, key_id)
  # TODO(user): Add a test for this exception type.
  return gcloud_exceptions.ToolException.FromCurrent()
def AccountNameValidator():
  """Regexp validator for service account names (see the create API docs)."""
  # https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create
  pattern = r'[a-z][a-z0-9\-]{4,28}[a-z0-9]'
  explanation = (
      'Service account name must be between 6 and 30 characters (inclusive), '
      'must begin with a lowercase letter, and consist of alphanumeric '
      'characters that can be separated by hyphens.')
  return arg_parsers.RegexpValidator(pattern, explanation)
def ProjectToProjectResourceName(project):
  """Turns a project id into a project resource name."""
  return 'projects/%s' % project
def EmailToAccountResourceName(email):
  """Turns an email into a service account resource name."""
  return 'projects/-/serviceAccounts/%s' % email
def EmailAndKeyToResourceName(email, key):
  """Turns an email and key id into a key resource name."""
  return 'projects/-/serviceAccounts/%s/keys/%s' % (email, key)
def GetKeyIdFromResourceName(name):
  """Gets the key id from a resource name. No validation is done."""
  # Key resource names look like
  # projects/<p>/serviceAccounts/<email>/keys/<id>; the key id is the sixth
  # path segment.
  return name.split('/')[5]
def PublicKeyTypeFromString(key_str):
  """Parses a string into a PublicKeyType enum.

  Args:
    key_str: A string representation of a PublicKeyType. Can be either *pem*
      or *raw*.

  Returns:
    A PublicKeyTypeValueValuesEnum value.
  """
  # Anything other than 'pem' (including 'raw') falls back to the raw format.
  wants_pem = key_str == 'pem'
  return (PUBLIC_KEY_TYPES.TYPE_X509_PEM_FILE if wants_pem
          else PUBLIC_KEY_TYPES.TYPE_RAW_PUBLIC_KEY)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import gfile
from tensorflow.python.platform import logging
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import event_file_loader
from tensorflow.python.summary.impl import reservoir
# Shorthand alias used by the record-type definitions below.
namedtuple = collections.namedtuple
# One scalar summary value at a given wall time / global step.
ScalarEvent = namedtuple('ScalarEvent',
                         ['wall_time', 'step', 'value'])
# A histogram down-sampled to a fixed set of basis points (see
# NORMAL_HISTOGRAM_BPS below).
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
                                      ['wall_time', 'step',
                                       'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
                                      ['basis_point', 'value'])
# A full histogram summary with its decoded statistics and buckets.
HistogramEvent = namedtuple('HistogramEvent',
                            ['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue',
                            ['min', 'max', 'num', 'sum', 'sum_squares',
                             'bucket_limit', 'bucket'])
# One encoded image summary plus its pixel dimensions.
ImageEvent = namedtuple('ImageEvent',
                        ['wall_time', 'step', 'encoded_image_string',
                         'width', 'height'])
## Different types of summary events handled by the event_accumulator
SUMMARY_TYPES = ('_scalars', '_histograms', '_compressed_histograms', '_images')
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
SCALARS = 'scalars'
GRAPH = 'graph'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
# Default per-tag-type caps on how many events are kept in memory.
DEFAULT_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 500,
    IMAGES: 4,
    SCALARS: 10000,
    HISTOGRAMS: 1,
}
# Size guidance with no caps at all (0 means "keep everything").
STORE_EVERYTHING_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 0,
    IMAGES: 0,
    SCALARS: 0,
    HISTOGRAMS: 0,
}
def IsTensorFlowEventsFile(path):
  """Heuristic: a path containing 'tfevents' is probably a TF Events file."""
  return path.find('tfevents') != -1
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
data types. Then, various functional endpoints (eg
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
Before usage, the `EventAccumulator` must be activated via `Reload()`. This
method synchronosly loads all of the data written so far.
Histograms and images are very large, so storing all of them is not
recommended.
@@Reload
@@Tags
@@Scalars
@@Graph
@@Histograms
@@CompressedHistograms
@@Images
"""
def __init__(self, path, size_guidance=DEFAULT_SIZE_GUIDANCE,
             compression_bps=NORMAL_HISTOGRAM_BPS):
  """Construct the `EventAccumulator`.

  Args:
    path: A file path to a directory containing tf events files, or a single
      tf events file. The accumulator will load events from this path.
    size_guidance: Map from a `tagType` string to the number of items to
      keep per tag of that type (0 keeps everything).  Tag types missing
      from the map fall back to DEFAULT_SIZE_GUIDANCE, which tries not to
      store too much so as to avoid OOMing the client.
    compression_bps: Basis points at which histograms are compressed for
      the `CompressedHistograms` tag (for details see
      `ProcessCompressedHistogram`).
  """
  # Start from the defaults, then apply the caller's overrides.
  sizes = dict(DEFAULT_SIZE_GUIDANCE)
  for key in DEFAULT_SIZE_GUIDANCE:
    if key in size_guidance:
      sizes[key] = size_guidance[key]
  # One reservoir per summary type, sized per the guidance above.
  self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
  self._graph = None
  self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
  self._compressed_histograms = reservoir.Reservoir(
      size=sizes[COMPRESSED_HISTOGRAMS])
  self._images = reservoir.Reservoir(size=sizes[IMAGES])
  self._generator_mutex = threading.Lock()
  self._generator = _GeneratorFromPath(path)
  self._activated = False
  self._compression_bps = compression_bps
  # Bookkeeping used to detect restarts and out-of-order events.
  self.most_recent_step = -1
  self.most_recent_wall_time = -1
  self.file_version = None
def Reload(self):
  """Loads all events added since the last call to `Reload`.

  If `Reload` was never called, loads all events in the file.
  Calling `Reload` activates the `EventAccumulator`.

  Returns:
    The `EventAccumulator`.
  """
  self._activated = True
  with self._generator_mutex:
    for event in self._generator.Load():
      # Remember the stream's declared file_version so the purging strategy
      # below can depend on it.
      if event.HasField('file_version'):
        new_file_version = _ParseFileVersion(event.file_version)
        if self.file_version and self.file_version != new_file_version:
          ## This should not happen.
          logging.warn(('Found new file_version for event.proto. This will '
                        'affect purging logic for TensorFlow restarts. '
                        'Old: {0} New: {1}').format(self.file_version,
                                                    new_file_version))
        self.file_version = new_file_version
      ## Check if the event happened after a crash, and purge expired tags.
      if self.file_version and self.file_version >= 2:
        ## If the file_version is recent enough, use the SessionLog enum
        ## to check for restarts.
        self._CheckForRestartAndMaybePurge(event)
      else:
        ## If there is no file version, default to old logic of checking for
        ## out of order steps.
        self._CheckForOutOfOrderStepAndMaybePurge(event)
      ## Process the event
      if event.HasField('graph_def'):
        # Only the most recent graph is kept.
        if self._graph is not None:
          logging.warn(('Found more than one graph event per run.'
                        'Overwritting the graph with the newest event.'))
        self._graph = event.graph_def
      elif event.HasField('summary'):
        # Dispatch each summary value to the processor for its data type.
        for value in event.summary.value:
          if value.HasField('simple_value'):
            self._ProcessScalar(value.tag, event.wall_time, event.step,
                                value.simple_value)
          elif value.HasField('histo'):
            self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                   value.histo)
            # Histograms are additionally down-sampled into compressed form.
            self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                             event.step, value.histo)
          elif value.HasField('image'):
            self._ProcessImage(value.tag, event.wall_time, event.step,
                               value.image)
  return self
def Tags(self):
  """Return all tags found in the value stream.

  Raises:
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    A `{tagType: ['list', 'of', 'tags']}` dictionary.
  """
  self._VerifyActivated()
  return {
      IMAGES: self._images.Keys(),
      HISTOGRAMS: self._histograms.Keys(),
      SCALARS: self._scalars.Keys(),
      COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
      # GRAPH maps to a bool, not a tag list: graphs are untagged.
      GRAPH: self._graph is not None,
  }
def Scalars(self, tag):
  """Given a summary tag, return all associated `ScalarEvent`s.

  Args:
    tag: A string tag associated with the events.

  Raises:
    KeyError: If the tag is not found.
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    An array of `ScalarEvent`s.
  """
  # Raises RuntimeError if Reload() has never been called.
  self._VerifyActivated()
  return self._scalars.Items(tag)
def Graph(self):
  """Return the graph definition, if there is one.

  Raises:
    ValueError: If there is no graph for this run.
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    The `graph_def` proto.
  """
  self._VerifyActivated()
  if self._graph is None:
    raise ValueError('There is no graph in this EventAccumulator')
  # The graph is stored serialized; decode it on demand.
  parsed = graph_pb2.GraphDef()
  parsed.ParseFromString(self._graph)
  return parsed
def Histograms(self, tag):
  """Given a summary tag, return all associated histograms.

  Args:
    tag: A string tag associated with the events.

  Raises:
    KeyError: If the tag is not found.
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    An array of `HistogramEvent`s.
  """
  # Raises RuntimeError if Reload() has never been called.
  self._VerifyActivated()
  return self._histograms.Items(tag)
def CompressedHistograms(self, tag):
  """Given a summary tag, return all associated compressed histograms.

  Args:
    tag: A string tag associated with the events.

  Raises:
    KeyError: If the tag is not found.
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    An array of `CompressedHistogramEvent`s.
  """
  # Raises RuntimeError if Reload() has never been called.
  self._VerifyActivated()
  return self._compressed_histograms.Items(tag)
def Images(self, tag):
  """Given a summary tag, return all associated images.

  Args:
    tag: A string tag associated with the events.

  Raises:
    KeyError: If the tag is not found.
    RuntimeError: If the `EventAccumulator` has not been activated.

  Returns:
    An array of `ImageEvent`s.
  """
  # Raises RuntimeError if Reload() has never been called.
  self._VerifyActivated()
  return self._images.Items(tag)
def _CheckForRestartAndMaybePurge(self, event):
  """Check and discard expired events using SessionLog.START.

  Check for a SessionLog.START event and purge all previously seen events
  with larger steps, because they are out of date. Because of supervisor
  threading, it is possible that this logic will cause the first few event
  messages to be discarded since supervisor threading does not guarantee
  that the START message is deterministically written first.

  This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
  can inadvertently discard events due to supervisor threading.

  Args:
    event: The event to use as reference. If the event is a START event, all
      previously seen events with a greater event.step will be purged.
  """
  if not event.HasField('session_log'):
    return
  if event.session_log.status == SessionLog.START:
    # Purge globally (not per-tag): a restart invalidates everything newer.
    self._Purge(event, by_tags=False)
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
"""Check for out-of-order event.step and discard expired events for tags.
Check if the event is out of order relative to the global most recent step.
If it is, purge outdated summaries for tags that the event contains.
Args:
event: The event to use as reference. If the event is out-of-order, all
events with the same tags, but with a greater event.step will be purged.
"""
if event.step < self.most_recent_step and event.HasField('summary'):
self._Purge(event, by_tags=True)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
def _Percentile(self, compression_bps, bucket_limit, cumsum_weights,
                histo_min, histo_max, histo_num):
  """Linearly interpolates a histogram weight for a particular basis point.

  Uses clamping methods on `histo_min` and `histo_max` to produce tight
  linear estimates of the histogram weight at a particular basis point.

  Args:
    compression_bps: The desired basis point at which to estimate the weight
    bucket_limit: An array of the RHS histogram bucket limits
    cumsum_weights: A cumulative sum of the fraction of weights in each
      histogram bucket, represented in basis points.
    histo_min: The minimum weight observed in the weight histogram
    histo_max: The maximum weight observed in the weight histogram
    histo_num: The number of items in the weight histogram

  Returns:
    A linearly interpolated value of the histogram weight estimate.
  """
  # An empty histogram has no meaningful percentile.
  if histo_num == 0:
    return 0

  for i, cumsum in enumerate(cumsum_weights):
    if cumsum < compression_bps:
      continue
    cumsum_prev = 0 if i == 0 else cumsum_weights[i - 1]

    # Prevent cumsum = 0, cumsum_prev = 0, lerp divide by zero.
    if cumsum == cumsum_prev:
      continue

    # Lower bound of interpolation, clamped to the observed minimum.
    if i > 0 and cumsum_prev > 0:
      lhs = max(bucket_limit[i - 1], histo_min)
    else:
      lhs = histo_min
    # Upper bound of interpolation, clamped to the observed maximum.
    rhs = min(bucket_limit[i], histo_max)

    return _Remap(compression_bps, cumsum_prev, cumsum, lhs, rhs)

  ## We have not exceeded cumsum, so return the max observed.
  return histo_max
def _ProcessCompressedHistogram(self, tag, wall_time, step, histo):
  """Processes a histogram by adding a compression to accumulated state.

  Adds a compressed histogram by linearly interpolating histogram buckets to
  represent the histogram weight at multiple compression points. Uses
  self._compression_bps (passed to EventAccumulator constructor) as the
  compression points (represented in basis points, 1/100ths of a percent).

  Args:
    tag: A string name of the tag for which histograms are retrieved.
    wall_time: Time in seconds since epoch
    step: Number of steps that have passed
    histo: proto2 histogram Object
  """

  def _CumulativeSum(arr):
    # Prefix sums of arr; O(n^2) but histogram bucket counts are small.
    return [sum(arr[:i+1]) for i in range(len(arr))]

  # Convert from proto repeated field into a Python list.
  bucket = list(histo.bucket)
  bucket_limit = list(histo.bucket_limit)

  bucket_total = sum(bucket)
  # Guard against an all-empty histogram: avoid dividing by zero below.
  if bucket_total == 0:
    bucket_total = 1
  # Fraction of total weight per bucket, expressed in basis points
  # (10000 bps == 100%).
  fraction_weights = [10000 * x / bucket_total for x in bucket]
  cumsum_weights = _CumulativeSum(fraction_weights)

  percentiles = [
      self._Percentile(bps, bucket_limit, cumsum_weights, histo.min,
                       histo.max, histo.num) for bps in self._compression_bps
  ]

  compressed_histogram_values = [CompressedHistogramValue(
      basis_point=bps,
      value=value) for bps, value in zip(self._compression_bps, percentiles)]
  histogram_event = CompressedHistogramEvent(
      wall_time=wall_time,
      step=step,
      compressed_histogram_values=compressed_histogram_values)

  self._compressed_histograms.AddItem(tag, histogram_event)
def _ProcessHistogram(self, tag, wall_time, step, histo):
  """Processes a histogram by adding it to accumulated state."""
  # proto repeated fields are converted to plain Python lists before storage.
  value = HistogramValue(
      min=histo.min,
      max=histo.max,
      num=histo.num,
      sum=histo.sum,
      sum_squares=histo.sum_squares,
      bucket_limit=list(histo.bucket_limit),
      bucket=list(histo.bucket),
  )
  event = HistogramEvent(
      wall_time=wall_time,
      step=step,
      histogram_value=value,
  )
  self._histograms.AddItem(tag, event)
def _ProcessImage(self, tag, wall_time, step, image):
  """Processes an image by adding it to accumulated state."""
  image_event = ImageEvent(wall_time=wall_time,
                           step=step,
                           encoded_image_string=image.encoded_image_string,
                           width=image.width,
                           height=image.height)
  self._images.AddItem(tag, image_event)
def _ProcessScalar(self, tag, wall_time, step, scalar):
  """Processes a simple value by adding it to accumulated state."""
  self._scalars.AddItem(
      tag, ScalarEvent(wall_time=wall_time, step=step, value=scalar))
def _Purge(self, event, by_tags):
  """Purge all events that have occurred after the given event.step.

  If by_tags is True, purge all events that occurred after the given
  event.step, but only for the tags that the event has. Non-sequential
  event.steps suggest that a TensorFlow restart occurred, and we discard
  the out-of-order events to display a consistent view in TensorBoard.

  Discarding by tags is the safer method, when we are unsure whether a restart
  has occurred, given that threading in supervisor can cause events of
  different tags to arrive with unsynchronized step values.

  If by_tags is False, then purge all events with event.step greater than the
  given event.step. This can be used when we are certain that a TensorFlow
  restart has occurred and these events can be discarded.

  Args:
    event: The event to use as reference for the purge. All events with
      the same tags, but with a greater event.step will be purged.
    by_tags: Bool to dictate whether to discard all out-of-order events or
      only those that are associated with the given reference event.
  """
  ## Keep data in reservoirs that has a step less than event.step
  # Named function instead of a lambda bound to a name (PEP 8 E731).
  def _NotExpired(x):
    return x.step < event.step

  if by_tags:
    def _ExpiredPerTag(value):
      # Number of items dropped from each reservoir type for this tag.
      return [getattr(self, x).FilterItems(_NotExpired, value.tag)
              for x in SUMMARY_TYPES]

    expired_per_tags = [_ExpiredPerTag(value)
                        for value in event.summary.value]
    expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
  else:
    expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
                        for x in SUMMARY_TYPES]

  if sum(expired_per_type) > 0:
    purge_msg = _GetPurgeMessage(self.most_recent_step,
                                 self.most_recent_wall_time, event.step,
                                 event.wall_time, *expired_per_type)
    # logging.warn is a deprecated alias; logging.warning is the
    # supported spelling.
    logging.warning(purge_msg)
def _VerifyActivated(self):
  """Raise RuntimeError if the accumulator has not been activated."""
  if not self._activated:
    raise RuntimeError('Accumulator must be activated before it may be used.')
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from Tensorboard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, and {} images.').format(
most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images)
def _GeneratorFromPath(path):
  """Create an event generator for file or directory at given path string."""
  loader_factory = event_file_loader.EventFileLoader
  if gfile.IsDirectory(path):
    # A directory: watch it, loading each events file it contains in turn.
    return directory_watcher.DirectoryWatcher(path, loader_factory,
                                              IsTensorFlowEventsFile)
  else:
    # A single events file: load it directly.
    return loader_factory(path)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
| |
"""Unit tests for the Travis CI integration."""
from __future__ import unicode_literals
import json
from django.core.urlresolvers import reverse
from django.utils.six.moves.urllib.error import HTTPError
from djblets.conditions import ConditionSet, Condition
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.reviews.conditions import ReviewRequestRepositoriesChoice
from reviewboard.reviews.models import StatusUpdate
from reviewboard.reviews.signals import status_update_request_run
from rbintegrations.travisci.api import TravisAPI
from rbintegrations.travisci.forms import TravisCIIntegrationConfigForm
from rbintegrations.travisci.integration import TravisCIIntegration
from rbintegrations.travisci.views import TravisCIWebHookView
from rbintegrations.testing.testcases import IntegrationTestCase
try:
# Review Board >= 4.0
from reviewboard.hostingsvcs.service import (HostingServiceHTTPRequest,
HostingServiceHTTPResponse)
except ImportError:
# Review Board < 4.0
HostingServiceHTTPRequest = None
HostingServiceHTTPResponse = None
class BaseTravisCITestCase(IntegrationTestCase):
    """Base class for Travis CI tests."""

    integration_cls = TravisCIIntegration
    fixtures = ['test_scmtools', 'test_users']

    def _create_repository(self, github=True, repository_plan='public-org'):
        """Create and return a repository for testing.

        Args:
            github (bool, optional):
                Whether the repository should use the GitHub hosting service.

            repository_plan (unicode, optional):
                The type of GitHub repository plan.

        Returns:
            reviewboard.scmtools.models.Repository:
            A repository for use in unit tests.
        """
        if github:
            account = HostingServiceAccount(service_name='github',
                                            username='myuser')

            # Review Board <= 3.0.17.
            def _http_post_authorize(self, *args, **kwargs):
                return json.dumps({
                    'id': 1,
                    'url': 'https://api.github.com/authorizations/1',
                    'scopes': ['user', 'repo'],
                    'token': 'abc123',
                    'note': '',
                    'note_url': '',
                    'updated_at': '2012-05-04T03:30:00Z',
                    'created_at': '2012-05-04T03:30:00Z',
                }).encode('utf-8'), {}

            # Review Board >= 3.0.18.
            def _http_get_user(_self, url, *args, **kwargs):
                self.assertEqual(url, 'https://api.github.com/user')

                payload = b'{}'
                headers = {
                    str('X-OAuth-Scopes'): str('admin:repo_hook, repo, user'),
                }

                if HostingServiceHTTPResponse is not None:
                    # Review Board >= 4.0
                    return HostingServiceHTTPResponse(
                        request=HostingServiceHTTPRequest(url=url),
                        url=url,
                        data=payload,
                        headers=headers,
                        status_code=200)
                else:
                    # Review Board < 4.0
                    return payload, headers

            service = account.service
            self.spy_on(service.client.http_post,
                        call_fake=_http_post_authorize)
            self.spy_on(service.client.http_get,
                        call_fake=_http_get_user)

            service.authorize('myuser', 'mypass', None)
            self.assertTrue(account.is_authorized)

            service.client.http_post.unspy()
            service.client.http_get.unspy()

            repository = self.create_repository()
            repository.hosting_account = account
            repository.extra_data['repository_plan'] = repository_plan

            if repository_plan == 'public':
                repository.extra_data['github_public_repo_name'] = \
                    'mypublicrepo'
            elif repository_plan == 'public-org':
                repository.extra_data['github_public_org_name'] = 'mypublicorg'
                repository.extra_data['github_public_org_repo_name'] = \
                    'mypublicorgrepo'
            elif repository_plan == 'private':
                repository.extra_data['github_private_repo_name'] = \
                    'myprivaterepo'
            elif repository_plan == 'private-org':
                repository.extra_data['github_private_org_name'] = \
                    'myprivateorg'
                repository.extra_data['github_private_org_repo_name'] = \
                    'myprivateorgrepo'

            repository.save()
            return repository
        else:
            return self.create_repository()

    def _create_config(self, enterprise=False, with_local_site=False,
                       run_manually=False):
        """Create an integration config.

        Args:
            enterprise (bool, optional):
                Whether to use an enterprise endpoint or the default
                open-source endpoint.

            with_local_site (bool, optional):
                Whether to limit the config to a local site.

            run_manually (bool, optional):
                Whether builds are only triggered manually, instead of on
                review request publish.

        Returns:
            The saved integration config.
        """
        choice = ReviewRequestRepositoriesChoice()

        condition_set = ConditionSet(conditions=[
            Condition(choice=choice,
                      operator=choice.get_operator('any'))
        ])

        if with_local_site:
            local_site = self.get_local_site(name=self.local_site_name)
        else:
            local_site = None

        config = self.integration.create_config(name='Config 1',
                                                enabled=True,
                                                local_site=local_site)
        config.set('conditions', condition_set.serialize())
        config.set('travis_yml', 'script:\n    python ./tests/runtests.py')
        config.set('branch_name', 'review-requests')
        config.set('run_manually', run_manually)

        if enterprise:
            config.set('travis_endpoint', TravisAPI.ENTERPRISE_ENDPOINT)
            config.set('travis_custom_endpoint', 'https://travis.example.com/')
        else:
            config.set('travis_endpoint', TravisAPI.OPEN_SOURCE_ENDPOINT)

        config.save()

        return config

    def _spy_on_make_request(self):
        """Wrapper function for spying on the urlopen function.

        Returns:
            dict:
            Faked response from TravisCI.
        """
        data = {}

        # The fake mirrors TravisAPI._make_request's signature. `headers`
        # previously used a mutable default ({}); None avoids the shared
        # mutable-default pitfall (the fake never reads it anyway).
        def _make_request(api, url, body=None, method='GET', headers=None,
                          content_type=None):
            # We can't actually do any assertions in here, because they'll get
            # swallowed by SignalHook's sandboxing. We therefore record the
            # data we need and assert later.
            data['url'] = url
            data['request'] = json.loads(body)['request']

            return '{}'

        self.spy_on(TravisAPI._make_request, owner=TravisAPI,
                    call_fake=_make_request)

        return data
class TravisCIIntegrationTests(BaseTravisCITestCase):
    """Tests for Travis CI."""

    def test_build_new_review_request(self):
        """Testing TravisCIIntegration builds a new review request"""
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        config = self._create_config()
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/mypublicorg%2Fmypublicorgrepo/'
            'requests')
        self.assertEqual(
            data['request']['config']['env']['global'],
            [
                'REVIEWBOARD_STATUS_UPDATE_ID=1',
                'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,
            ])
        self.assertEqual(data['request']['message'],
                         'Test Summary\n\nTest Description')
        self.assertTrue('git fetch --unshallow origin || true'
                        in data['request']['config']['before_install'])
        self.assertTrue('git checkout %s' % diffset.base_commit_id
                        in data['request']['config']['before_install'])
        self.assertEqual(data['request']['branch'], 'review-requests')

    def test_build_new_review_request_on_enterprise_travis(self):
        """Testing TravisCIIntegration builds a new review request with Travis
        CI Enterprise
        """
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        config = self._create_config(enterprise=True)
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://travis.example.com/api/repo/'
            'mypublicorg%2Fmypublicorgrepo/requests')
        self.assertEqual(
            data['request']['config']['env']['global'],
            [
                'REVIEWBOARD_STATUS_UPDATE_ID=1',
                'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,
            ])
        self.assertEqual(data['request']['message'],
                         'Test Summary\n\nTest Description')
        self.assertTrue('git checkout %s' % diffset.base_commit_id
                        in data['request']['config']['before_install'])
        self.assertEqual(data['request']['branch'], 'review-requests')

    def test_build_new_review_request_with_parent_diff(self):
        """Testing TravisCIIntegration build script with a parent diff"""
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        filediff = self.create_filediff(diffset)
        filediff.parent_diff = (
            b'--- README\trevision 123\n'
            b'+++ README\trevision 123\n'
            b'@@ -1 +1 @@\n'
            b'-Hello, world!\n'
            b'+Hello, everybody!\n'
        )
        filediff.save()

        config = self._create_config(enterprise=True)
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://travis.example.com/api/repo/'
            'mypublicorg%2Fmypublicorgrepo/requests')
        self.assertEqual(
            data['request']['config']['env']['global'],
            [
                'REVIEWBOARD_STATUS_UPDATE_ID=1',
                'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,
            ])
        self.assertEqual(data['request']['message'],
                         'Test Summary\n\nTest Description')
        self.assertTrue('git checkout %s' % diffset.base_commit_id
                        in data['request']['config']['before_install'])
        self.assertEqual(data['request']['branch'], 'review-requests')

        # Both the parent diff and the diff itself should be applied.
        patch_count = len(
            [cmd for cmd in data['request']['config']['before_install']
             if 'patch -p1' in cmd])
        self.assertEqual(patch_count, 2)

    def test_build_new_review_request_with_git_depth(self):
        """Testing TravisCIIntegration builds with git: depth: False"""
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        config = self._create_config()
        config.set('travis_yml',
                   'git:\n'
                   '  depth:\n'
                   '    False\n'
                   '\n'
                   'script:\n'
                   '    python ./tests/runtests.py')
        config.save()

        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/mypublicorg%2Fmypublicorgrepo/'
            'requests')
        self.assertEqual(
            data['request']['config']['env']['global'],
            [
                'REVIEWBOARD_STATUS_UPDATE_ID=1',
                'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,
            ])
        self.assertEqual(data['request']['message'],
                         'Test Summary\n\nTest Description')
        # With depth disabled, the clone is already full, so no unshallow
        # fetch should be issued.
        self.assertFalse('git fetch --unshallow origin || true'
                         in data['request']['config']['before_install'])
        self.assertEqual(data['request']['branch'], 'review-requests')

    def test_non_github_review_request(self):
        """Testing TravisCIIntegration skipping a non-GitHub review request"""
        repository = self._create_repository(github=False)
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        self._create_config()
        self.integration.enable_integration()

        self.spy_on(TravisAPI._make_request, owner=TravisAPI,
                    call_original=False)

        review_request.publish(review_request.submitter)

        self.assertFalse(TravisAPI._make_request.called)

    def test_travisci_config_form_valid(self):
        """Testing TravisCIIntegrationConfigForm validation success"""
        form = TravisCIIntegrationConfigForm(
            integration=self.integration,
            request=None,
            data={
                'conditions_last_id': 0,
                'conditions_mode': 'always',
                'name': 'test',
                'travis_endpoint': TravisAPI.OPEN_SOURCE_ENDPOINT,
                'travis_ci_token': '123456',
                'travis_yml': 'script:\n    - python ./tests/runtests.py',
            })

        self.spy_on(TravisAPI.get_user, owner=TravisAPI, call_original=False)
        self.spy_on(TravisAPI.lint,
                    owner=TravisAPI,
                    call_fake=lambda x, travis_yml: {'warnings': []})

        self.assertTrue(form.is_valid())

    def test_travisci_config_form_lint_failure(self):
        """Testing TravisCIIntegrationConfigForm validation lint failure"""
        self.spy_on(TravisAPI.get_user, owner=TravisAPI, call_original=False)
        self.spy_on(
            TravisAPI.lint,
            owner=TravisAPI,
            call_fake=lambda x, travis_yml: {
                'warnings': [{
                    'key': 'script',
                    'message': 'An error',
                }],
            })

        form = TravisCIIntegrationConfigForm(
            integration=self.integration,
            request=None,
            data={
                'conditions_last_id': 0,
                'conditions_mode': 'always',
                'name': 'test',
                'travis_endpoint': TravisAPI.OPEN_SOURCE_ENDPOINT,
                'travis_ci_token': '123456',
                'travis_yml': 'script:\n    - python ./tests/runtests.py',
            })

        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['travis_yml'],
                         ['In script section: An error'])

    def test_travisci_config_form_auth_failure(self):
        """Testing TravisCIIntegrationConfigForm config validation"""
        def _raise_403(obj):
            raise HTTPError('', 403, 'Authentication failed', None, None)

        self.spy_on(TravisAPI.get_user, owner=TravisAPI, call_fake=_raise_403)

        form = TravisCIIntegrationConfigForm(
            integration=self.integration,
            request=None,
            data={
                'conditions_last_id': 0,
                'conditions_mode': 'always',
                'name': 'test',
                'travis_endpoint': TravisAPI.OPEN_SOURCE_ENDPOINT,
                'travis_ci_token': '123456',
                'travis_yml': 'script:\n    - python ./tests/runtests.py',
            })

        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['travis_ci_token'],
                         ['Unable to authenticate with this API token.'])

    def test_manual_run_no_build_on_publish(self):
        """Testing lack of TravisCIIntegration build when a new review
        request is made with the run manually configuration
        """
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        self._create_config(run_manually=True)
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertFalse(TravisAPI._make_request.called)
        self.assertEqual(data, {})

    def test_build_manual_run(self):
        """Testing TravisCIIntegration build via a manual trigger"""
        repository = self._create_repository()
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        config = self._create_config()
        self.integration.enable_integration()

        status_update = \
            self.create_status_update(service_id='travis-ci',
                                      review_request=review_request)

        data = self._spy_on_make_request()

        status_update_request_run.send(sender=self.__class__,
                                       status_update=status_update)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/'
            'mypublicorg%2Fmypublicorgrepo/requests')
        self.assertEqual(
            data['request']['config']['env']['global'],
            [
                'REVIEWBOARD_STATUS_UPDATE_ID=1',
                'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d' % config.pk,
            ])
        self.assertEqual(data['request']['message'],
                         'Test Summary\n\nTest Description')
        self.assertTrue('git fetch --unshallow origin || true'
                        in data['request']['config']['before_install'])
        self.assertTrue('git checkout %s' % diffset.base_commit_id
                        in data['request']['config']['before_install'])
        self.assertEqual(data['request']['branch'], 'review-requests')

    def test_build_new_review_request_with_public_github_repository(self):
        """Testing TravisCIIntegration builds for a new review request with
        a public GitHub repository
        """
        repository = self._create_repository(repository_plan='public')
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        self._create_config()
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/myuser%2Fmypublicrepo/requests')

    def test_build_new_review_request_with_private_github_repository(self):
        """Testing TravisCIIntegration builds for a new review request with
        a private GitHub repository
        """
        repository = self._create_repository(repository_plan='private')
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        self._create_config()
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/myuser%2Fmyprivaterepo/requests')

    def test_build_new_review_request_with_private_org_github_repository(self):
        """Testing TravisCIIntegration builds for a new review request with
        a private organization GitHub repository
        """
        repository = self._create_repository(repository_plan='private-org')
        review_request = self.create_review_request(repository=repository)

        diffset = self.create_diffset(review_request=review_request)
        diffset.base_commit_id = '8fd69d70f07b57c21ad8733c1c04ae604d21493f'
        diffset.save()

        self._create_config()
        self.integration.enable_integration()

        data = self._spy_on_make_request()

        review_request.publish(review_request.submitter)

        self.assertTrue(TravisAPI._make_request.called)
        self.assertEqual(
            data['url'],
            'https://api.travis-ci.org/repo/'
            'myprivateorg%2Fmyprivateorgrepo/requests')
class TravisCIWebHookTests(BaseTravisCITestCase):
    """Tests for the Travis CI webhook handler."""

    def setUp(self):
        super(TravisCIWebHookTests, self).setUp()

        self.repository = self._create_repository()
        self.review_request = self.create_review_request(
            repository=self.repository)
        self.status_update = self.create_status_update(self.review_request)
        self.config = self._create_config()
        self.integration.enable_integration()

        self.webhook_url = reverse('travis-ci-webhook')

    def test_webhook_no_env(self):
        """Testing TravisCIWebHookView with missing env"""
        payload = json.dumps({})

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(rsp.content, b'Got event without an env in config.')

    def test_webhook_missing_ids(self):
        """Testing TravisCIWebHookView with missing object IDs"""
        # First: config ID present but status update ID missing.
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
        })

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(
            rsp.content,
            b'Unable to find REVIEWBOARD_STATUS_UPDATE_ID in payload.')

        # Second: status update ID present but config ID missing.
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                        ],
                    },
                },
            ],
        })

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(
            rsp.content,
            b'Unable to find REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID in '
            b'payload.')

    def test_webhook_bad_integration_config(self):
        """Testing TravisCIWebHookView with incorrect integration config ID"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % (self.config.pk + 1),
                        ],
                    },
                },
            ],
        })

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(
            rsp.content,
            b'Unable to find matching integration config ID %d.'
            % (self.config.pk + 1))

    def test_webhook_bad_signature(self):
        """Testing TravisCIWebHookView with bad HTTP_SIGNATURE"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
        })

        # No signature spy here: the real validation runs and rejects the
        # unsigned request.
        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(
            rsp.content,
            b'Invalid Travis CI webhook signature for status update %d.'
            % self.status_update.pk)

    def test_webhook_bad_status_update(self):
        """Testing TravisCIWebHookView with incorrect status update ID"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % (self.status_update.pk + 1),
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
        })

        self.spy_on(TravisCIWebHookView._validate_signature,
                    owner=TravisCIWebHookView,
                    call_fake=lambda self, request, integration_config: True)

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 400)
        self.assertEqual(
            rsp.content,
            b'Unable to find matching status update ID %d.'
            % (self.status_update.pk + 1))

    def test_webhook_build_pending(self):
        """Testing TravisCIWebHookView build pending"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
            'build_url': 'https://example.com/build',
            'state': 'started',
        })

        self.spy_on(TravisCIWebHookView._validate_signature,
                    owner=TravisCIWebHookView,
                    call_fake=lambda self, request, integration_config: True)

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 200)

        # Reload the status update to observe the webhook's changes.
        self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
        self.assertEqual(self.status_update.url, 'https://example.com/build')
        self.assertEqual(self.status_update.state,
                         StatusUpdate.PENDING)

    def test_webhook_build_success(self):
        """Testing TravisCIWebHookView build success"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
            'build_url': 'https://example.com/build',
            'state': 'passed',
        })

        self.spy_on(TravisCIWebHookView._validate_signature,
                    owner=TravisCIWebHookView,
                    call_fake=lambda self, request, integration_config: True)

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 200)

        # Reload the status update to observe the webhook's changes.
        self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
        self.assertEqual(self.status_update.url, 'https://example.com/build')
        self.assertEqual(self.status_update.state,
                         StatusUpdate.DONE_SUCCESS)

    def test_webhook_build_error(self):
        """Testing TravisCIWebHookView build error"""
        payload = json.dumps({
            'matrix': [
                {
                    'config': {
                        'env': [
                            'REVIEWBOARD_STATUS_UPDATE_ID=%d'
                            % self.status_update.pk,
                            'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'
                            % self.config.pk,
                        ],
                    },
                },
            ],
            'build_url': 'https://example.com/build',
            'state': 'failed',
        })

        self.spy_on(TravisCIWebHookView._validate_signature,
                    owner=TravisCIWebHookView,
                    call_fake=lambda self, request, integration_config: True)

        rsp = self.client.post(self.webhook_url, {'payload': payload})
        self.assertEqual(rsp.status_code, 200)

        # Reload the status update to observe the webhook's changes.
        self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
        self.assertEqual(self.status_update.url, 'https://example.com/build')
        self.assertEqual(self.status_update.state,
                         StatusUpdate.DONE_FAILURE)
| |
# -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import re
import zlib
from random import random
from textwrap import dedent
from time import time
from django.core.cache.backends.base import (
DEFAULT_TIMEOUT, BaseCache, default_key_func
)
from django.db import connections, router
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.module_loading import import_string
from django_mysql.utils import collapse_spaces
try:
from django.utils.six.moves import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
# 64-bit integer bounds, matching MySQL's BIGINT [UNSIGNED] column limits.
BIGINT_SIGNED_MIN = -9223372036854775808
BIGINT_SIGNED_MAX = 9223372036854775807
BIGINT_UNSIGNED_MAX = 18446744073709551615
# Slightly modified copies of Options/BaseDatabaseCache from django's
# cache.backends.db - these allow us to act like a separate app for database
# routers (django_mysql), and not appear on django's `createcachetable`
# command
class Options(object):
    """A class that will quack like a Django model _meta class.

    This allows cache operations to be controlled by the router
    """
    def __init__(self, table):
        # Attributes mirror those django.db.router inspects on model _meta.
        self.db_table = table
        self.app_label = 'django_mysql'
        self.model_name = 'cacheentry'
        self.verbose_name = 'cache entry'
        self.verbose_name_plural = 'cache entries'
        self.object_name = 'CacheEntry'
        self.abstract = False
        self.managed = True
        self.proxy = False
        self.swapped = False
class BaseDatabaseCache(BaseCache):
    """Database cache base that exposes a fake model class for DB routing."""

    def __init__(self, table, params):
        super(BaseDatabaseCache, self).__init__(params)
        self._table = table

        # A throwaway model-like class so database routers can make
        # decisions about this cache table as if it were a real model.
        class CacheEntry(object):
            _meta = Options(table)
        self.cache_model_class = CacheEntry
# Matches keys produced by Django's default_key_func:
# '<prefix>:<version>:<key>'.
reverse_key_re = re.compile(r'^([^:]*):(\d+):(.*)')


def default_reverse_key_func(full_key):
    """
    Reverse of Django's default_key_func, i.e. undoing:
        def default_key_func(key, key_prefix, version):
            return '%s:%s:%s' % (key_prefix, version, key)

    Returns a (key, key_prefix, version) tuple.

    Raises:
        ValueError: if full_key does not look like it came from
            default_key_func.
    """
    match = reverse_key_re.match(full_key)
    if match is None:
        # Previously this crashed with an opaque AttributeError on
        # `None.group(...)`; fail with an explicit, descriptive error.
        raise ValueError(
            "Could not reverse key %r - it does not appear to have been "
            "generated by default_key_func" % (full_key,)
        )
    return match.group(3), match.group(1), int(match.group(2))
def get_reverse_key_func(reverse_key_func):
    """
    Function to decide which reverse key function to use.

    Defaults to ``None``, as any other value might not apply to the given
    KEY_FUNCTION. Also the user may not use any of the operations that
    require reversing the key_func.
    """
    if reverse_key_func is None:
        return None
    if callable(reverse_key_func):
        return reverse_key_func
    # A dotted-path string - import it.
    return import_string(reverse_key_func)
class MySQLCache(BaseDatabaseCache):
    """Django cache backend storing entries in a MySQL table.

    Each row holds (cache_key, value, value_type, expires):

    * ``value_type`` is ``'i'`` for integers stored natively in the blob,
      ``'p'`` for pickled objects and ``'z'`` for zlib-compressed pickles.
    * ``expires`` is milliseconds since the unix epoch (UTC).
    """

    # Got an error with the add() query using BIGINT_UNSIGNED_MAX, so use a
    # value slightly 1 bit less (still an incalculable time into the future of
    # 1970)
    FOREVER_TIMEOUT = BIGINT_UNSIGNED_MAX >> 1

    create_table_sql = dedent('''\
        CREATE TABLE `{table_name}` (
            cache_key varchar(255) CHARACTER SET utf8 COLLATE utf8_bin
                NOT NULL PRIMARY KEY,
            value longblob NOT NULL,
            value_type char(1) CHARACTER SET latin1 COLLATE latin1_bin
                NOT NULL DEFAULT 'p',
            expires BIGINT UNSIGNED NOT NULL
        );
    ''')

    @classmethod
    def _now(cls):
        # Values in the expires column are milliseconds since unix epoch (UTC)
        return int(time() * 1000)

    def __init__(self, table, params):
        """
        :param table: name of the MySQL table backing this cache
        :param params: the CACHES configuration dict; OPTIONS may contain
            COMPRESS_MIN_LENGTH, COMPRESS_LEVEL and CULL_PROBABILITY, and
            params may contain REVERSE_KEY_FUNCTION.
        """
        super(MySQLCache, self).__init__(table, params)
        options = params.get('OPTIONS', {})
        self._compress_min_length = options.get('COMPRESS_MIN_LENGTH', 5000)
        self._compress_level = options.get('COMPRESS_LEVEL', 6)
        self._cull_probability = options.get('CULL_PROBABILITY', 0.01)

        # Figure out our *reverse* key function
        if self.key_func is default_key_func:
            self.reverse_key_func = default_reverse_key_func
            if ':' in self.key_prefix:
                raise ValueError(
                    "Cannot use the default KEY_FUNCTION and "
                    "REVERSE_KEY_FUNCTION if you have a colon in your "
                    "KEY_PREFIX."
                )
        else:
            reverse_key_func = params.get('REVERSE_KEY_FUNCTION', None)
            self.reverse_key_func = get_reverse_key_func(reverse_key_func)

    # Django API + helpers

    def get(self, key, default=None, version=None):
        """Fetch a single key, returning ``default`` if absent or expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(
                self._get_query.format(table=table),
                (key, self._now())
            )
            row = cursor.fetchone()
        if row is None:
            return default
        else:
            value, value_type = row
            return self.decode(value, value_type)

    _get_query = collapse_spaces("""
        SELECT value, value_type
        FROM {table}
        WHERE cache_key = %s AND
              expires >= %s
    """)

    def get_many(self, keys, version=None):
        """Fetch several keys at once; missing/expired keys are omitted."""
        made_key_to_key = {
            self.make_key(key, version=version): key
            for key in keys
        }
        made_keys = list(made_key_to_key.keys())
        for key in made_keys:
            self.validate_key(key)
        # "IN ()" is a MySQL syntax error, so short-circuit when there is
        # nothing to fetch.
        if not made_keys:
            return {}
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(
                self._get_many_query.format(table=table),
                (made_keys, self._now())
            )
            rows = cursor.fetchall()
        data = {}
        for made_key, value, value_type in rows:
            key = made_key_to_key[made_key]
            data[key] = self.decode(value, value_type)
        return data

    _get_many_query = collapse_spaces("""
        SELECT cache_key, value, value_type
        FROM {table}
        WHERE cache_key IN %s AND
              expires >= %s
    """)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store ``value`` under ``key``."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store ``value`` only if ``key`` is absent or expired.

        Returns True if the value was stored.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)

    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        """Shared implementation of set()/add(); ``mode`` is 'set' or 'add'."""
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        self._maybe_cull()
        with connections[db].cursor() as cursor:
            value, value_type = self.encode(value)
            if mode == 'set':
                query = self._set_query
                params = (key, value, value_type, exp)
            elif mode == 'add':
                query = self._add_query
                params = (key, value, value_type, exp, self._now())
            cursor.execute(query.format(table=table), params)
            if mode == 'set':
                return True
            elif mode == 'add':
                # Use a special code in the add query for "did insert"
                insert_id = cursor.lastrowid
                return (insert_id != 444)

    _set_many_query = collapse_spaces("""
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES {{VALUES_CLAUSE}}
        ON DUPLICATE KEY UPDATE
            value=VALUES(value),
            value_type=VALUES(value_type),
            expires=VALUES(expires)
    """)

    _set_query = _set_many_query.replace('{{VALUES_CLAUSE}}',
                                         '(%s, %s, %s, %s)')

    # Uses the IFNULL / LEAST / LAST_INSERT_ID trick to communicate the special
    # value of 444 back to the client (LAST_INSERT_ID is otherwise 0, since
    # there is no AUTO_INCREMENT column)
    _add_query = collapse_spaces("""
        INSERT INTO {table} (cache_key, value, value_type, expires)
        VALUES (%s, %s, %s, %s)
        ON DUPLICATE KEY UPDATE
            value=IF(expires > @tmp_now:=%s, value, VALUES(value)),
            value_type=IF(expires > @tmp_now, value_type, VALUES(value_type)),
            expires=IF(
                expires > @tmp_now,
                IFNULL(
                    LEAST(LAST_INSERT_ID(444), NULL),
                    expires
                ),
                VALUES(expires)
            )
    """)

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """Store a dict of key -> value in a single multi-row INSERT."""
        # An empty VALUES clause would be a MySQL syntax error - nothing to do.
        if not data:
            return
        exp = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        self._maybe_cull()
        params = []
        for key, value in six.iteritems(data):
            made_key = self.make_key(key, version=version)
            self.validate_key(made_key)
            value, value_type = self.encode(value)
            params.extend((made_key, value, value_type, exp))
        query = self._set_many_query.replace(
            '{{VALUES_CLAUSE}}',
            ','.join('(%s, %s, %s, %s)' for key in data)
        ).format(table=table)
        with connections[db].cursor() as cursor:
            cursor.execute(query, params)

    def delete(self, key, version=None):
        """Remove ``key`` from the cache (no error if it is absent)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(self._delete_query.format(table=table), (key,))

    _delete_query = collapse_spaces("""
        DELETE FROM {table}
        WHERE cache_key = %s
    """)

    def delete_many(self, keys, version=None):
        """Remove several keys in one statement."""
        made_keys = [self.make_key(key, version=version) for key in keys]
        for key in made_keys:
            self.validate_key(key)
        # "IN ()" is a MySQL syntax error - nothing to do for no keys.
        if not made_keys:
            return
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(
                self._delete_many_query.format(table=table),
                (made_keys,)
            )

    _delete_many_query = collapse_spaces("""
        DELETE FROM {table}
        WHERE cache_key IN %s
    """)

    def has_key(self, key, version=None):
        """Return True if ``key`` exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute(
                self._has_key_query.format(table=table),
                (key, self._now())
            )
            return cursor.fetchone() is not None

    _has_key_query = collapse_spaces("""
        SELECT 1 FROM {table}
        WHERE cache_key = %s and expires > %s
    """)

    def incr(self, key, delta=1, version=None):
        """Atomically add ``delta`` to an integer key; returns the new value."""
        return self._base_delta(key, delta, version, '+')

    def decr(self, key, delta=1, version=None):
        """Atomically subtract ``delta`` from an integer key; returns the new value."""
        return self._base_delta(key, delta, version, '-')

    def _base_delta(self, key, delta, version, operation):
        """Shared incr/decr: one UPDATE, reading the new value back through
        LAST_INSERT_ID(). Raises ValueError if the key is missing or not an
        integer ('i') entry."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            updated = cursor.execute(
                self._delta_query.format(table=table, operation=operation),
                (delta, key)
            )
            if not updated:
                raise ValueError("Key '%s' not found, or not an integer" % key)
            # New value stored in insert_id
            return cursor.lastrowid

    # Looks a bit tangled to turn the blob back into an int for updating, but
    # it works. Stores the new value for insert_id() with LAST_INSERT_ID
    _delta_query = collapse_spaces("""
        UPDATE {table}
        SET value = LAST_INSERT_ID(
            CAST(value AS SIGNED INTEGER)
            {operation}
            %s
        )
        WHERE cache_key = %s AND
              value_type = 'i'
    """)

    def clear(self):
        """Remove every entry from the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute("DELETE FROM {table}".format(table=table))

    def validate_key(self, key):
        """
        Django normally warns about maximum key length, but we error on it.
        """
        if len(key) > 250:
            raise ValueError(
                # Fixed typo: "maxmimum" -> "maximum"
                "Cache key is longer than the maximum 250 characters: {}"
                .format(key)
            )
        return super(MySQLCache, self).validate_key(key)

    def encode(self, obj):
        """
        Take a Python object and return it as a tuple (value, value_type), a
        blob and a one-char code for what type it is
        """
        if self._is_valid_mysql_bigint(obj):
            return obj, 'i'
        value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        value_type = 'p'
        if (
            self._compress_min_length and
            len(value) >= self._compress_min_length
        ):
            value = zlib.compress(value, self._compress_level)
            value_type = 'z'
        return value, value_type

    def _is_valid_mysql_bigint(self, value):
        """True if ``value`` can be stored natively in a signed BIGINT."""
        return (
            # Can't support int/long subclasses since they are expected
            # to decode back to the same object
            (type(value) in six.integer_types) and
            # Can't go beyond these ranges
            BIGINT_SIGNED_MIN <= value <= BIGINT_SIGNED_MAX
        )

    def decode(self, value, value_type):
        """
        Take a value blob and its value_type one-char code and convert it back
        to a python object
        """
        if value_type == 'i':
            return int(value)
        if value_type == 'z':
            value = zlib.decompress(value)
            value_type = 'p'
        if value_type == 'p':
            return pickle.loads(force_bytes(value))
        raise ValueError(
            "Unknown value_type '{}' read from the cache table."
            .format(value_type)
        )

    def _maybe_cull(self):
        # Roll the dice, if it says yes then cull
        if self._cull_probability and random() <= self._cull_probability:
            self.cull()

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """Convert a Django timeout (seconds) into an absolute expiry time in
        milliseconds since the epoch; None means 'never expire'."""
        if timeout is None:
            return self.FOREVER_TIMEOUT
        timeout = super(MySQLCache, self).get_backend_timeout(timeout)
        return int(timeout * 1000)

    # Our API extensions

    def keys_with_prefix(self, prefix, version=None):
        """Return the set of unprefixed keys (for ``version``) whose made
        keys start with ``prefix``."""
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too."
            )
        if version is None:
            version = self.version
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        prefix = self.make_key(prefix + '%', version=version)
        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(table=table),
                (prefix, self._now())
            )
            rows = cursor.fetchall()
            full_keys = {row[0] for row in rows}
            keys = set()
            for full_key in full_keys:
                key, key_prefix, key_version = self.reverse_key_func(full_key)
                # The LIKE pattern can over-match (e.g. '%' wildcards inside
                # prefix), so double-check the version.
                if key_version == version:
                    keys.add(key)
            return keys

    def get_with_prefix(self, prefix, version=None):
        """Return {key: value} for all unexpired keys whose made keys start
        with ``prefix``."""
        if self.reverse_key_func is None:
            raise ValueError(
                "To use the _with_prefix commands with a custom KEY_FUNCTION, "
                "you need to specify a custom REVERSE_KEY_FUNCTION too."
            )
        if version is None:
            version = self.version
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        prefix = self.make_key(prefix + '%', version=version)
        with connections[db].cursor() as cursor:
            cursor.execute(
                """SELECT cache_key, value, value_type
                   FROM {table}
                   WHERE cache_key LIKE %s AND
                         expires >= %s""".format(table=table),
                (prefix, self._now())
            )
            rows = cursor.fetchall()
            data = {}
            for made_key, value, value_type in rows:
                key, key_prefix, key_version = self.reverse_key_func(made_key)
                data[key] = self.decode(value, value_type)
            return data

    def delete_with_prefix(self, prefix, version=None):
        """Delete all keys whose made keys start with ``prefix``; returns the
        number of rows deleted."""
        if version is None:
            version = self.version
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        prefix = self.make_key(prefix + '%', version=version)
        with connections[db].cursor() as cursor:
            return cursor.execute(
                """DELETE FROM {table}
                   WHERE cache_key LIKE %s""".format(table=table),
                (prefix,)
            )

    def cull(self):
        """Evict entries: first everything expired, then - if the table is
        still at/over ``_max_entries`` - a 1/_cull_frequency fraction of keys
        (or everything, when _cull_frequency == 0). Returns rows deleted."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            # First, try just deleting expired keys
            num_deleted = cursor.execute(
                "DELETE FROM {table} WHERE expires < %s".format(table=table),
                (self._now(),)
            )
            # -1 means "Don't limit size"
            if self._max_entries == -1:
                return
            cursor.execute("SELECT COUNT(*) FROM {table}".format(table=table))
            num = cursor.fetchone()[0]
            if num < self._max_entries:
                return num_deleted
            # Now do a key-based cull
            if self._cull_frequency == 0:
                num_deleted += cursor.execute(
                    "DELETE FROM {table}".format(table=table)
                )
            else:
                cull_num = num // self._cull_frequency
                cursor.execute(
                    """SELECT cache_key FROM {table}
                       ORDER BY cache_key
                       LIMIT 1 OFFSET %s""".format(table=table),
                    (cull_num,)
                )
                max_key = cursor.fetchone()[0]
                num_deleted += cursor.execute(
                    """DELETE FROM {table}
                       WHERE cache_key < %s""".format(table=table),
                    (max_key,)
                )
            return num_deleted
| |
"""
Extensions called during training to generate samples and diagnostic plots and printouts.
"""
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import theano.tensor as T
import theano
from blocks.extensions import SimpleExtension
import viz
import sampler
class PlotSamples(SimpleExtension):
    def __init__(self, model, algorithm, X, path, n_samples=49, **kwargs):
        """
        Generate samples from the model. The do() function is called as an extension during training.
        Generates 3 types of samples:
        - Sample from generative model
        - Sample from image denoising posterior distribution (default signal to noise of 1)
        - Sample from image inpainting posterior distribution (inpaint left half of image)

        NOTE(review): the `algorithm` argument is accepted but never used.
        """
        super(PlotSamples, self).__init__(**kwargs)
        self.model = model
        self.path = path
        # Never plot more samples than there are data examples.
        n_samples = np.min([n_samples, X.shape[0]])
        self.X = X[:n_samples].reshape(
            (n_samples, model.n_colors, model.spatial_width, model.spatial_width))
        self.n_samples = n_samples
        # Compile a function mapping (noisy batch, timestep matrix) to the
        # model's mu/sigma outputs, used by the sampler at plot time.
        X_noisy = T.tensor4('X noisy samp', dtype=theano.config.floatX)
        t = T.matrix('t samp', dtype=theano.config.floatX)
        self.get_mu_sigma = theano.function([X_noisy, t], model.get_mu_sigma(X_noisy, t),
            allow_input_downcast=True)
    def do(self, callback_name, *args):
        import sys
        # Raised presumably because theano graph traversal/pickling can
        # recurse very deeply - TODO confirm.
        sys.setrecursionlimit(10000000)
        print "generating samples"
        base_fname_part1 = self.path + '/samples-'
        base_fname_part2 = '_batch%06d'%self.main_loop.status['iterations_done']
        # 1. Unconditional samples from the generative model.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=False, denoise_sigma=None, X_true=None,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
        # 2. Inpainting posterior samples conditioned on self.X.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=True, denoise_sigma=None, X_true=self.X,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
        # 3. Denoising posterior samples with signal-to-noise 1.
        sampler.generate_samples(self.model, self.get_mu_sigma,
            n_samples=self.n_samples, inpaint=False, denoise_sigma=1, X_true=self.X,
            base_fname_part1=base_fname_part1, base_fname_part2=base_fname_part2)
class PlotParameters(SimpleExtension):
    """Plot every parameter of the Blocks model as an image each time do() runs."""
    def __init__(self, model, blocks_model, path, **kwargs):
        super(PlotParameters, self).__init__(**kwargs)
        self.path = path
        self.model = model
        self.blocks_model = blocks_model
    def do(self, callback_name, *args):
        import sys
        # Raised presumably for deep theano graph traversal - TODO confirm.
        sys.setrecursionlimit(10000000)
        print "plotting parameters"
        for param in self.blocks_model.parameters:
            param_name = param.name
            # Drop the first two '/'-separated path components and make the
            # rest safe for use in a filename.
            filename_safe_name = '-'.join(param_name.split('/')[2:]).replace(' ', '_')
            base_fname_part1 = self.path + '/params-' + filename_safe_name
            base_fname_part2 = '_batch%06d'%self.main_loop.status['iterations_done']
            viz.plot_parameter(param.get_value(), base_fname_part1, base_fname_part2,
                title=param_name, n_colors=self.model.n_colors)
class PlotGradients(SimpleExtension):
    """Plot the gradient of the training cost w.r.t. every model parameter."""
    def __init__(self, model, blocks_model, algorithm, X, path, **kwargs):
        super(PlotGradients, self).__init__(**kwargs)
        self.path = path
        self.X = X
        self.model = model
        self.blocks_model = blocks_model
        # Collect the gradient expressions in sorted-parameter-name order so
        # that do() can pair values back up with names deterministically.
        # NOTE(review): treats blocks_model.parameters as a name->variable
        # mapping - confirm against the installed Blocks version.
        gradients = []
        for param_name in sorted(self.blocks_model.parameters.keys()):
            gradients.append(algorithm.gradients[self.blocks_model.parameters[param_name]])
        self.grad_f = theano.function(algorithm.inputs, gradients, allow_input_downcast=True)
    def do(self, callback_name, *args):
        print "plotting gradients"
        grad_vals = self.grad_f(self.X)
        # Same sorted order as in __init__, so indexes line up.
        keynames = sorted(self.blocks_model.parameters.keys())
        for ii in xrange(len(keynames)):
            param_name = keynames[ii]
            val = grad_vals[ii]
            # Drop the first two '/'-separated components; make filename-safe.
            filename_safe_name = '-'.join(param_name.split('/')[2:]).replace(' ', '_')
            base_fname_part1 = self.path + '/grads-' + filename_safe_name
            base_fname_part2 = '_batch%06d'%self.main_loop.status['iterations_done']
            viz.plot_parameter(val, base_fname_part1, base_fname_part2,
                title="grad " + param_name, n_colors=self.model.n_colors)
class PlotInternalState(SimpleExtension):
    """Plot the network's internal state variables, evaluated on a fixed batch X."""
    def __init__(self, model, blocks_model, state, features, X, path, **kwargs):
        super(PlotInternalState, self).__init__(**kwargs)
        self.path = path
        self.X = X
        self.model = model
        self.blocks_model = blocks_model
        # Compile once: features -> list of internal state arrays.
        self.internal_state_f = theano.function([features], state, allow_input_downcast=True)
        # Remember the symbolic variables' names for labelling the plots.
        self.internal_state_names = []
        for var in state:
            self.internal_state_names.append(var.name)
    def do(self, callback_name, *args):
        print "plotting internal state of network"
        state = self.internal_state_f(self.X)
        for ii in xrange(len(state)):
            param_name = self.internal_state_names[ii]
            val = state[ii]
            # Make the variable name safe for use in a filename.
            filename_safe_name = param_name.replace(' ', '_').replace('/', '-')
            base_fname_part1 = self.path + '/state-' + filename_safe_name
            base_fname_part2 = '_batch%06d'%self.main_loop.status['iterations_done']
            viz.plot_parameter(val, base_fname_part1, base_fname_part2,
                title="state " + param_name, n_colors=self.model.n_colors)
class PlotMonitors(SimpleExtension):
    """Plot the training log's cost/train/test monitoring channels to PNG files."""
    def __init__(self, path, burn_in_iters=0, **kwargs):
        super(PlotMonitors, self).__init__(**kwargs)
        self.path = path
        # Number of initial iterations to exclude from the plots.
        self.burn_in_iters = burn_in_iters
    def do(self, callback_name, *args):
        print "plotting monitors"
        try:
            df = self.main_loop.log.to_dataframe()
        except AttributeError:
            # This starting breaking after a Blocks update.
            print "Failed to generate monitoring plots due to Blocks interface change."
            return
        # NOTE(review): df.tail(1).index is a pandas Index, not a scalar;
        # the comparison below relies on pandas' elementwise/implicit
        # behavior - confirm.
        iter_number = df.tail(1).index
        # Throw out the first burn_in values
        # as the objective is often much larger
        # in that period.
        if iter_number > self.burn_in_iters:
            df = df.loc[self.burn_in_iters:]
        cols = [col for col in df.columns if col.startswith(('cost', 'train', 'test'))]
        df = df[cols].interpolate(method='linear')
        # If we don't have any non-nan dataframes, don't plot
        if len(df) == 0:
            return
        try:
            axs = df.interpolate(method='linear').plot(
                subplots=True, legend=False, figsize=(5, len(cols)*2))
        except TypeError:
            # This starting breaking after a different Blocks update.
            print "Failed to generate monitoring plots due to Blocks interface change."
            return
        for ax, cname in zip(axs, cols):
            ax.set_title(cname)
        # One figure with a subplot per channel...
        fn = os.path.join(self.path,
            'monitors_subplots_batch%06d.png' % self.main_loop.status['iterations_done'])
        plt.savefig(fn, bbox_inches='tight')
        plt.clf()
        # ...and one combined figure with all channels on shared axes.
        df.plot(subplots=False, figsize=(15,10))
        plt.gcf().tight_layout()
        fn = os.path.join(self.path,
            'monitors_batch%06d.png' % self.main_loop.status['iterations_done'])
        plt.savefig(fn, bbox_inches='tight')
        plt.close('all')
class LogLikelihood(SimpleExtension):
    def __init__(self, model, test_stream, rescale, num_eval_batches=10000, **kwargs):
        """
        Compute and print log likelihood lower bound on test dataset.
        The do() function is called as an extension during training.
        """
        super(LogLikelihood, self).__init__(**kwargs)
        self.model = model
        self.test_stream = test_stream
        # Multiplicative factor by which the data was rescaled to unit std.
        self.rescale = rescale
        self.num_eval_batches = num_eval_batches
        # Compile the per-batch cost (negated below to get the bound gap).
        features = T.matrix('features', dtype=theano.config.floatX)
        cost = self.model.cost(features)
        self.L_gap_func = theano.function([features,], cost,
                allow_input_downcast=True)
    def print_stats(self, L_gap):
        # Mean and standard error of the per-batch bound gaps so far.
        larr = np.array(L_gap)
        mn = np.mean(larr)
        sd = np.std(larr, ddof=1)
        stderr = sd / np.sqrt(len(L_gap))
        # The log likelihood lower bound, K, is reported for the data after Z-scoring it.
        # Z-score rescale is the multiplicative factor by which the data was rescaled, to
        # give it standard deviation 1.
        print "eval batch=%05d (K-L_null)=%g bits/pix standard error=%g bits/pix Z-score rescale %g"%(
            len(L_gap), mn, stderr, self.rescale)
    def do(self, callback_name, *args):
        L_gap = []
        # NOTE(review): n_colors is read but never used in this method.
        n_colors = self.model.n_colors
        Xiter = None
        for kk in xrange(self.num_eval_batches):
            # Bare except (deliberate best-effort): on the first pass Xiter
            # is None (TypeError) and at epoch end next() raises
            # StopIteration - either way, start a fresh epoch iterator.
            try:
                X = next(Xiter)[0]
            except:
                Xiter = self.test_stream.get_epoch_iterator()
                X = next(Xiter)[0]
            lg = -self.L_gap_func(X)
            L_gap.append(lg)
            # Print running statistics every 1000 batches.
            if np.mod(kk, 1000) == 999:
                self.print_stats(L_gap)
        self.print_stats(L_gap)
def decay_learning_rate(iteration, old_value):
    # TODO the numbers in this function should not be hard coded
    # this is called every epoch
    # reduce the learning rate by 10 every 1000 epochs
    min_value = 1e-4
    # Per-call multiplier chosen so 1000 applications give a factor of 0.1.
    decay_rate = np.exp(np.log(0.1)/1000.)
    new_value = decay_rate*old_value
    # Floor the learning rate at min_value.
    if new_value < min_value:
        new_value = min_value
    print "learning rate %g"%new_value
    return np.float32(new_value)
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for the elasticsearch datasets"""
import time
import json
import pytest
import socket
import requests
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
import tensorflow_io as tfio
# COMMON VARIABLES
# Name of the docker container expected to run the test cluster.
ES_CONTAINER_NAME = "tfio-elasticsearch"
# Local single-node elasticsearch endpoint and the test index/doc-type.
NODE = "http://localhost:9200"
INDEX = "people"
DOC_TYPE = "survivors"
HEADERS = {
    "Content-Type": "application/json",
    # Base64 basic-auth credentials (presumably elastic:default_password).
    "Authorization": "Basic ZWxhc3RpYzpkZWZhdWx0X3Bhc3N3b3Jk",
}
# Column order shared by the parametrized records in test_populate_data.
ATTRS = ["name", "gender", "age", "fare", "vip", "survived"]
def is_container_running():
    """Check whether the elasticsearch container is up and running
    with the correct port being exposed.

    Returns:
        bool: True if a TCP connection to 127.0.0.1:9200 succeeds.
    """
    # Use the socket as a context manager so the file descriptor is closed
    # deterministically (the original leaked it).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        status = sock.connect_ex(("127.0.0.1", 9200))
    # connect_ex returns 0 on success, an errno otherwise.
    return status == 0
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_create_index():
    """Create an index in the cluster"""
    response = requests.put(f"{NODE}/{INDEX}", headers=HEADERS)
    # 200 means the index was created (or acknowledged).
    assert response.status_code == 200
@pytest.mark.parametrize(
    "record",
    [
        (("person1", "Male", 20, 80.52, False, 1)),
        (("person2", "Female", 30, 40.88, True, 0)),
        (("person3", "Male", 40, 20.73, True, 0)),
        (("person4", "Female", 50, 100.99, False, 1)),
    ],
)
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_populate_data(record):
    """Populate the index with data"""
    # Pair each attribute name with the corresponding record value.
    data = dict(zip(ATTRS, record))
    response = requests.post(f"{NODE}/{INDEX}/{DOC_TYPE}", json=data, headers=HEADERS)
    # The 201 status code indicates the documents have been properly indexed
    assert response.status_code == 201
    # allow the cluster to index in the background.
    time.sleep(1)
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_elasticsearch_io_dataset():
    """Test the functionality of the ElasticsearchIODataset"""
    dataset = tfio.experimental.elasticsearch.ElasticsearchIODataset(
        nodes=[NODE], index=INDEX, doc_type=DOC_TYPE, headers=HEADERS
    )
    assert issubclass(type(dataset), tf.data.Dataset)
    # Every yielded item must expose all expected attributes.
    for item in dataset:
        assert all(attr in item for attr in ATTRS)
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_elasticsearch_io_dataset_no_auth():
    """Test the functionality of the ElasticsearchIODataset when basic auth is
    required but the associated header is not passed.
    """
    # The original try/except silently passed when no exception was raised,
    # so the test could never fail on missing-auth acceptance.  pytest.raises
    # enforces that the ConnectionError actually occurs.
    with pytest.raises(ConnectionError) as exc_info:
        tfio.experimental.elasticsearch.ElasticsearchIODataset(
            nodes=[NODE], index=INDEX, doc_type=DOC_TYPE
        )
    assert str(exc_info.value) == (
        "No healthy node available for the index: {}, "
        "please check the cluster config".format(INDEX)
    )
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_elasticsearch_io_dataset_batch():
    """Test the functionality of the ElasticsearchIODataset"""
    batch_size = 2
    dataset = tfio.experimental.elasticsearch.ElasticsearchIODataset(
        nodes=[NODE], index=INDEX, doc_type=DOC_TYPE, headers=HEADERS
    ).batch(batch_size)
    assert issubclass(type(dataset), tf.data.Dataset)
    # Every attribute must be present and batched to the requested size.
    for item in dataset:
        for attr in ATTRS:
            assert attr in item
            assert len(item[attr]) == batch_size
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_elasticsearch_io_dataset_training():
    """Test the functionality of the ElasticsearchIODataset by training a
    tf.keras model on the structured data.
    """
    batch_size = 2
    dataset = tfio.experimental.elasticsearch.ElasticsearchIODataset(
        nodes=[NODE], index=INDEX, doc_type=DOC_TYPE, headers=HEADERS
    )
    # Split off the label column, then batch.
    dataset = dataset.map(lambda v: (v, v.pop("survived"))).batch(batch_size)
    assert issubclass(type(dataset), tf.data.Dataset)

    # Input features: numeric fare, bucketized age, one-hot encoded gender.
    columns = [feature_column.numeric_column("fare")]
    columns.append(
        feature_column.bucketized_column(
            feature_column.numeric_column("age"), boundaries=[10, 30]
        )
    )
    columns.append(
        feature_column.indicator_column(
            feature_column.categorical_column_with_vocabulary_list(
                "gender", ["Male", "Female"]
            )
        )
    )

    # Build, compile and train a small dense classifier on the stream.
    model = tf.keras.Sequential(
        [
            tf.keras.layers.DenseFeatures(columns),
            layers.Dense(128, activation="relu"),
            layers.Dense(128, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(1),
        ]
    )
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    model.fit(dataset, epochs=5)
@pytest.mark.skipif(not is_container_running(), reason="The container is not running")
def test_cleanup():
    """Clean up the index"""
    response = requests.delete(f"{NODE}/{INDEX}", headers=HEADERS)
    # 200 means the index deletion was acknowledged.
    assert response.status_code == 200
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Wrapper for netCDF readers."""
from __future__ import unicode_literals, division, print_function
import os.path
from monty.dev import requires, deprecated
from monty.collections import AttrDict
from monty.functools import lazy_property
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.xcfunc import XcFunc
from pymatgen.core.structure import Structure
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"as_ncreader",
"as_etsfreader",
"NetcdfReader",
"ETSF_Reader",
"structure_from_ncdata",
]
try:
import netCDF4
except ImportError:
netCDF4 = None
def _asreader(file, cls):
closeit = False
if not isinstance(file, cls):
file, closeit = cls(file), True
return file, closeit
def as_ncreader(file):
    """
    Convert file into a NetcdfReader instance.
    Returns reader, closeit where closeit is set to True
    if we have to close the file before leaving the procedure.
    """
    reader, closeit = _asreader(file, NetcdfReader)
    return reader, closeit
def as_etsfreader(file):
    """Convert file into an ETSF_Reader; returns (reader, closeit)."""
    reader, closeit = _asreader(file, ETSF_Reader)
    return reader, closeit
class NetcdfReaderError(Exception):
    """Base error class raised by :class:`NetcdfReader` operations."""
class NO_DEFAULT(object):
    """Sentinel signalling that read_value should raise instead of returning a default."""
class NetcdfReader(object):
"""
Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.
Additional documentation available at:
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
"""
Error = NetcdfReaderError
@requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
def __init__(self, path):
"""Open the Netcdf file specified by path (read mode)."""
self.path = os.path.abspath(path)
try:
self.rootgrp = netCDF4.Dataset(self.path, mode="r")
except Exception as exc:
raise self.Error("In file %s: %s" % (self.path, str(exc)))
self.ngroups = len(list(self.walk_tree()))
#self.path2group = collections.OrderedDict()
#for children in self.walk_tree():
# for child in children:
# #print(child.group, child.path)
# self.path2group[child.path] = child.group
def __enter__(self):
"""Activated when used in the with statement."""
return self
def __exit__(self, type, value, traceback):
"""Activated at the end of the with statement. It automatically closes the file."""
self.rootgrp.close()
def close(self):
try:
self.rootgrp.close()
except Exception as exc:
logger.warning("Exception %s while trying to close %s" % (exc, self.path))
#@staticmethod
#def pathjoin(*args):
# return "/".join(args)
def walk_tree(self, top=None):
"""
Navigate all the groups in the file starting from top.
If top is None, the root group is used.
"""
if top is None:
top = self.rootgrp
values = top.groups.values()
yield values
for value in top.groups.values():
for children in self.walk_tree(value):
yield children
def print_tree(self):
for children in self.walk_tree():
for child in children:
print(child)
def read_dimvalue(self, dimname, path="/"):
"""Returns the value of a dimension."""
dim = self._read_dimensions(dimname, path=path)[0]
return len(dim)
def read_varnames(self, path="/"):
"""List of variable names stored in the group specified by path."""
if path == "/":
return self.rootgrp.variables.keys()
else:
group = self.path2group[path]
return group.variables.keys()
def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT):
"""
Returns the values of variable with name varname in the group specified by path.
Args:
varname: Name of the variable
path: path to the group.
cmode: if cmode=="c", a complex ndarrays is constructed and returned
(netcdf does not provide native support from complex datatype).
default: read_value returns default if varname is not present.
Returns:
numpy array if varname represents an array, scalar otherwise.
"""
try:
var = self.read_variable(varname, path=path)
except self.Error:
if default is NO_DEFAULT: raise
return default
if cmode is None:
# scalar or array
# getValue is not portable!
try:
return var.getValue()[0] if not var.shape else var[:]
except IndexError:
return var.getValue() if not var.shape else var[:]
else:
assert var.shape[-1] == 2
if cmode == "c":
return var[...,0] + 1j*var[...,1]
else:
raise ValueError("Wrong value for cmode %s" % cmode)
def read_variable(self, varname, path="/"):
"""Returns the variable with name varname in the group specified by path."""
return self._read_variables(varname, path=path)[0]
def _read_dimensions(self, *dimnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.dimensions[dname] for dname in dimnames]
else:
group = self.path2group[path]
return [group.dimensions[dname] for dname in dimnames]
except KeyError:
raise self.Error("In file %s:\ndimnames %s, kwargs %s" % (self.path, dimnames, kwargs))
def _read_variables(self, *varnames, **kwargs):
    """Return the variable objects for ``varnames``; raise ``self.Error`` on a missing name."""
    path = kwargs.get("path", "/")
    try:
        # A missing group path or variable name both surface as KeyError.
        grp = self.rootgrp if path == "/" else self.path2group[path]
        return [grp.variables[vname] for vname in varnames]
    except KeyError:
        raise self.Error("In file %s:\nvarnames %s, kwargs %s" % (self.path, varnames, kwargs))
def read_keys(self, keys, dict_cls=AttrDict, path="/"):
    """
    Read a list of variables/dimensions from file. If a key is not present the
    corresponding entry in the output dictionary is set to None.
    """
    out = dict_cls()
    for key in keys:
        # Prefer a variable with this name; fall back to a dimension; else None.
        try:
            out[key] = self.read_value(key, path=path)
            continue
        except self.Error:
            pass
        try:
            out[key] = self.read_dimvalue(key, path=path)
        except self.Error:
            out[key] = None
    return out
class ETSF_Reader(NetcdfReader):
    """
    This object reads data from a file written according to the ETSF-IO specifications.

    We assume that the netcdf file contains at least the crystallographic section.
    """
    @lazy_property
    def chemical_symbols(self):
        """Chemical symbols char [number of atom species][symbol length]."""
        charr = self.read_value("chemical_symbols")
        symbols = []
        for v in charr:
            # Each row of the char array is a sequence of single characters;
            # join them into one symbol string.
            symbols.append("".join(c for c in v))
        #symbols = ["".join(str(c)) for symb in symbols for c in symb]
        #symbols = [s.decode("ascii") for s in symbols]
        #chemical_symbols = [str("".join(s)) for s in symbols]
        #print(symbols)
        return symbols

    def typeidx_from_symbol(self, symbol):
        """Returns the type index from the chemical symbol. Note python convention."""
        return self.chemical_symbols.index(symbol)

    def read_structure(self, cls=Structure):
        """Returns the crystalline structure."""
        # Multiple netcdf groups are not supported for structure extraction.
        if self.ngroups != 1:
            raise NotImplementedError("In file %s: ngroups != 1" % self.path)
        return structure_from_ncdata(self, cls=cls)

    def read_abinit_xcfunc(self):
        """
        Read ixc from an Abinit file. Return :class:`XcFunc` object.
        """
        # NOTE(review): read_variable returns the netcdf Variable object,
        # not its scalar value — confirm XcFunc.from_abinit_ixc accepts it.
        ixc = self.read_variable("ixc")
        return XcFunc.from_abinit_ixc(ixc)
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
    """
    Reads and returns a pymatgen structure from a NetCDF file
    containing crystallographic data in the ETSF-IO format.

    Args:
        ncdata: filename or NetcdfReader instance.
        site_properties: Dictionary with site properties.
        cls: The Structure class to instantiate.

    Returns:
        ``cls`` instance built from the crystallographic section of the file.
    """
    ncdata, closeit = as_ncreader(ncdata)

    # TODO check whether atomic units are used
    lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")

    red_coords = ncdata.read_value("reduced_atom_positions")
    natom = len(red_coords)

    znucl_type = ncdata.read_value("atomic_numbers")

    # type_atom[0:natom] --> index between 1 and number of atom species
    type_atom = ncdata.read_value("atom_species")

    # Fortran to C index and float --> int conversion.
    species = natom * [None]
    for atom in range(natom):
        type_idx = type_atom[atom] - 1
        species[atom] = int(znucl_type[type_idx])

    d = {}
    if site_properties is not None:
        for prop in site_properties:
            # BUG FIX: the original stored under the builtin ``property``
            # instead of the loop variable ``prop``, so every site property
            # was written to the same bogus key and the intended keys were
            # never populated.
            d[prop] = ncdata.read_value(prop)

    structure = cls(lattice, species, red_coords, site_properties=d)

    # Quick and dirty hack.
    # I need an abipy structure since I need to_abivars and other methods.
    try:
        from abipy.core.structure import Structure as AbipyStructure
        structure.__class__ = AbipyStructure
    except ImportError:
        pass

    if closeit:
        ncdata.close()

    return structure
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
import st2common
from local_runner import local_shell_command_runner
from st2actions.notifier import notifier
from st2actions.workflows import workflows
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import policiesregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as ac_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
# Fixture pack exercised by these tests, plus its on-disk location.
TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
    st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)

# Packs registered in setUpClass; "core" supplies the basic local actions.
PACKS = [
    TEST_PACK_PATH,
    st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]

# Canned runner result simulating a failed local shell command.
# NOTE(review): the key is spelled "stderror" (not "stderr") — confirm this
# matches what the runner/tests actually inspect before changing it.
RUNNER_RESULT_FAILED = (ac_const.LIVEACTION_STATUS_FAILED, {"stderror": "..."}, {})
# Replace all message-bus publishers with mocks so no RabbitMQ is needed:
# CUD updates become no-ops, while create/state publishes are routed through
# the synchronous mock publishers that drive the workflow engine inline.
@mock.patch.object(
    publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_create",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_state",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_create",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create
    ),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_state",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state
    ),
)
class OrquestaRunnerTest(st2tests.ExecutionDbTestCase):
    """Tests for retry-policy behavior of orquesta workflow executions."""

    @classmethod
    def setUpClass(cls):
        super(OrquestaRunnerTest, cls).setUpClass()

        # Register runners and policy types.
        runnersregistrar.register_runners()
        policiesregistrar.register_policy_types(st2common)

        # Register test pack(s).
        registrar_options = {"use_pack_cache": False, "fail_on_failure": True}
        actions_registrar = actionsregistrar.ActionsRegistrar(**registrar_options)
        policies_registrar = policiesregistrar.PolicyRegistrar(**registrar_options)

        for pack in PACKS:
            actions_registrar.register_from_pack(pack)
            policies_registrar.register_from_pack(pack)

    def tearDown(self):
        super(OrquestaRunnerTest, self).tearDown()

        # Remove all liveactions before running each test.
        for lv_ac_db in lv_db_access.LiveAction.get_all():
            lv_ac_db.delete()

        # Remove all action executions before running each test.
        for ac_ex_db in ex_db_access.ActionExecution.get_all():
            ac_ex_db.delete()

    # The mocked runner fails the single task, which fails the workflow and
    # should trigger the retry policy attached to the workflow action.
    @mock.patch.object(
        local_shell_command_runner.LocalShellCommandRunner,
        "run",
        mock.MagicMock(side_effect=[RUNNER_RESULT_FAILED]),
    )
    def test_retry_policy_applied_on_workflow_failure(self):
        wf_name = "sequential"
        wf_ac_ref = TEST_PACK + "." + wf_name
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + ".yaml")
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(
            lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
        )

        # Ensure there is only one execution recorded.
        self.assertEqual(len(lv_db_access.LiveAction.query(action=wf_ac_ref)), 1)

        # Identify the records for the workflow and task.
        wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )[0]
        t1_ex_db = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )[0]
        t1_lv_ac_db = lv_db_access.LiveAction.query(task_execution=str(t1_ex_db.id))[0]
        self.assertEqual(t1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
        t1_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(t1_ex_db.id)
        )[0]
        self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)

        # Manually drive the notifier and engine (normally bus-triggered).
        notifier.get_notifier().process(t1_ac_ex_db)
        workflows.get_engine().process(t1_ac_ex_db)

        # Assert the main workflow is completed.
        ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
        self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
        notifier.get_notifier().process(ac_ex_db)

        # Ensure execution is retried.
        self.assertEqual(len(lv_db_access.LiveAction.query(action=wf_ac_ref)), 2)

    # Here the failure happens inside a subworkflow task; the retry policy is
    # attached to the workflow action, so the failed task must NOT be retried.
    @mock.patch.object(
        local_shell_command_runner.LocalShellCommandRunner,
        "run",
        mock.MagicMock(side_effect=[RUNNER_RESULT_FAILED]),
    )
    def test_no_retry_policy_applied_on_task_failure(self):
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "subworkflow.yaml")
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(
            lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
        )

        # Identify the records for the main workflow.
        wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )[0]
        tk_ex_dbs = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )
        self.assertEqual(len(tk_ex_dbs), 1)

        # Identify the records for the tasks.
        t1_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(tk_ex_dbs[0].id)
        )[0]
        t1_wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(t1_ac_ex_db.id)
        )[0]
        self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        self.assertEqual(t1_wf_ex_db.status, wf_statuses.RUNNING)

        # Ensure there is only one execution for the task.
        tk_ac_ref = TEST_PACK + "." + "sequential"
        self.assertEqual(len(lv_db_access.LiveAction.query(action=tk_ac_ref)), 1)

        # Fail the subtask of the subworkflow.
        t1_t1_ex_db = wf_db_access.TaskExecution.query(
            workflow_execution=str(t1_wf_ex_db.id)
        )[0]
        t1_t1_lv_ac_db = lv_db_access.LiveAction.query(
            task_execution=str(t1_t1_ex_db.id)
        )[0]
        self.assertEqual(t1_t1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_FAILED)
        t1_t1_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(t1_t1_ex_db.id)
        )[0]
        self.assertEqual(t1_t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
        notifier.get_notifier().process(t1_t1_ac_ex_db)
        workflows.get_engine().process(t1_t1_ac_ex_db)

        # Ensure the task execution is not retried.
        self.assertEqual(len(lv_db_access.LiveAction.query(action=tk_ac_ref)), 1)

        # Process the failure of the subworkflow.
        t1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t1_ac_ex_db.id))
        self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
        workflows.get_engine().process(t1_ac_ex_db)

        # Assert the main workflow is completed.
        ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
        self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_FAILED)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests_cache.backends.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains BaseCache class which can be used as in-memory cache backend or
extended to support persistence.
"""
from datetime import datetime
import hashlib
from copy import copy
from io import BytesIO
import requests
from ..compat import is_py2, urlencode, urlparse, urlunparse, parse_qsl
# Snapshot of requests' stock headers; create_key skips header hashing when a
# request carries exactly these defaults.
_DEFAULT_HEADERS = requests.utils.default_headers()
class BaseCache(object):
    """ Base class for cache implementations, can be used as in-memory cache.

    To extend it you can provide dictionary-like objects for
    :attr:`keys_map` and :attr:`responses` or override public methods.
    """
    def __init__(self, *args, **kwargs):
        #: `key` -> `key_in_responses` mapping
        self.keys_map = {}
        #: `key_in_cache` -> `response` mapping
        self.responses = {}
        # Opt-in: fold request headers into the cache key for GET requests.
        self._include_get_headers = kwargs.get("include_get_headers", False)
        # Query/body parameter names excluded from cache-key computation.
        self._ignored_parameters = set(kwargs.get("ignored_parameters") or [])

    def save_response(self, key, response):
        """ Save response to cache

        :param key: key for this response
        :param response: response to save

        .. note:: Response is reduced before saving (with :meth:`reduce_response`)
                  to make it picklable
        """
        # Stored together with the save time so callers can compute expiry.
        self.responses[key] = self.reduce_response(response), datetime.utcnow()

    def add_key_mapping(self, new_key, key_to_response):
        """
        Adds mapping of `new_key` to `key_to_response` to make it possible to
        associate many keys with single response

        :param new_key: new key (e.g. url from redirect)
        :param key_to_response: key which can be found in :attr:`responses`
        :return:
        """
        self.keys_map[new_key] = key_to_response

    def get_response_and_time(self, key, default=(None, None)):
        """ Retrieves response and timestamp for `key` if it's stored in cache,
        otherwise returns `default`

        :param key: key of resource
        :param default: return this if `key` not found in cache
        :returns: tuple (response, datetime)

        .. note:: Response is restored after unpickling with :meth:`restore_response`
        """
        try:
            if key not in self.responses:
                # Follow an alias (e.g. a redirect URL) to the canonical key.
                key = self.keys_map[key]
            response, timestamp = self.responses[key]
        except KeyError:
            return default
        return self.restore_response(response), timestamp

    def delete(self, key):
        """ Delete `key` from cache. Also deletes all responses from response history
        """
        try:
            if key in self.responses:
                response, _ = self.responses[key]
                del self.responses[key]
            else:
                # `key` may be an alias; resolve it, then drop the alias.
                response, _ = self.responses[self.keys_map[key]]
                del self.keys_map[key]
            # Drop alias entries created for each redirect in the history.
            for r in response.history:
                del self.keys_map[self.create_key(r.request)]
        except KeyError:
            pass

    def delete_url(self, url):
        """ Delete response associated with `url` from cache.
        Also deletes all responses from response history. Works only for GET requests
        """
        self.delete(self._url_to_key(url))

    def clear(self):
        """ Clear cache
        """
        self.responses.clear()
        self.keys_map.clear()

    def has_key(self, key):
        """ Returns `True` if cache has `key`, `False` otherwise
        """
        return key in self.responses or key in self.keys_map

    def has_url(self, url):
        """ Returns `True` if cache has `url`, `False` otherwise.
        Works only for GET request urls
        """
        return self.has_key(self._url_to_key(url))

    def _url_to_key(self, url):
        # Build a throwaway prepared GET request so the key matches what
        # create_key produced when the response was cached.
        session = requests.Session()
        return self.create_key(session.prepare_request(requests.Request('GET', url)))

    # Response attributes copied when pickling/unpickling cached responses.
    _response_attrs = ['_content', 'url', 'status_code', 'cookies',
                       'headers', 'encoding', 'request', 'reason', 'raw']

    # Attributes copied from the urllib3 raw response object.
    _raw_response_attrs = ['_original_response', 'decode_content', 'headers',
                           'reason', 'status', 'strict', 'version']

    def reduce_response(self, response, seen=None):
        """ Reduce response object to make it compatible with ``pickle``
        """
        # `seen` guards against infinite recursion through response.history.
        if seen is None:
            seen = {}
        try:
            return seen[id(response)]
        except KeyError:
            pass
        result = _Store()
        # prefetch
        response.content
        for field in self._response_attrs:
            setattr(result, field, self._picklable_field(response, field))
        seen[id(response)] = result
        result.history = tuple(self.reduce_response(r, seen) for r in response.history)
        return result

    def _picklable_field(self, response, name):
        # Returns a pickle-safe copy of one response attribute.
        value = getattr(response, name)
        if name == 'request':
            value = copy(value)
            # Hooks may hold arbitrary callables, which don't pickle.
            value.hooks = []
        elif name == 'raw':
            result = _RawStore()
            for field in self._raw_response_attrs:
                setattr(result, field, getattr(value, field, None))
            if result._original_response is not None:
                setattr(result._original_response, "fp", None) # _io.BufferedReader is not picklable
            value = result
        return value

    def restore_response(self, response, seen=None):
        """ Restore response object after unpickling
        """
        # `seen` guards against infinite recursion through response.history.
        if seen is None:
            seen = {}
        try:
            return seen[id(response)]
        except KeyError:
            pass
        result = requests.Response()
        for field in self._response_attrs:
            setattr(result, field, getattr(response, field, None))
        # Give the raw stub the body so its read() can serve streaming users.
        result.raw._cached_content_ = result.content
        seen[id(response)] = result
        result.history = tuple(self.restore_response(r, seen) for r in response.history)
        return result

    def _remove_ignored_parameters(self, request):
        # Strips self._ignored_parameters from the query string and, for
        # form/json bodies, from the body; returns (url, body) for hashing.
        def filter_ignored_parameters(data):
            return [(k, v) for k, v in data if k not in self._ignored_parameters]
        url = urlparse(request.url)
        query = parse_qsl(url.query)
        query = filter_ignored_parameters(query)
        query = urlencode(query)
        url = urlunparse((url.scheme, url.netloc, url.path, url.params, query, url.fragment))
        body = request.body
        content_type = request.headers.get('content-type')
        if body and content_type:
            if content_type == 'application/x-www-form-urlencoded':
                body = parse_qsl(body)
                body = filter_ignored_parameters(body)
                body = urlencode(body)
            elif content_type == 'application/json':
                import json
                body = json.loads(body)
                body = filter_ignored_parameters(sorted(body.items()))
                body = json.dumps(body)
        return url, body

    def create_key(self, request):
        # Cache key = SHA-256 over method + url + body
        # (+ headers for GET when _include_get_headers is enabled).
        if self._ignored_parameters:
            url, body = self._remove_ignored_parameters(request)
        else:
            url, body = request.url, request.body
        key = hashlib.sha256()
        key.update(_to_bytes(request.method.upper()))
        key.update(_to_bytes(url))
        if request.body:
            key.update(_to_bytes(body))
        else:
            if self._include_get_headers and request.headers != _DEFAULT_HEADERS:
                for name, value in sorted(request.headers.items()):
                    key.update(_to_bytes(name))
                    key.update(_to_bytes(value))
        return key.hexdigest()

    def __str__(self):
        return 'keys: %s\nresponses: %s' % (self.keys_map, self.responses)
# used for saving response attributes
class _Store(object):
    """Plain attribute bag holding the picklable fields of a cached response."""
    pass
class _RawStore(object):
    """Pickle-safe stand-in for ``response.raw`` (the urllib3 response)."""

    # noop for cached response
    def release_conn(self):
        pass

    # for streaming requests support
    def read(self, chunk_size=1):
        # Lazily wrap the cached body in a BytesIO on first read.
        # `_cached_content_` is assigned by BaseCache.restore_response.
        if not hasattr(self, "_io_with_content_"):
            self._io_with_content_ = BytesIO(self._cached_content_)
        return self._io_with_content_.read(chunk_size)
def _to_bytes(s, encoding='utf-8'):
    """Encode ``s`` to bytes unless it already is bytes (or we run on Python 2)."""
    already_bytes = is_py2 or isinstance(s, bytes)
    return s if already_bytes else bytes(s, encoding)
| |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
import errno
import glob
import hashlib
import ntpath
import os
import subprocess
import sys
import tempfile
from .envfuncs import Env, add_path_entry, path_search
from .platform import HostType, is_windows
def containing_repo_type(path):
    """Walk upward from ``path`` looking for a VCS checkout root.

    Returns a tuple ("git" | "hg", root_dir) for the nearest enclosing git or
    mercurial repository, or None once the filesystem root is reached.
    """
    current = path
    while True:
        # Order matters: a .git marker wins over .hg at the same level.
        for marker, kind in ((".git", "git"), (".hg", "hg")):
            if os.path.exists(os.path.join(current, marker)):
                return (kind, current)
        parent = os.path.dirname(current)
        if parent == current:
            return None
        current = parent
class BuildOptions(object):
    """Holds the resolved configuration for a getdeps build session."""

    def __init__(
        self,
        fbcode_builder_dir,
        scratch_dir,
        host_type,
        install_dir=None,
        num_jobs=0,
        use_shipit=False,
        vcvars_path=None,
    ):
        """ fbcode_builder_dir - the path to either the in-fbsource fbcode_builder dir,
                                 or for shipit-transformed repos, the build dir that
                                 has been mapped into that dir.
            scratch_dir - a place where we can store repos and build bits.
                          This path should be stable across runs and ideally
                          should not be in the repo of the project being built,
                          but that is ultimately where we generally fall back
                          for builds outside of FB
            install_dir - where the project will ultimately be installed
            num_jobs - the level of concurrency to use while building
            use_shipit - use real shipit instead of the simple shipit transformer
            vcvars_path - Path to external VS toolchain's vsvarsall.bat
        """
        # Default concurrency to one job per CPU core.
        if not num_jobs:
            import multiprocessing

            num_jobs = multiprocessing.cpu_count()
        if not install_dir:
            install_dir = os.path.join(scratch_dir, "installed")
        # Locate the pinned-dependency hashes file, if one is shipped.
        self.project_hashes = None
        for p in ["../deps/github_hashes", "../project_hashes"]:
            hashes = os.path.join(fbcode_builder_dir, p)
            if os.path.exists(hashes):
                self.project_hashes = hashes
                break
        # Use a simplistic heuristic to figure out if we're in fbsource
        # and where the root of fbsource can be found
        # NOTE(review): containing_repo_type may return None (no repo found),
        # in which case this unpack raises TypeError — presumably getdeps
        # always runs inside a checkout; confirm.
        repo_type, repo_root = containing_repo_type(fbcode_builder_dir)
        if repo_type == "hg":
            self.fbsource_dir = repo_root
        else:
            self.fbsource_dir = None

        self.num_jobs = num_jobs
        self.scratch_dir = scratch_dir
        self.install_dir = install_dir
        self.fbcode_builder_dir = fbcode_builder_dir
        self.host_type = host_type
        self.use_shipit = use_shipit
        if vcvars_path is None and is_windows():
            # On Windows, the compiler is not available in the PATH by
            # default so we need to run the vcvarsall script to populate the
            # environment. We use a glob to find some version of this script
            # as deployed with Visual Studio 2017. This logic will need
            # updating when we switch to a newer compiler.
            vcvarsall = glob.glob(
                os.path.join(
                    os.environ["ProgramFiles(x86)"],
                    "Microsoft Visual Studio",
                    "2017",
                    "*",
                    "VC",
                    "Auxiliary",
                    "Build",
                    "vcvarsall.bat",
                )
            )
            vcvars_path = vcvarsall[0]

        self.vcvars_path = vcvars_path

    def is_darwin(self):
        # True when building on macOS.
        return self.host_type.is_darwin()

    def is_windows(self):
        # True when building on Windows.
        return self.host_type.is_windows()

    def get_vcvars_path(self):
        # Path to the Visual Studio environment script (Windows only).
        return self.vcvars_path

    def is_linux(self):
        # True when building on Linux.
        return self.host_type.is_linux()

    def _compute_hash(self, hash_by_name, manifest, manifests_by_name, ctx):
        """ This recursive function computes a hash for a given manifest.
        The hash takes into account some environmental factors on the
        host machine and includes the hashes of its dependencies.
        No caching of the computation is performed, which is theoretically
        wasteful but the computation is fast enough that it is not required
        to cache across multiple invocations. """
        h = hash_by_name.get(manifest.name, None)
        if h is not None:
            return h

        hasher = hashlib.sha256()
        # Some environmental and configuration things matter
        env = {}
        env["install_dir"] = self.install_dir
        env["scratch_dir"] = self.scratch_dir
        env["os"] = self.host_type.ostype
        env["distro"] = self.host_type.distro
        env["distro_vers"] = self.host_type.distrovers
        for name in ["CXXFLAGS", "CPPFLAGS", "LDFLAGS", "CXX", "CC"]:
            env[name] = os.environ.get(name)
        for tool in ["cc", "c++", "gcc", "g++", "clang", "clang++"]:
            env["tool-%s" % tool] = path_search(os.environ, tool)

        fetcher = manifest.create_fetcher(self, ctx)
        env["fetcher.hash"] = fetcher.hash()

        # Sort so the hash is stable regardless of dict ordering.
        for name in sorted(env.keys()):
            hasher.update(name.encode("utf-8"))
            value = env.get(name)
            if value is not None:
                hasher.update(value.encode("utf-8"))

        manifest.update_hash(hasher, ctx)

        # Fold in the (recursively computed) hashes of all dependencies.
        dep_list = sorted(manifest.get_section_as_dict("dependencies", ctx).keys())
        for dep in dep_list:
            dep_hash = self._compute_hash(
                hash_by_name, manifests_by_name[dep], manifests_by_name, ctx
            )
            hasher.update(dep_hash.encode("utf-8"))

        # Use base64 to represent the hash, rather than the simple hex digest,
        # so that the string is shorter. Use the URL-safe encoding so that
        # the hash can also be safely used as a filename component.
        h = base64.urlsafe_b64encode(hasher.digest()).decode("ascii")
        # ... and because cmd.exe is troublesome with `=` signs, nerf those.
        # They tend to be padding characters at the end anyway, so we can
        # safely discard them.
        h = h.replace("=", "")
        hash_by_name[manifest.name] = h

        return h

    def compute_dirs(self, manifest, fetcher, manifests_by_name, ctx):
        # Derive per-project build/install directories; third-party projects
        # get the hash appended so different configurations don't collide.
        hash_by_name = {}
        hash = self._compute_hash(hash_by_name, manifest, manifests_by_name, ctx)

        if manifest.is_first_party_project():
            directory = manifest.name
        else:
            directory = "%s-%s" % (manifest.name, hash)

        build_dir = os.path.join(self.scratch_dir, "build", directory)
        inst_dir = os.path.join(self.install_dir, directory)

        return {"build_dir": build_dir, "inst_dir": inst_dir, "hash": hash}

    def compute_env_for_install_dirs(self, install_dirs, env=None):
        # Build an environment exposing previously-installed dependencies
        # (CMake prefix, pkg-config, dynamic loader path, PATH).
        if env:
            env = env.copy()
        else:
            env = Env()

        lib_path = None
        if self.is_darwin():
            lib_path = "DYLD_LIBRARY_PATH"
        elif self.is_linux():
            lib_path = "LD_LIBRARY_PATH"
        else:
            lib_path = None

        for d in install_dirs:
            add_path_entry(env, "CMAKE_PREFIX_PATH", d)

            pkgconfig = os.path.join(d, "lib/pkgconfig")
            if os.path.exists(pkgconfig):
                add_path_entry(env, "PKG_CONFIG_PATH", pkgconfig)

            # Allow resolving shared objects built earlier (eg: zstd
            # doesn't include the full path to the dylib in its linkage
            # so we need to give it an assist)
            if lib_path:
                for lib in ["lib", "lib64"]:
                    libdir = os.path.join(d, lib)
                    if os.path.exists(libdir):
                        add_path_entry(env, lib_path, libdir)

            # Allow resolving binaries (eg: cmake, ninja) and dlls
            # built by earlier steps
            bindir = os.path.join(d, "bin")
            if os.path.exists(bindir):
                add_path_entry(env, "PATH", bindir, append=False)

        return env
def list_win32_subst_letters():
    """Return a mapping of active ``subst`` drive letters to their target paths.

    Windows-only: shells out to the ``subst`` command and parses its output.
    """
    output = subprocess.check_output(["subst"]).decode("utf-8")
    # The output is a set of lines like: `F:\: => C:\open\some\where`
    mapping = {}
    for line in output.strip().split("\r\n"):
        fields = line.split(": => ")
        if len(fields) != 2:
            # Skip anything that doesn't look like a mapping line.
            continue
        mapping[fields[0]] = fields[1]
    return mapping
def find_existing_win32_subst_for_path(
    path,  # type: str
    subst_mapping,  # type: typing.Mapping[str, str]
):
    # type: (...) -> typing.Optional[str]
    """Return the subst drive letter already mapped to ``path``, or None."""
    wanted = ntpath.normcase(ntpath.normpath(path))
    for letter, target in subst_mapping.items():
        # Compare case-insensitively with normalized separators.
        if ntpath.normcase(target) == wanted:
            return letter
    return None
def find_unused_drive_letter():
    """Return an unused Windows drive letter, or None if all 26 are taken.

    Windows-only: queries kernel32.GetLogicalDriveStringsA via ctypes.
    Later letters are preferred so common early letters stay free.
    """
    import ctypes

    buffer_len = 256
    blen = ctypes.c_uint(buffer_len)
    bufs = ctypes.create_string_buffer(buffer_len)
    # (The original also pre-assigned `rv = ctypes.c_uint()`, which was dead
    # code — the name is immediately rebound to the call's return value.)
    rv = ctypes.windll.kernel32.GetLogicalDriveStringsA(blen, bufs)
    if rv > buffer_len:
        raise Exception("GetLogicalDriveStringsA result too large for buffer")
    nul = "\x00".encode("ascii")
    # The raw buffer holds NUL-separated entries like b"C:\\"; keep the letter.
    used = [drive.decode("ascii")[0] for drive in bufs.raw.strip(nul).split(nul)]
    possible = [c for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
    available = sorted(list(set(possible) - set(used)))
    if len(available) == 0:
        return None
    # Prefer to assign later letters rather than earlier letters
    return available[-1]
def create_subst_path(path):
    """Map ``path`` to a short drive-letter path via ``subst`` and return it.

    Reuses an existing mapping when one already points at ``path``; otherwise
    picks a free drive letter and creates a mapping. Retries because other
    processes on the host may grab the chosen letter between the check and
    the ``subst`` call. Raises on failure.
    """
    for _attempt in range(0, 24):
        # Reuse an existing mapping if some prior run already made one.
        drive = find_existing_win32_subst_for_path(
            path, subst_mapping=list_win32_subst_letters()
        )
        if drive:
            return drive
        available = find_unused_drive_letter()
        if available is None:
            raise Exception(
                (
                    "unable to make shorter subst mapping for %s; "
                    "no available drive letters"
                )
                % path
            )

        # Try to set up a subst mapping; note that we may be racing with
        # other processes on the same host, so this may not succeed.
        try:
            subprocess.check_call(["subst", "%s:" % available, path])
            return "%s:\\" % available
        except Exception:
            print("Failed to map %s -> %s" % (available, path))

    raise Exception("failed to set up a subst path for %s" % path)
def _check_host_type(args, host_type):
    """Resolve the effective HostType, falling back to args or autodetection."""
    if host_type is None:
        tuple_string = getattr(args, "host_type", None)
        host_type = (
            HostType.from_tuple_string(tuple_string) if tuple_string else HostType()
        )
    assert isinstance(host_type, HostType)
    return host_type
def setup_build_options(args, host_type=None):
    """ Create a BuildOptions object based on the arguments """
    fbcode_builder_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    scratch_dir = args.scratch_path
    if not scratch_dir:
        # TODO: `mkscratch` doesn't currently know how best to place things on
        # sandcastle, so whip up something reasonable-ish
        if "SANDCASTLE" in os.environ:
            if "DISK_TEMP" not in os.environ:
                raise Exception(
                    (
                        "I need DISK_TEMP to be set in the sandcastle environment "
                        "so that I can store build products somewhere sane"
                    )
                )
            scratch_dir = os.path.join(
                os.environ["DISK_TEMP"], "fbcode_builder_getdeps"
            )
        if not scratch_dir:
            try:
                # Prefer the FB-internal mkscratch helper when available.
                scratch_dir = (
                    subprocess.check_output(
                        ["mkscratch", "path", "--subdir", "fbcode_builder_getdeps"]
                    )
                    .strip()
                    .decode("utf-8")
                )
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    # A legit failure; don't fall back, surface the error
                    raise
                # This system doesn't have mkscratch so we fall back to
                # something local.
                # Escape path separators into the directory name (Z-encoding)
                # so the per-checkout scratch dir name stays unique.
                munged = fbcode_builder_dir.replace("Z", "zZ")
                for s in ["/", "\\", ":"]:
                    munged = munged.replace(s, "Z")
                scratch_dir = os.path.join(
                    tempfile.gettempdir(), "fbcode_builder_getdeps-%s" % munged
                )
                if not os.path.exists(scratch_dir):
                    os.makedirs(scratch_dir)

        if is_windows():
            # Shorten long scratch paths via a subst drive letter to dodge
            # Windows MAX_PATH issues.
            subst = create_subst_path(scratch_dir)
            print(
                "Mapping scratch dir %s -> %s" % (scratch_dir, subst), file=sys.stderr
            )
            scratch_dir = subst
    else:
        if not os.path.exists(scratch_dir):
            os.makedirs(scratch_dir)

    # Make sure we normalize the scratch path.  This path is used as part of the hash
    # computation for detecting if projects have been updated, so we need to always
    # use the exact same string to refer to a given directory.
    scratch_dir = os.path.realpath(scratch_dir)

    host_type = _check_host_type(args, host_type)

    return BuildOptions(
        fbcode_builder_dir,
        scratch_dir,
        host_type,
        install_dir=args.install_prefix,
        num_jobs=args.num_jobs,
        use_shipit=args.use_shipit,
        vcvars_path=args.vcvars_path,
    )
| |
"""
session stuff for jabber connections
"""
from twisted.internet import defer, reactor
from twisted.python import failure, log
from twisted.web import server
from twisted.names.srvconnect import SRVConnector
try:
from twisted.words.xish import domish, xmlstream
from twisted.words.protocols import jabber as jabber_protocol
except ImportError:
from twisted.xish import domish, xmlstream
import traceback
import os
import warnings
from punjab import jabber
from punjab.xmpp import ns
import time
import error
try:
from twisted.internet import ssl
except ImportError:
ssl = None
# Treat an unusable ssl module the same as a missing one.
if ssl and not ssl.supported:
    ssl = None

if not ssl:
    log.msg("SSL ERROR: You do not have ssl support this may cause problems with tls client connections.")
class XMPPClientConnector(SRVConnector):
    """
    A jabber connection to find srv records for xmpp client connections.
    """
    def __init__(self, client_reactor, domain, factory):
        """ Init """
        # NOTE: Python 2 code — `unicode` is the py2 text type.
        if isinstance(domain, unicode):
            warnings.warn(
                "Domain argument to XMPPClientConnector should be bytes, "
                "not unicode",
                stacklevel=2)
            domain = domain.encode('ascii')
        SRVConnector.__init__(self, client_reactor, 'xmpp-client', domain, factory)
        # Short SRV retry timeouts (seconds) for quicker failover.
        self.timeout = [1,3]

    def pickServer(self):
        """
        Pick a server and port to make the connection.
        """
        host, port = SRVConnector.pickServer(self)

        # Port 5223 is the legacy direct-SSL XMPP port; switch the connector
        # to connectSSL with a permissive SSLv23 context when ssl is usable.
        if port == 5223 and ssl:
            context = ssl.ClientContextFactory()
            context.method = ssl.SSL.SSLv23_METHOD
            self.connectFuncName = 'connectSSL'
            self.connectFuncArgs = (context,)

        return host, port
def make_session(pint, attrs, session_type='BOSH'):
    """Create and register a new Session for a BOSH session-request body.

    pint - punjab session interface class
    attrs - attributes sent from the body tag

    Returns (session, deferred-of-first-waiting-request).
    """
    s = Session(pint, attrs)
    if pint.v:
        log.msg('================================== %s connect to %s:%s ==================================' % (str(time.time()),s.hostname,s.port))
    # Skip SRV lookup when an explicit route was given or the host is local.
    # (Python 2 code: dict.has_key.)
    connect_srv = s.connect_srv

    if attrs.has_key('route'):
        connect_srv = False

    if s.hostname in ['localhost', '127.0.0.1']:
        connect_srv = False

    if not connect_srv:
        reactor.connectTCP(s.hostname, s.port, s, bindAddress=pint.bindAddress)
    else:
        connector = XMPPClientConnector(reactor, s.hostname, s)
        connector.connect()
    # timeout
    # Schedule the inactivity check that expires idle sessions.
    reactor.callLater(s.inactivity, s.checkExpired)
    pint.sessions[s.sid] = s

    return s, s.waiting_requests[0].deferred
class WaitingRequest(object):
    """A helper object for managing waiting (held) requests."""

    def __init__(self, deferred, delayedcall, timeout = 30, startup = False, rid = None):
        """Record the deferred, its timeout machinery and bookkeeping data."""
        # Wall-clock time at which this request started waiting.
        self.wait_start = time.time()
        self.deferred = deferred
        self.delayedcall = delayedcall
        self.rid = rid
        self.timeout = timeout
        self.startup = startup

    def doCallback(self, data):
        """Fire the deferred's success path with *data*."""
        self.deferred.callback(data)

    def doErrback(self, data):
        """Fire the deferred's error path with *data*."""
        self.deferred.errback(data)
class Session(jabber.JabberClientFactory, server.Session):
    """ Jabber Client Session class for client XMPP connections.

    Bridges one BOSH (XEP-0124) HTTP session onto one outbound XMPP client
    stream: it queues HTTP requests as WaitingRequest objects, collects
    stanzas from the xmlstream into ``self.elems``, and pairs the two up.
    """

    def __init__(self, pint, attrs):
        """
        Initialize the session.

        pint  - punjab session interface (provides verbosity, sessions map,
                connect_srv/use_raw defaults)
        attrs - attribute dict from the BOSH <body/> session-creation request
        """
        # Character set the client speaks; default utf-8.
        if attrs.has_key('charset'):
            self.charset = str(attrs['charset'])
        else:
            self.charset = 'utf-8'
        self.to = attrs['to']
        self.port = 5222
        self.inactivity = 900
        if self.to != '' and self.to.find(":") != -1:
            # Check if port is in the 'to' string
            to, port = self.to.split(':')
            if port:
                self.to = to
                self.port = int(port)
            else:
                self.port = 5222
        # Random 40-hex-char session id.
        self.sid = "".join("%02x" % ord(i) for i in os.urandom(20))
        jabber.JabberClientFactory.__init__(self, self.to, pint.v)
        server.Session.__init__(self, pint, self.sid)
        self.pint = pint
        self.attrs = attrs
        self.s = None
        # Stanzas received from the server, waiting to be handed to a request.
        self.elems = []
        rid = int(attrs['rid'])
        # Outstanding HTTP requests (WaitingRequest instances).
        self.waiting_requests = []
        self.use_raw = attrs.get('raw', False)
        self.raw_buffer = u""
        self.xmpp_node = ''
        self.success = 0
        self.mechanisms = []
        self.xmlstream = None
        self.features = None
        self.session = None
        # Cache of recent (rid -> response) pairs for request retransmits.
        self.cache_data = {}
        self.verbose = self.pint.v
        self.noisy = self.verbose
        self.version = attrs.get('version', 0.0)
        self.key = attrs.get('newkey')
        # BOSH negotiation knobs from the <body/> attributes.
        self.wait = int(attrs.get('wait', 0))
        self.hold = int(attrs.get('hold', 0))
        self.inactivity = int(attrs.get('inactivity', 900)) # default inactivity 15 mins
        if attrs.has_key('window'):
            self.window = int(attrs['window'])
        else:
            self.window = self.hold + 2
        if attrs.has_key('polling'):
            self.polling = int(attrs['polling'])
        else:
            self.polling = 0
        if attrs.has_key('port'):
            self.port = int(attrs['port'])
        if attrs.has_key('hostname'):
            self.hostname = attrs['hostname']
        else:
            self.hostname = self.to
        self.use_raw = getattr(pint, 'use_raw', False) # use raw buffers
        self.connect_srv = getattr(pint, 'connect_srv', True)
        self.secure = attrs.has_key('secure') and attrs['secure'] == 'true'
        self.authenticator.useTls = self.secure
        # An explicit 'route' attribute overrides hostname/port.
        if attrs.has_key('route'):
            if attrs['route'].startswith("xmpp:"):
                self.route = attrs['route'][5:]
                if self.route.startswith("//"):
                    self.route = self.route[2:]
                # route format change, see http://www.xmpp.org/extensions/xep-0124.html#session-request
                rhostname, rport = self.route.split(":")
                self.port = int(rport)
                self.hostname = rhostname
                self.resource = ''
            else:
                raise error.Error('internal-server-error')
        self.authid = 0
        self.rid = rid + 1
        self.connected = 0 # number of clients connected on this session
        self.notifyOnExpire(self.onExpire)
        self.stream_error = None
        if pint.v:
            log.msg('Session Created : %s %s' % (str(self.sid),str(time.time()), ))
        self.stream_error_called = False
        # Hook the xmlstream lifecycle events.
        self.addBootstrap(xmlstream.STREAM_START_EVENT, self.streamStart)
        self.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connectEvent)
        self.addBootstrap(xmlstream.STREAM_ERROR_EVENT, self.streamError)
        self.addBootstrap(xmlstream.STREAM_END_EVENT, self.connectError)
        # create the first waiting request
        d = defer.Deferred()
        timeout = 30
        rid = self.rid - 1
        self.appendWaitingRequest(d, rid,
                                  timeout=timeout,
                                  poll=self._startup_timeout,
                                  startup=True,
                                  )

    def rawDataIn(self, buf):
        """ Log incoming data on the xmlstream """
        if self.pint and self.pint.v:
            try:
                log.msg("SID: %s => RECV: %r" % (self.sid, buf,))
            except:
                log.err()
        if self.use_raw and self.authid:
            if type(buf) == type(''):
                buf = unicode(buf, 'utf-8')
            # add some raw data
            self.raw_buffer = self.raw_buffer + buf

    def rawDataOut(self, buf):
        """ Log outgoing data on the xmlstream """
        try:
            log.msg("SID: %s => SEND: %r" % (self.sid, buf,))
        except:
            log.err()

    def _wrPop(self, data, i=0):
        """Pop off a waiting requst, do callback, and cache request
        """
        wr = self.waiting_requests.pop(i)
        wr.doCallback(data)
        # Remember the response so a retransmitted rid can be answered.
        self._cacheData(wr.rid, data)

    def clearWaitingRequests(self, hold = 0):
        """clear number of requests given

           hold - number of requests to clear, default is all
        """
        while len(self.waiting_requests) > hold:
            self._wrPop([])

    def _wrError(self, err, i = 0):
        # Pop waiting request *i* and fail it with *err*.
        wr = self.waiting_requests.pop(i)
        wr.doErrback(err)

    def appendWaitingRequest(self, d, rid, timeout=None, poll=None, startup=False):
        """append waiting request

        Defaults: timeout falls back to the negotiated BOSH 'wait' value,
        poll to the regular _pollTimeout handler.
        """
        if timeout is None:
            timeout = self.wait
        if poll is None:
            poll = self._pollTimeout
        self.waiting_requests.append(
            WaitingRequest(d,
                           poll,
                           timeout = timeout,
                           rid = rid,
                           startup=startup))

    def returnWaitingRequests(self):
        """return a waiting request

        Hands all buffered stanzas to pending requests, one batch per
        request, until either list is exhausted.
        """
        while len(self.elems) > 0 and len(self.waiting_requests) > 0:
            data = self.elems
            self.elems = []
            self._wrPop(data)

    def onExpire(self):
        """ When the session expires call this. """
        if 'onExpire' in dir(self.pint):
            self.pint.onExpire(self.sid)
        if self.verbose and not getattr(self, 'terminated', False):
            log.msg('SESSION -> We have expired', self.sid, self.rid, self.waiting_requests)
        self.disconnect()

    def terminate(self):
        """Terminates the session."""
        self.wait = 0
        self.terminated = True
        if self.verbose:
            log.msg('SESSION -> Terminate')
        # if there are any elements hanging around and waiting
        # requests, send those off
        self.returnWaitingRequests()
        self.clearWaitingRequests()
        try:
            self.expire()
        except:
            self.onExpire()
        return defer.succeed(self.elems)

    def poll(self, d = None, rid = None):
        """Handles the responses to requests.

        This function is called for every request except session setup
        and session termination.  It handles the reply portion of the
        request by returning a deferred which will get called back
        when there is data or when the wait timeout expires.
        """
        # queue this request
        if d is None:
            d = defer.Deferred()
        if self.pint.error:
            d.addErrback(self.pint.error)
        if not rid:
            rid = self.rid - 1
        self.appendWaitingRequest(d, rid)
        # check if there is any data to send back to a request
        self.returnWaitingRequests()
        # make sure we aren't queueing too many requests
        self.clearWaitingRequests(self.hold)
        return d

    def _pollTimeout(self, d):
        """Handle request timeouts.

        Since the timeout function is called, we must return an empty
        reply as there is no data to send back.
        """
        # find the request that timed out and reply
        pop_eye = []
        for i in range(len(self.waiting_requests)):
            if self.waiting_requests[i].deferred == d:
                pop_eye.append(i)
                self.touch()
        for i in pop_eye:
            self._wrPop([],i)

    def _pollForId(self, d):
        # Like _pollTimeout, but first records the stream id as authid
        # if the xmlstream has produced one.
        if self.xmlstream.sid:
            self.authid = self.xmlstream.sid
        self._pollTimeout(d)

    def connectEvent(self, xs):
        # Stream transport connected: remember the xmlstream and wire up
        # logging / version 1.0 feature handling.
        self.version = self.authenticator.version
        self.xmlstream = xs
        if self.pint.v:
            # add logging for verbose output
            self.xmlstream.rawDataOutFn = self.rawDataOut
            self.xmlstream.rawDataInFn = self.rawDataIn
        if self.version == '1.0':
            self.xmlstream.addObserver("/features", self.featuresHandler)

    def streamStart(self, xs):
        """
        A xmpp stream has started
        """
        # This is done to fix the stream id problem, I should submit a bug to twisted bugs
        try:
            self.authid = self.xmlstream.sid
            if not self.attrs.has_key('no_events'):
                # Observers that forward stanzas into this session's buffer.
                self.xmlstream.addOnetimeObserver("/auth", self.stanzaHandler)
                self.xmlstream.addOnetimeObserver("/response", self.stanzaHandler)
                self.xmlstream.addOnetimeObserver("/success", self._saslSuccess)
                self.xmlstream.addOnetimeObserver("/failure", self._saslError)
                self.xmlstream.addObserver("/iq/bind", self.bindHandler)
                self.xmlstream.addObserver("/bind", self.stanzaHandler)
                self.xmlstream.addObserver("/challenge", self.stanzaHandler)
                self.xmlstream.addObserver("/message", self.stanzaHandler)
                self.xmlstream.addObserver("/iq", self.stanzaHandler)
                self.xmlstream.addObserver("/presence", self.stanzaHandler)
                # TODO - we should do something like this
                # self.xmlstream.addObserver("/*", self.stanzaHandler)
        except:
            log.err(traceback.print_exc())
            self._wrError(error.Error("remote-connection-failed"))
            self.disconnect()

    def featuresHandler(self, f):
        """
        handle stream:features
        """
        f.prefixes = ns.XMPP_PREFIXES.copy()
        #check for tls
        self.f = {}
        for feature in f.elements():
            self.f[(feature.uri, feature.name)] = feature
        starttls = (ns.TLS_XMLNS, 'starttls') in self.f
        initializers = getattr(self.xmlstream, 'initializers', [])
        self.features = f
        self.xmlstream.features = f
        # There is a tls initializer added by us, if it is available we need to try it
        if len(initializers)>0 and starttls:
            self.secure = True
        if self.authid is None:
            self.authid = self.xmlstream.sid
        # If we get tls, then we should start tls, wait and then return
        # Here we wait, the tls initializer will start it
        if starttls and self.secure:
            if self.verbose:
                log.msg("Wait until starttls is completed.")
                log.msg(initializers)
            return
        self.elems.append(f)
        if len(self.waiting_requests) > 0:
            self.returnWaitingRequests()
            self.elems = [] # reset elems
            self.raw_buffer = u"" # reset raw buffer, features should not be in it

    def bindHandler(self, stz):
        """bind debugger for punjab, this is temporary! """
        if self.verbose:
            try:
                log.msg('BIND: %s %s' % (str(self.sid), str(stz.bind.jid)))
            except:
                log.err()
        if self.use_raw:
            self.raw_buffer = stz.toXml()

    def stanzaHandler(self, stz):
        """generic stanza handler for httpbind and httppoll"""
        stz.prefixes = ns.XMPP_PREFIXES
        if self.use_raw and self.authid:
            # Replace the parsed stanza with the raw bytes we buffered.
            stz = domish.SerializedXML(self.raw_buffer)
            self.raw_buffer = u""
        self.elems.append(stz)
        if self.waiting_requests and len(self.waiting_requests) > 0:
            # if there are any waiting requests, give them all the
            # data so far, plus this new data
            self.returnWaitingRequests()

    def _startup_timeout(self, d):
        # this can be called if connection failed, or if we connected
        # but never got a stream features before the timeout
        if self.pint.v:
            log.msg('================================== %s %s startup timeout ==================================' % (str(self.sid), str(time.time()),))
        for i in range(len(self.waiting_requests)):
            if self.waiting_requests[i].deferred == d:
                # check if we really failed or not
                if self.authid:
                    self._wrPop(self.elems, i=i)
                else:
                    self._wrError(error.Error("remote-connection-failed"), i=i)

    def buildRemoteError(self, err_elem=None):
        # Wrap a (possibly absent) stream error element in a punjab error.
        # This may not be a stream error, such as an XML parsing error.
        # So expose it as remote-connection-failed.
        err = 'remote-connection-failed'
        if err_elem is not None:
            # This is an actual stream:error. Create a remote-stream-error to encapsulate it.
            err = 'remote-stream-error'
        e = error.Error(err)
        e.error_stanza = err
        e.children = []
        if err_elem is not None:
            e.children.append(err_elem)
        return e

    def streamError(self, streamerror):
        """called when we get a stream:error stanza"""
        self.stream_error_called = True
        try:
            err_elem = streamerror.value.getElement()
        except AttributeError:
            err_elem = None
        e = self.buildRemoteError(err_elem)
        do_expire = True
        if len(self.waiting_requests) > 0:
            wr = self.waiting_requests.pop(0)
            wr.doErrback(e)
        else: # need to wait for a new request and then expire
            do_expire = False
        if self.pint and self.pint.sessions.has_key(self.sid):
            if do_expire:
                try:
                    self.expire()
                except:
                    self.onExpire()
            else:
                # Stash the error so the next request can report it.
                s = self.pint.sessions.get(self.sid)
                s.stream_error = e

    def connectError(self, reason):
        """called when we get disconnected"""
        if self.stream_error_called: return
        # Before Twisted 11.x the xmlstream object was passed instead of the
        # disconnect reason. See http://twistedmatrix.com/trac/ticket/2618
        if not isinstance(reason, failure.Failure):
            reason_str = 'Reason unknown'
        else:
            reason_str = str(reason)
        # If the connection was established and lost, then we need to report
        # the error back to the client, since he needs to reauthenticate.
        # FIXME: If the connection was lost before anything happened, we could
        # silently retry instead.
        if self.verbose:
            log.msg('connect ERROR: %s' % reason_str)
        self.stopTrying()
        e = error.Error('remote-connection-failed')
        do_expire = True
        if self.waiting_requests:
            wr = self.waiting_requests.pop(0)
            wr.doErrback(e)
        else: # need to wait for a new request and then expire
            do_expire = False
        if self.pint and self.pint.sessions.has_key(self.sid):
            if do_expire:
                try:
                    self.expire()
                except:
                    self.onExpire()
            else:
                # Stash the error so the next request can report it.
                s = self.pint.sessions.get(self.sid)
                s.stream_error = e

    def sendRawXml(self, obj):
        """
        Send a raw xml string, not a domish.Element
        """
        self.touch()
        self._send(obj)

    def _send(self, xml):
        """
        Send valid data over the xmlstream
        """
        if self.xmlstream: # FIXME this happens on an expired session and the post has something to send
            if isinstance(xml, domish.Element):
                xml.localPrefixes = {}
            self.xmlstream.send(xml)

    def _removeObservers(self, typ = ''):
        # Strip every callback from the xmlstream's observer tables
        # ('event' observers or the default xpath observers).
        if typ == 'event':
            observers = self.xmlstream._eventObservers
        else:
            observers = self.xmlstream._xpathObservers
        emptyLists = []
        for priority, priorityObservers in observers.iteritems():
            for query, callbacklist in priorityObservers.iteritems():
                callbacklist.callbacks = []
                emptyLists.append((priority, query))
        for priority, query in emptyLists:
            del observers[priority][query]

    def disconnect(self):
        """
        Disconnect from the xmpp server.
        """
        if not getattr(self, 'xmlstream',None):
            return
        if self.xmlstream:
            #sh = "<presence type='unavailable' xmlns='jabber:client'/>"
            sh = "</stream:stream>"
            self.xmlstream.send(sh)
        self.stopTrying()
        if self.xmlstream:
            self.xmlstream.transport.loseConnection()
            del self.xmlstream
        self.connected = 0
        self.pint = None
        self.elems = []
        if self.waiting_requests:
            self.clearWaitingRequests()
            del self.waiting_requests
        self.mechanisms = None
        self.features = None

    def checkExpired(self):
        """
        Check if the session or xmpp connection has expired
        """
        # send this so we do not timeout from servers
        if getattr(self, 'xmlstream', None):
            self.xmlstream.send(' ')
        if self.inactivity is None:
            wait = 900
        elif self.inactivity == 0:
            wait = time.time()
        else:
            wait = self.inactivity
        if self.waiting_requests and len(self.waiting_requests)>0:
            wait += self.wait # if we have pending requests we need to add the wait time
        if time.time() - self.lastModified > wait+(0.1):
            if self.site.sessions.has_key(self.uid):
                self.terminate()
            else:
                pass
        else:
            # Not expired yet; re-check after the computed interval.
            reactor.callLater(wait, self.checkExpired)

    def _cacheData(self, rid, data):
        # Keep at most 3 recent responses keyed by rid (for retransmits);
        # evict the oldest rid when full.
        if len(self.cache_data.keys())>=3:
            # remove the first one in
            keys = self.cache_data.keys()
            keys.sort()
            del self.cache_data[keys[0]]
        self.cache_data[int(rid)] = data

    # This stuff will leave when SASL and TLS are implemented correctly
    # session stuff
    def _sessionResultEvent(self, iq):
        """Resolve the oldest waiting request with the session iq result."""
        if len(self.waiting_requests)>0:
            wr = self.waiting_requests.pop(0)
            d = wr.deferred
        else:
            d = None
        if iq["type"] == "result":
            if d:
                d.callback(self)
        else:
            if d:
                d.errback(self)

    def _saslSuccess(self, s):
        """Handle SASL <success/>: relay it and reset the authenticator."""
        self.success = 1
        self.s = s
        # return success to the client
        if len(self.waiting_requests)>0:
            self._wrPop([s])
        self.authenticator._reset()
        if self.use_raw:
            self.raw_buffer = u""

    def _saslError(self, sasl_error, d = None):
        """ SASL error """
        if d:
            d.errback(self)
        if len(self.waiting_requests)>0:
            self._wrPop([sasl_error])
| |
from __future__ import with_statement
import textwrap
import os
import sys
from os.path import join, normpath
from tempfile import mkdtemp
from mock import patch
from tests.lib import assert_all_changes, pyversion
from tests.lib.local_repos import local_repo, local_checkout
from pip.util import rmtree
def test_simple_uninstall(script):
    """
    Test simple install and uninstall.
    """
    install_result = script.pip('install', 'INITools==0.2')
    created = sorted(install_result.files_created.keys())
    assert join(script.site_packages, 'initools') in install_result.files_created, created
    # Importing the package forces __pycache__ generation on pythons that
    # support it, so the uninstall has to clean that up as well.
    script.run('python', '-c', "import initools")
    uninstall_result = script.pip('uninstall', 'INITools', '-y')
    assert_all_changes(install_result, uninstall_result,
                       [script.venv / 'build', 'cache'])
def test_uninstall_with_scripts(script):
    """
    Uninstall an easy_installed package with scripts.

    Verifies the project shows up in easy-install.pth after installation
    and that ``pip uninstall`` reverts every change.
    """
    result = script.run('easy_install', 'PyLogo', expect_stderr=True)
    easy_install_pth = script.site_packages / 'easy-install.pth'
    # easy_install records the project name in lowercase on Windows.
    # (Was the fragile ``cond and a or b`` idiom; a conditional expression
    # is equivalent here and cannot misfire on falsy values.)
    pylogo = 'pylogo' if sys.platform == 'win32' else 'PyLogo'
    assert(pylogo in result.files_updated[easy_install_pth].bytes)
    result2 = script.pip('uninstall', 'pylogo', '-y')
    assert_all_changes(
        result,
        result2,
        [script.venv / 'build', 'cache', easy_install_pth],
    )
def test_uninstall_easy_install_after_import(script):
    """
    Uninstall an easy_installed package after it's been imported
    """
    install_result = script.run('easy_install', 'INITools==0.2', expect_stderr=True)
    # Importing forces __pycache__ generation where the python supports it.
    script.run('python', '-c', "import initools")
    uninstall_result = script.pip('uninstall', 'INITools', '-y')
    ignored_changes = [
        script.venv / 'build',
        'cache',
        script.site_packages / 'easy-install.pth',
    ]
    assert_all_changes(install_result, uninstall_result, ignored_changes)
def test_uninstall_namespace_package(script):
    """
    Uninstall a distribution with a namespace package without clobbering
    the namespace and everything in it.
    """
    install_result = script.pip('install', 'pd.requires==0.0.3', expect_error=True)
    assert join(script.site_packages, 'pd') in install_result.files_created, (
        sorted(install_result.files_created.keys())
    )
    uninstall_result = script.pip('uninstall', 'pd.find', '-y', expect_error=True)
    deleted = sorted(uninstall_result.files_deleted.keys())
    # The shared 'pd' namespace directory must survive the uninstall...
    assert join(script.site_packages, 'pd') not in uninstall_result.files_deleted, deleted
    # ...while the distribution's own 'pd.find' subpackage is removed.
    assert join(script.site_packages, 'pd', 'find') in uninstall_result.files_deleted, deleted
def test_uninstall_overlapping_package(script, data):
    """
    Uninstalling a distribution that adds modules to a pre-existing package
    should only remove those added modules, not the rest of the existing
    package.

    See: GitHub issue #355 (pip uninstall removes things it didn't install)
    """
    parent_pkg = data.packages.join("parent-0.1.tar.gz")
    child_pkg = data.packages.join("child-0.1.tar.gz")
    result1 = script.pip('install', parent_pkg, expect_error=False)
    assert join(script.site_packages, 'parent') in result1.files_created, (
        sorted(result1.files_created.keys())
    )
    result2 = script.pip('install', child_pkg, expect_error=False)
    assert join(script.site_packages, 'child') in result2.files_created, (
        sorted(result2.files_created.keys())
    )
    assert normpath(
        join(script.site_packages, 'parent/plugins/child_plugin.py')
    ) in result2.files_created, sorted(result2.files_created.keys())
    # The import forces the generation of __pycache__ if the version of python
    # supports it
    script.run('python', '-c', "import parent.plugins.child_plugin, child")
    result3 = script.pip('uninstall', '-y', 'child', expect_error=False)
    # BUGFIX: the failure diagnostic previously dumped result3.files_created,
    # but this assertion is about what was *deleted*.
    assert join(script.site_packages, 'child') in result3.files_deleted, (
        sorted(result3.files_deleted.keys())
    )
    assert normpath(
        join(script.site_packages, 'parent/plugins/child_plugin.py')
    ) in result3.files_deleted, sorted(result3.files_deleted.keys())
    assert join(script.site_packages, 'parent') not in result3.files_deleted, (
        sorted(result3.files_deleted.keys())
    )
    # Additional check: uninstalling 'child' should return things to the
    # previous state, without unintended side effects.
    assert_all_changes(result2, result3, [])
def test_uninstall_console_scripts(script):
    """
    Test uninstalling a package with more files (console_script entry points,
    extra directories).
    """
    install_result = script.pip('install', 'discover', expect_error=True)
    console_script = script.bin / 'discover' + script.exe
    assert console_script in install_result.files_created, (
        sorted(install_result.files_created.keys())
    )
    uninstall_result = script.pip('uninstall', 'discover', '-y', expect_error=True)
    assert_all_changes(install_result, uninstall_result,
                       [script.venv / 'build', 'cache'])
def test_uninstall_easy_installed_console_scripts(script):
    """
    Test uninstalling package with console_scripts that is easy_installed.
    """
    install_result = script.run('easy_install', 'discover', expect_stderr=True)
    console_script = script.bin / 'discover' + script.exe
    assert console_script in install_result.files_created, (
        sorted(install_result.files_created.keys())
    )
    uninstall_result = script.pip('uninstall', 'discover', '-y')
    ignored_changes = [
        script.venv / 'build',
        'cache',
        script.site_packages / 'easy-install.pth',
    ]
    assert_all_changes(install_result, uninstall_result, ignored_changes)
def test_uninstall_editable_from_svn(script, tmpdir):
    """
    Test uninstalling an editable installation from svn.
    """
    install_result = script.pip(
        'install', '-e',
        '%s#egg=initools-dev' % local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache"),
        ),
    )
    install_result.assert_installed('INITools')
    uninstall_result = script.pip('uninstall', '-y', 'initools')
    # The editable checkout itself is intentionally left under venv/src.
    assert (script.venv / 'src' / 'initools' in uninstall_result.files_after)
    ignored_changes = [
        script.venv / 'src',
        script.venv / 'build',
        script.site_packages / 'easy-install.pth'
    ]
    assert_all_changes(install_result, uninstall_result, ignored_changes)
def test_uninstall_editable_with_source_outside_venv(script, tmpdir):
    """
    Test uninstalling editable install from existing source outside the venv.
    """
    cache_dir = tmpdir.join("cache")
    # BUGFIX: create the scratch directory *before* entering the try block.
    # Previously ``temp`` was assigned inside the try, so if mkdtemp() itself
    # raised, the finally clause referenced an unbound name (NameError)
    # instead of propagating the real error.
    temp = mkdtemp()
    try:
        tmpdir = join(temp, 'pip-test-package')
        _test_uninstall_editable_with_source_outside_venv(
            script,
            tmpdir,
            cache_dir,
        )
    finally:
        rmtree(temp)
def _test_uninstall_editable_with_source_outside_venv(
        script, tmpdir, cache_dir):
    """Helper: clone outside the venv, install editable, then uninstall."""
    clone_result = script.run(
        'git', 'clone',
        local_repo(
            'git+git://github.com/pypa/pip-test-package',
            cache_dir,
        ),
        tmpdir,
        expect_stderr=True,
    )
    install_result = script.pip('install', '-e', tmpdir)
    egg_link = join(script.site_packages, 'pip-test-package.egg-link')
    assert egg_link in install_result.files_created, list(install_result.files_created.keys())
    uninstall_result = script.pip('uninstall', '-y',
                                  'pip-test-package', expect_error=True)
    assert_all_changes(
        clone_result,
        uninstall_result,
        [script.venv / 'build', script.site_packages / 'easy-install.pth'],
    )
def test_uninstall_from_reqs_file(script, tmpdir):
    """
    Test uninstall from a requirements file.

    Installs from one requirements file, then uninstalls with a second
    file that additionally contains index options (-f/-i/--extra-index-url),
    which ``pip uninstall`` must ignore.
    """
    # Requirements used for the install step: one editable svn checkout
    # plus one pinned package.
    script.scratch_path.join("test-req.txt").write(
        textwrap.dedent("""
        -e %s#egg=initools-dev
        # and something else to test out:
        PyLogo<0.4
        """) %
        local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache")
        )
    )
    result = script.pip('install', '-r', 'test-req.txt')
    # Rewrite the requirements file with index options prepended; uninstall
    # must skip those lines rather than choke on them.
    script.scratch_path.join("test-req.txt").write(
        textwrap.dedent("""
        # -f, -i, and --extra-index-url should all be ignored by uninstall
        -f http://www.example.com
        -i http://www.example.com
        --extra-index-url http://www.example.com

        -e %s#egg=initools-dev
        # and something else to test out:
        PyLogo<0.4
        """) %
        local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache")
        )
    )
    result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y')
    assert_all_changes(
        result,
        result2,
        [
            script.venv / 'build',
            script.venv / 'src',
            script.scratch / 'test-req.txt',
            script.site_packages / 'easy-install.pth',
        ],
    )
def test_uninstall_as_egg(script, data):
    """
    Test uninstall package installed as egg.
    """
    to_install = data.packages.join("FSPkg")
    install_result = script.pip('install', to_install, '--egg', expect_error=False)
    fspkg_folder = script.site_packages / 'fspkg'
    egg_folder = script.site_packages / 'FSPkg-0.1dev-py%s.egg' % pyversion
    # An --egg install keeps everything inside the versioned .egg directory,
    # so the bare package folder must not appear in site-packages.
    assert fspkg_folder not in install_result.files_created, str(install_result.stdout)
    assert egg_folder in install_result.files_created, str(install_result)
    uninstall_result = script.pip('uninstall', 'FSPkg', '-y', expect_error=True)
    ignored_changes = [
        script.venv / 'build',
        'cache',
        script.site_packages / 'easy-install.pth',
    ]
    assert_all_changes(install_result, uninstall_result, ignored_changes)
@patch('pip.req.req_uninstall.logger')
def test_uninstallpathset_no_paths(mock_logger):
    """
    Test UninstallPathSet logs notification when there are no paths to
    uninstall
    """
    from pip.req.req_uninstall import UninstallPathSet
    from pkg_resources import get_distribution
    pip_dist = get_distribution('pip')
    # Force the "local distribution" code path; the real check also looks
    # at egg-link files, so patching is the only reliable way.
    with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local:
        mock_dist_is_local.return_value = True
        path_set = UninstallPathSet(pip_dist)
        # Removing with no files registered should only log a notice.
        path_set.remove()
    mock_logger.notify.assert_any_call(
        "Can't uninstall 'pip'. No files were found to uninstall.",
    )
@patch('pip.req.req_uninstall.logger')
def test_uninstallpathset_non_local(mock_logger):
    """
    Test UninstallPathSet logs notification and returns (with no exception)
    when dist is non-local
    """
    nonlocal_path = os.path.abspath("/nonlocal")
    from pip.req.req_uninstall import UninstallPathSet
    from pkg_resources import get_distribution
    test_dist = get_distribution('pip')
    test_dist.location = nonlocal_path
    # ensure that the distribution is "non-local"
    # setting location isn't enough, due to egg-link file checking for
    # develop-installs
    with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local:
        mock_dist_is_local.return_value = False
        uninstall_set = UninstallPathSet(test_dist)
        # with no files added to set; which is the case when trying to remove
        # non-local dists
        uninstall_set.remove()
    mock_logger.notify.assert_any_call(
        "Not uninstalling pip at %s, outside environment %s" %
        (nonlocal_path, sys.prefix)
    )
    # FIX: removed the trailing bare ``mock_logger.notify.mock_calls``
    # expression statement — it evaluated an attribute and discarded the
    # result, asserting nothing (dead code left over from debugging).
def test_uninstall_wheel(script, data):
    """
    Test uninstalling a wheel
    """
    wheel_path = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
    install_result = script.pip('install', wheel_path, '--no-index')
    dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
    assert dist_info_folder in install_result.files_created
    uninstall_result = script.pip('uninstall', 'simple.dist', '-y')
    # Uninstalling must revert every change the wheel install made.
    assert_all_changes(install_result, uninstall_result, [])
| |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class CreditCardCharge(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
CreditCardCharge - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'amount': 'float',
'tax_amount': 'float',
'currency': 'str',
'credit_card': 'str',
'installments': 'float',
'order_id': 'str',
'description': 'str',
'third_party_id': 'str',
'paid': 'bool',
'customer': 'str',
'payment_transaction': 'str'
}
self.attribute_map = {
'id': 'id',
'amount': 'amount',
'tax_amount': 'taxAmount',
'currency': 'currency',
'credit_card': 'creditCard',
'installments': 'installments',
'order_id': 'orderId',
'description': 'description',
'third_party_id': 'thirdPartyId',
'paid': 'paid',
'customer': 'customer',
'payment_transaction': 'paymentTransaction'
}
self._id = None
self._amount = None
self._tax_amount = None
self._currency = None
self._credit_card = None
self._installments = None
self._order_id = None
self._description = None
self._third_party_id = None
self._paid = None
self._customer = None
self._payment_transaction = None
@property
def id(self):
"""
Gets the id of this CreditCardCharge.
:return: The id of this CreditCardCharge.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this CreditCardCharge.
:param id: The id of this CreditCardCharge.
:type: str
"""
self._id = id
@property
def amount(self):
"""
Gets the amount of this CreditCardCharge.
:return: The amount of this CreditCardCharge.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this CreditCardCharge.
:param amount: The amount of this CreditCardCharge.
:type: float
"""
self._amount = amount
@property
def tax_amount(self):
"""
Gets the tax_amount of this CreditCardCharge.
:return: The tax_amount of this CreditCardCharge.
:rtype: float
"""
return self._tax_amount
@tax_amount.setter
def tax_amount(self, tax_amount):
"""
Sets the tax_amount of this CreditCardCharge.
:param tax_amount: The tax_amount of this CreditCardCharge.
:type: float
"""
self._tax_amount = tax_amount
@property
def currency(self):
"""
Gets the currency of this CreditCardCharge.
3-letter ISO code for currency.
:return: The currency of this CreditCardCharge.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this CreditCardCharge.
3-letter ISO code for currency.
:param currency: The currency of this CreditCardCharge.
:type: str
"""
self._currency = currency
@property
def credit_card(self):
"""
Gets the credit_card of this CreditCardCharge.
:return: The credit_card of this CreditCardCharge.
:rtype: str
"""
return self._credit_card
@credit_card.setter
def credit_card(self, credit_card):
"""
Sets the credit_card of this CreditCardCharge.
:param credit_card: The credit_card of this CreditCardCharge.
:type: str
"""
self._credit_card = credit_card
@property
def installments(self):
"""
Gets the installments of this CreditCardCharge.
The amount of payments to divide the charge amount.
:return: The installments of this CreditCardCharge.
:rtype: float
"""
return self._installments
@installments.setter
def installments(self, installments):
"""
Sets the installments of this CreditCardCharge.
The amount of payments to divide the charge amount.
:param installments: The installments of this CreditCardCharge.
:type: float
"""
self._installments = installments
@property
def order_id(self):
"""
Gets the order_id of this CreditCardCharge.
:return: The order_id of this CreditCardCharge.
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""
Sets the order_id of this CreditCardCharge.
:param order_id: The order_id of this CreditCardCharge.
:type: str
"""
self._order_id = order_id
@property
def description(self):
"""
Gets the description of this CreditCardCharge.
:return: The description of this CreditCardCharge.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this CreditCardCharge.
:param description: The description of this CreditCardCharge.
:type: str
"""
self._description = description
@property
def third_party_id(self):
"""
Gets the third_party_id of this CreditCardCharge.
:return: The third_party_id of this CreditCardCharge.
:rtype: str
"""
return self._third_party_id
@third_party_id.setter
def third_party_id(self, third_party_id):
"""
Sets the third_party_id of this CreditCardCharge.
:param third_party_id: The third_party_id of this CreditCardCharge.
:type: str
"""
self._third_party_id = third_party_id
@property
def paid(self):
"""
Gets the paid of this CreditCardCharge.
:return: The paid of this CreditCardCharge.
:rtype: bool
"""
return self._paid
@paid.setter
def paid(self, paid):
"""
Sets the paid of this CreditCardCharge.
:param paid: The paid of this CreditCardCharge.
:type: bool
"""
self._paid = paid
@property
def customer(self):
"""
Gets the customer of this CreditCardCharge.
:return: The customer of this CreditCardCharge.
:rtype: str
"""
return self._customer
@customer.setter
def customer(self, customer):
"""
Sets the customer of this CreditCardCharge.
:param customer: The customer of this CreditCardCharge.
:type: str
"""
self._customer = customer
@property
def payment_transaction(self):
"""
Gets the payment_transaction of this CreditCardCharge.
:return: The payment_transaction of this CreditCardCharge.
:rtype: str
"""
return self._payment_transaction
@payment_transaction.setter
def payment_transaction(self, payment_transaction):
"""
Sets the payment_transaction of this CreditCardCharge.
:param payment_transaction: The payment_transaction of this CreditCardCharge.
:type: str
"""
self._payment_transaction = payment_transaction
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`: delegate to :meth:`to_str`."""
    return self.to_str()
| |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender importer for Three.js (ASCII JSON format).
"""
import os
import time
import json
import bpy
import mathutils
from mathutils.geometry import tessellate_polygon
from bpy_extras.image_utils import load_image
# #####################################################
# Generators
# #####################################################
def setColor(c, t):
    """Copy an (r, g, b) triple *t* onto a color-like object *c*."""
    c.r, c.g, c.b = t[0], t[1], t[2]
def create_texture(filename, modelpath):
    """Create a Blender IMAGE texture named after *filename*, loading the
    image file relative to *modelpath*.

    Returns the new texture; its image is attached only when the file could
    be loaded.
    """
    name = filename
    texture = bpy.data.textures.new(name, type='IMAGE')

    image = load_image(filename, modelpath)
    # NOTE(review): has_data is computed but never used or returned — the
    # attribute read may matter inside Blender (can trigger image loading);
    # verify before removing.
    has_data = False

    if image:
        texture.image = image
        has_data = image.has_data

    return texture
def create_materials(data, modelpath):
    """Create one Blender material per entry of ``data["materials"]``.

    Honored JSON keys: DbgName, colorDiffuse, colorSpecular, transparency,
    specularCoef, mapDiffuse, vertexColors.
    NOTE(review): colorAmbient and mapLightmap are read but never applied.
    Returns the materials in the same order as the JSON array (face material
    indices rely on this ordering).
    """
    materials = []
    materials_data = data.get("materials", [])

    for i, m in enumerate(materials_data):
        name = m.get("DbgName", "material_%d" % i)

        colorAmbient = m.get("colorAmbient", None)
        colorDiffuse = m.get("colorDiffuse", None)
        colorSpecular = m.get("colorSpecular", None)
        alpha = m.get("transparency", 1.0)
        specular_hardness = m.get("specularCoef", 0)
        mapDiffuse = m.get("mapDiffuse", None)
        mapLightmap = m.get("mapLightmap", None)

        # Any truthy value (the exporter writes a vertex-color mode) turns
        # vertex colors on.
        vertexColorsType = m.get("vertexColors", False)
        useVertexColors = False
        if vertexColorsType:
            useVertexColors = True

        material = bpy.data.materials.new(name)
        material.THREE_useVertexColors = useVertexColors

        if colorDiffuse:
            setColor(material.diffuse_color, colorDiffuse)
            material.diffuse_intensity = 1.0

        if colorSpecular:
            setColor(material.specular_color, colorSpecular)
            material.specular_intensity = 1.0

        if alpha < 1.0:
            material.alpha = alpha
            material.use_transparency = True

        if specular_hardness:
            material.specular_hardness = specular_hardness

        # Diffuse texture: create it, wire a UV-mapped slot, and make it the
        # active texture (used later when assigning face images).
        if mapDiffuse:
            texture = create_texture(mapDiffuse, modelpath)
            mtex = material.texture_slots.add()
            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use = True
            mtex.use_map_color_diffuse = True
            material.active_texture = texture

        materials.append(material)

    return materials
def create_mesh_object(name, vertices, materials, face_data, flipYZ, recalculate_normals):
    """Build a Blender mesh + object from parsed Three.js geometry and link
    it into the current scene at the 3D cursor.

    :param name: mesh/object name
    :param vertices: list of (x, y, z) tuples
    :param materials: materials from create_materials(), indexed by the
        per-face material indices in *face_data*
    :param face_data: dict produced by extract_faces()
    :param flipYZ: swap Y/Z on stored vertex normals (vertices are flipped
        by the caller)
    :param recalculate_normals: let Blender rebuild normals instead of using
        the ones stored in the file
    """
    faces = face_data["faces"]
    vertexNormals = face_data["vertexNormals"]
    vertexColors = face_data["vertexColors"]
    vertexUVs = face_data["vertexUVs"]
    faceMaterials = face_data["materials"]
    faceColors = face_data["faceColors"]

    edges = []

    # Create a new mesh
    me = bpy.data.meshes.new(name)
    me.from_pydata(vertices, edges, faces)

    # Handle normals: when keeping file normals, update() must happen BEFORE
    # they are written (see the long comment below).
    if not recalculate_normals:
        me.update(calc_edges = True)

    if face_data["hasVertexNormals"]:
        print("setting vertex normals")
        for fi in range(len(faces)):
            if vertexNormals[fi]:
                #print("setting face %i with %i vertices" % (fi, len(normals[fi])))
                # if me.update() is called after setting vertex normals
                # setting face.use_smooth overrides these normals
                # - this fixes weird shading artefacts (seems to come from sharing
                # of vertices between faces, didn't find a way how to set vertex normals
                # per face use of vertex as opposed to per vertex),
                # - probably this just overrides all custom vertex normals
                # - to preserve vertex normals from the original data
                # call me.update() before setting them
                me.faces[fi].use_smooth = True
                if not recalculate_normals:
                    for j in range(len(vertexNormals[fi])):
                        vertexNormal = vertexNormals[fi][j]
                        x = vertexNormal[0]
                        y = vertexNormal[1]
                        z = vertexNormal[2]
                        # Three.js is Y-up, Blender Z-up.
                        if flipYZ:
                            tmp = y
                            y = -z
                            z = tmp
                        # flip normals (this make them look consistent with the original before export)
                        #x = -x
                        #y = -y
                        #z = -z
                        # NOTE(review): normals are written per vertex, so
                        # shared vertices keep the last face's normal.
                        vi = me.faces[fi].vertices[j]
                        me.vertices[vi].normal.x = x
                        me.vertices[vi].normal.y = y
                        me.vertices[vi].normal.z = z

    if recalculate_normals:
        me.update(calc_edges = True)

    # Handle colors: per-corner colors win; otherwise flood each face's
    # corners with the single face color.
    if face_data["hasVertexColors"]:
        print("setting vertex colors")
        me.vertex_colors.new("vertex_color_layer_0")
        for fi in range(len(faces)):
            if vertexColors[fi]:
                face_colors = me.vertex_colors[0].data[fi]
                face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
                for vi in range(len(vertexColors[fi])):
                    r = vertexColors[fi][vi][0]
                    g = vertexColors[fi][vi][1]
                    b = vertexColors[fi][vi][2]
                    face_colors[vi].r = r
                    face_colors[vi].g = g
                    face_colors[vi].b = b

    elif face_data["hasFaceColors"]:
        print("setting vertex colors from face colors")
        me.vertex_colors.new("vertex_color_layer_0")
        for fi in range(len(faces)):
            if faceColors[fi]:
                r = faceColors[fi][0]
                g = faceColors[fi][1]
                b = faceColors[fi][2]
                face_colors = me.vertex_colors[0].data[fi]
                face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
                for vi in range(len(faces[fi])):
                    face_colors[vi].r = r
                    face_colors[vi].g = g
                    face_colors[vi].b = b

    # Handle uvs: one Blender UV layer per JSON layer.
    # NOTE(review): vertexUVs layers are indexed by face index here, but
    # extract_faces appends entries only for faces that carried vertex UVs —
    # confirm indices stay aligned for models with mixed faces.
    if face_data["hasVertexUVs"]:
        print("setting vertex uvs")
        for li, layer in enumerate(vertexUVs):
            me.uv_textures.new("uv_layer_%d" % li)
            for fi in range(len(faces)):
                if layer[fi]:
                    uv_face = me.uv_textures[li].data[fi]
                    face_uvs = uv_face.uv1, uv_face.uv2, uv_face.uv3, uv_face.uv4
                    for vi in range(len(layer[fi])):
                        u = layer[fi][vi][0]
                        v = layer[fi][vi][1]
                        face_uvs[vi].x = u
                        face_uvs[vi].y = v
                    # Assign the material's active texture image to the face.
                    active_texture = materials[faceMaterials[fi]].active_texture
                    if active_texture:
                        uv_face.image = active_texture.image

    # Handle materials # 1
    if face_data["hasMaterials"]:
        print("setting materials (mesh)")
        for m in materials:
            me.materials.append(m)

        print("setting materials (faces)")
        for fi in range(len(faces)):
            # -1 means "face carries no material index".
            if faceMaterials[fi] >= 0:
                me.faces[fi].material_index = faceMaterials[fi]

    # Create a new object
    ob = bpy.data.objects.new(name, me)
    ob.data = me                        # link the mesh data to the object
    scene = bpy.context.scene           # get the current scene
    scene.objects.link(ob)              # link the object into the scene
    ob.location = scene.cursor_location # position object at 3d-cursor
# #####################################################
# Faces
# #####################################################
def extract_faces(data):
    """Decode the packed Three.js (format 3) face stream.

    ``data["faces"]`` is a flat integer stream: each face starts with a bit
    mask describing which optional fields follow (quad flag, material, UVs,
    normals, colors).  Returns a dict of per-face lists plus ``has*`` flags;
    unconditional lists (faceNormals, vertexNormals, faceColors,
    vertexColors, each faceUVs layer) get one entry per face (None when the
    face lacks that field), while vertexUVs layers only collect faces that
    actually carried per-corner UVs.
    """
    result = {
        "faces" : [],
        "materials" : [],
        "faceUVs" : [],
        "vertexUVs" : [],
        "faceNormals" : [],
        "vertexNormals" : [],
        "faceColors" : [],
        "vertexColors" : [],
        "hasVertexNormals" : False,
        "hasVertexUVs" : False,
        "hasVertexColors" : False,
        "hasFaceColors" : False,
        "hasMaterials" : False
    }

    stream = data.get("faces", [])
    normals = data.get("normals", [])
    colors = data.get("colors", [])

    # Disregard empty UV layers; each live layer gets its own buckets.
    live_layers = 0
    for layer in data["uvs"]:
        if len(layer) > 0:
            live_layers += 1
            result["faceUVs"].append([])
            result["vertexUVs"].append([])

    cursor = 0
    total = len(stream)

    def take():
        # Consume and return the next integer from the packed stream.
        nonlocal cursor
        value = stream[cursor]
        cursor += 1
        return value

    def decode_color(packed):
        # 0xRRGGBB -> (r, g, b) floats in [0, 1].
        return ((( packed >> 16 ) & 0xff) / 255.0,
                (( packed >> 8 ) & 0xff) / 255.0,
                ( packed & 0xff) / 255.0)

    while cursor < total:
        face_type = take()

        # Bit flags describing which optional fields follow this face.
        is_quad = face_type & (1 << 0)
        has_material = face_type & (1 << 1)
        has_face_uv = face_type & (1 << 2)
        has_vertex_uv = face_type & (1 << 3)
        has_face_normal = face_type & (1 << 4)
        has_vertex_normal = face_type & (1 << 5)
        has_face_color = face_type & (1 << 6)
        has_vertex_color = face_type & (1 << 7)

        result["hasVertexUVs"] = result["hasVertexUVs"] or has_vertex_uv
        result["hasVertexNormals"] = result["hasVertexNormals"] or has_vertex_normal
        result["hasVertexColors"] = result["hasVertexColors"] or has_vertex_color
        result["hasFaceColors"] = result["hasFaceColors"] or has_face_color
        result["hasMaterials"] = result["hasMaterials"] or has_material

        # Vertex indices: 4 for quads, 3 for triangles.
        corner_count = 4 if is_quad else 3
        result["faces"].append([take() for _ in range(corner_count)])

        # Material index (-1 when the face carries none).
        result["materials"].append(take() if has_material else -1)

        # UV data, one slot per live layer.
        for li in range(live_layers):
            uv_layer = data["uvs"][li]

            face_uv = None
            if has_face_uv:
                uv_index = take()
                face_uv = [uv_layer[uv_index * 2], uv_layer[uv_index * 2 + 1]]
            result["faceUVs"][li].append(face_uv)

            if has_vertex_uv:
                corner_uvs = []
                for _ in range(corner_count):
                    uv_index = take()
                    corner_uvs.append([uv_layer[uv_index * 2], uv_layer[uv_index * 2 + 1]])
                result["vertexUVs"][li].append(corner_uvs)

        # Face normal (indices address xyz triples in the normals array).
        if has_face_normal:
            ni = take() * 3
            face_normal = [normals[ni], normals[ni + 1], normals[ni + 2]]
        else:
            face_normal = None
        result["faceNormals"].append(face_normal)

        # Per-corner normals.
        if has_vertex_normal:
            corner_normals = []
            for _ in range(corner_count):
                ni = take() * 3
                corner_normals.append([normals[ni], normals[ni + 1], normals[ni + 2]])
        else:
            corner_normals = None
        result["vertexNormals"].append(corner_normals)

        # Face color (index into the packed-hex colors array).
        if has_face_color:
            face_color = decode_color(colors[take()])
        else:
            face_color = None
        result["faceColors"].append(face_color)

        # Per-corner colors.
        if has_vertex_color:
            corner_colors = []
            for _ in range(corner_count):
                corner_colors.append(decode_color(colors[take()]))
        else:
            corner_colors = None
        result["vertexColors"].append(corner_colors)

    return result
# #####################################################
# Utils
# #####################################################
def hexToTuple( hexColor ):
    """Convert a 24-bit 0xRRGGBB integer into an (r, g, b) float triple
    with each channel in [0, 1]."""
    channels = [(hexColor >> shift) & 0xff for shift in (16, 8, 0)]
    return tuple(channel / 255.0 for channel in channels)
def isBitSet(value, position):
    """Return the masked bit of *value* at *position* — an int that is
    truthy when the bit is set (callers rely on truthiness, not on bool)."""
    mask = 1 << position
    return value & mask
def splitArray(data, chunkSize):
    """Split a flat sequence into consecutive chunks of *chunkSize* items
    (the last chunk may be shorter).

    Fix: the original appended a spurious empty trailing chunk for empty
    input (``splitArray([], n) == [[]]``); empty input now yields ``[]``.

    :param data: sequence to split
    :param chunkSize: positive chunk length
    :return: list of lists
    """
    return [list(data[i:i + chunkSize]) for i in range(0, len(data), chunkSize)]
def extract_json_string(text):
    """Pull the JSON object literal out of a Three.js worker script.

    The payload sits between ``var model =`` and the final ``}`` that
    precedes the ``postMessage`` call.
    """
    begin_marker = "var model ="
    end_marker = "postMessage"

    start = text.find(begin_marker) + len(begin_marker)
    limit = text.find(end_marker)
    end = text.rfind("}", start, limit)

    return text[start:end + 1].strip()
def get_name(filepath):
    """Return the file's base name without directory or extension."""
    base = os.path.basename(filepath)
    stem, _ext = os.path.splitext(base)
    return stem
def get_path(filepath):
    """Return the directory portion of *filepath*."""
    directory = os.path.dirname(filepath)
    return directory
# #####################################################
# Parser
# #####################################################
def load(operator, context, filepath, option_flip_yz = True, recalculate_normals = True, option_worker = False):
    """Import a Three.js ASCII JSON model into the current Blender scene.

    :param operator: invoking Blender operator (kept for the importer
        calling convention; unused here)
    :param context: Blender context (unused; bpy.context is used directly)
    :param filepath: path of the model file
    :param option_flip_yz: swap Y/Z axes (Three.js is Y-up, Blender Z-up)
    :param recalculate_normals: let Blender rebuild normals instead of the
        ones stored in the file
    :param option_worker: the file is a web-worker script; extract the
        embedded JSON first
    :return: {'FINISHED'} per Blender operator convention
    """
    print('\nimporting %r' % filepath)

    time_main = time.time()

    print("\tparsing JSON file...")
    time_sub = time.time()

    # Fix: mode 'rU' was removed in Python 3.11 — text mode already uses
    # universal newlines.  'with' also guarantees the handle is closed even
    # if reading raises.
    with open(filepath, 'r') as modelfile:
        rawcontent = modelfile.read()

    if option_worker:
        json_string = extract_json_string(rawcontent)
    else:
        json_string = rawcontent

    data = json.loads( json_string )

    time_new = time.time()
    print('parsing %.4f sec' % (time_new - time_sub))
    time_sub = time_new

    # flip YZ
    vertices = splitArray(data["vertices"], 3)
    if option_flip_yz:
        vertices[:] = [(v[0], -v[2], v[1]) for v in vertices]

    # extract faces
    face_data = extract_faces(data)

    # deselect all
    bpy.ops.object.select_all(action='DESELECT')

    nfaces = len(face_data["faces"])
    nvertices = len(vertices)
    # Fix: use floor division so the '%i' report is fed ints, not floats
    # (the flat arrays hold 3 floats per normal/color, 2 per uv).
    nnormals = len(data.get("normals", [])) // 3
    ncolors = len(data.get("colors", [])) // 3
    nuvs = len(data.get("uvs", [])) // 2
    nmaterials = len(data.get("materials", []))

    print('\tbuilding geometry...\n\tfaces:%i, vertices:%i, vertex normals: %i, vertex uvs: %i, vertex colors: %i, materials: %i ...' % (
        nfaces, nvertices, nnormals, nuvs, ncolors, nmaterials ))

    # Create materials
    materials = create_materials(data, get_path(filepath))

    # Create new obj
    create_mesh_object(get_name(filepath), vertices, materials, face_data, option_flip_yz, recalculate_normals)

    scene = bpy.context.scene
    scene.update()

    time_new = time.time()
    print('finished importing: %r in %.4f sec.' % (filepath, (time_new - time_main)))

    return {'FINISHED'}
if __name__ == "__main__":
    # NOTE(review): register() is not defined in this file — presumably the
    # Blender add-on registration hook from the enclosing add-on package;
    # running this module directly would raise NameError. Verify.
    register()
| |
"""
This module lets the simulator communicate with external things like the
log viewer and NetVis.
"""
import comm
import socket
import json
import threading
import core
import api
class StreamingConnection (comm.NullInterface):
    """One accepted socket connection to an external viewer (log viewer /
    NetVis).

    On construction it spawns a reader thread and immediately pushes an
    'initialize' message describing the current topology; afterwards,
    incoming newline-delimited JSON messages are dispatched to
    ``_handle_<type>`` methods.

    NOTE(review): this module uses ``world``, ``traceback``, ``_getByName``
    and ``interp`` without importing or defining them here — presumably
    provided elsewhere in the package; confirm before relying on those
    code paths.
    """

    def __init__ (self, parent, sock):
        # *parent* is the owning StreamingInterface; it performs the JSON
        # serialization and fan-out (see parent.send / send_raw).
        self.sock = sock
        self.parent = parent
        # Daemonized reader thread so it never blocks interpreter exit.
        self.thread = threading.Thread(target = self._recvLoop)
        self.thread.daemon = True
        self.thread.start()

        def make (a,A, b,B):
            # Canonical (name, port, name, port) tuple for an undirected
            # link: endpoints ordered by name so each link is counted once.
            a = a.entity.name
            b = b.entity.name
            if a <= b:
                return (a,A,b,B)
            return (b,B,a,A)

        # Collect the de-duplicated link set from the global topology.
        links = set()
        for te in core.topo.values():
            for n,p in enumerate(te.ports):
                if p is None: continue
                links.add(make(te, n, p.dst, p.dstPort))
        links = [list(e) for e in links]

        # Describe every entity (hosts draw as circles, everything else as
        # squares) and every link to the freshly connected client.
        msg = {
            'type':'initialize',
            'entities':dict([(n.entity.name,
                'circle' if isinstance(n.entity, api.HostEntity) else 'square')
                for n in core.topo.values()]),
#            'entities': {},
            'links':links,
        }
        parent.send(msg, connections=self)

    def _recvLoop (self):
        """Reader-thread main loop: buffer socket data, split it on
        newlines, parse each line as JSON and dispatch on its 'type'."""
        import select
        d = ''
        retry = 0
        while True:
            try:
                (rx, tx, xx) = select.select([self.sock], [], [self.sock])
            except:
                # sock died
                break
            if len(xx):
                #TODO: reopen?
                break
            if len(rx):
                try:
                    r = self.sock.recv(4096)
                    if len(r) == 0:
                        # Tolerate a few empty reads before giving up.
                        retry += 1
                        if retry > 4: break
                        continue
                    else:
                        retry = 0
                    d = d + r
                except:
                    #TODO: reopen
                    break
                # Dispatch every complete (newline-terminated) message.
                while d.find('\n') >= 0:
                    l,d = d.split('\n', 1)
                    l = l.strip()
                    if len(l) == 0: continue
                    methodName = "<UNSET>"
                    try:
                        data = json.loads(l)
                        methodName = "_handle_" + data.get('type', "<UNDEFINED>")
                        m = getattr(self, methodName)
                        del data['type']
                        # Run the handler on the simulator's event loop, not
                        # on this reader thread.
                        # NOTE(review): `world` is not defined in this file.
                        world.doLater(0, m, **data)
                    except:
                        core.simlog.error("Error dispatching " + methodName)
                        # NOTE(review): `traceback` is never imported here.
                        traceback.print_exc()
        core.events._disconnect(self)

    def _handle_ping (self, node1, node2):
        # Ask node1 to flood a Ping addressed to node2.
        import basics
        node1 = _getByName(node1).entity
        node2 = _getByName(node2).entity
        if node1 and node2:
            node1.send(basics.Ping(node2), flood=True)

    def _handle_console (self, command):
        # Execute python command, return output to GUI
        r = interp.runsource(command, "<gui>")
        if r:
            # Source was incomplete; ask the GUI for the rest.
            core.events.send_console_more(command)

    def _handle_addEdge (self, node1, node2):
        # Link two nodes by name, if not already connected.
        node1 = _getByName(node1)
        node2 = _getByName(node2)
        if node1 and node2:
            if not node1.isConnectedTo(node2):
                node1.linkTo(node2)

    def _handle_delEdge (self, node1, node2):
        # Unlink two nodes by name, if currently connected.
        node1 = _getByName(node1)
        node2 = _getByName(node2)
        if node1 and node2:
            if node1.isConnectedTo(node2):
                node1.unlinkTo(node2)

    def _handle_disconnect (self, node):
        # Disconnect a node from everything.
        node = _getByName(node)
        if node:
            node.disconnect()

    def send_raw (self, msg):
        """Best-effort write of an already-serialized message; on failure
        the socket is closed and dropped (errors intentionally swallowed)."""
        try:
            self.sock.send(msg)
        except:
            try:
                self.sock.close()
            except:
                pass
            self.sock = None
            #TODO: reopen?
            pass
class StreamingInterface (object):
    """TCP server on 127.0.0.1:4444 that streams simulator events to any
    number of connected viewers as newline-delimited JSON, and accepts
    commands back via StreamingConnection."""

    def __init__ (self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the simulator restart without waiting out
        # TIME_WAIT on the fixed port.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("127.0.0.1", 4444))
        self.sock.listen(5)
        # Accept loop runs on a daemon thread.
        self.thread = threading.Thread(target = self._listenLoop)
        self.thread.daemon = True
        self.thread.start()
        self.connections = []

    def _listenLoop (self):
        """Accept connections forever; each becomes a StreamingConnection."""
        import select
        try:
            while True:
                (rx, tx, xx) = select.select([self.sock], [], [self.sock])
                if len(xx): break
                sock,addr = self.sock.accept()
                #print "connect",addr
                self.connections.append(StreamingConnection(self, sock))
        except:
            import traceback
            traceback.print_exc()
            pass
        core.simlog.debug("No longer listening for remote interfaces")

    def _disconnect (self, con):
        # Close and forget a connection; safe to call repeatedly.
        try:
            con.sock.close()
        except:
            pass
        try:
            self.connections.remove(con)
            #print "con closed"
        except:
            pass

    def send(self, msg, connections = None):
        """Serialize *msg* as one JSON line and write it to the given
        connection(s) — default: all.  Connections that fail are dropped."""
        if connections is None:
            connections = self.connections
        elif not isinstance(connections, list):
            connections = [connections]
        # default=repr keeps serialization from raising on odd values.
        r = json.dumps(msg, default=repr) + "\n";
        bad = []
        for c in connections:
            try:
                c.send_raw(r)
            except:
                bad.append(c)
        for c in bad:
            self._disconnect(c)

    def send_console(self, text):
        # Console output forwarding is currently disabled.
        #self.send({'type':'console','msg':text})
        pass

    def send_console_more(self, text):
        # "Need more input" notifications are currently disabled.
        #self.send({'type':'console_more','command':text})
        pass

    def send_log(self, record):
        # Log records are forwarded verbatim (already dict-like).
        self.send(record)

    def send_entity_down(self, name):
        self.send({
            'type':'delEntity',
            'node':name,
        })

    def send_entity_up(self, name, kind):
        # Switches draw as squares, everything else as circles.
        self.send(
        {
            'type':'addEntity',
            'kind':'square' if kind == 'switch' else 'circle',
            'label':name,
        })

    def send_link_up(self, srcid, sport, dstid, dport):
        self.send({
            'type':'link',
            'node1':srcid,
            'node2':dstid,
            'node1_port':sport,
            'node2_port':dport,
        })

    def packet (self, n1, n2, packet, duration, drop=False):
        """Animate a packet traveling n1 -> n2 for *duration* seconds
        (sent to the GUI in milliseconds)."""
        m = {
            "type":"packet",
            "node1":n1,
            "node2":n2,
            "duration":duration * 1000,
            "stroke":packet.outer_color,
            "fill":packet.inner_color,
            "drop":drop,
        }
        #if color is not None:
        #  m['stroke'] = color
        self.send(m)

    def send_link_down(self, srcid, sport, dstid, dport):
        self.send({
            'type':'unlink',
            'node1':srcid,
            'node2':dstid,
            'node1_port':sport,
            'node2_port':dport,
        })

    def highlight_path (self, nodes):
        """ Sends a path to the GUI to be highlighted """
        # NOTE(review): the actual send is commented out — currently a no-op.
        nodes = [n.name for n in nodes]
        msg = {'type':'highlight', 'nodes':nodes}
        #self.send(msg)

    def set_debug(self, nodeid, msg):
        self.send({
            'type' : 'debug',
            'node' : nodeid,
            'msg': msg,
        })

# NOTE(review): module-level alias — presumably the hook name the simulator's
# comm machinery looks up when loading this interface; verify against loader.
interface = StreamingInterface
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first UTXO in *listunspent* whose 'amount' equals
    *amount*; raise AssertionError when none matches."""
    for candidate in listunspent:
        if candidate['amount'] == amount:
            return candidate
    raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
    """Configure the test: four nodes, starting from a clean (empty) chain."""
    super().__init__()
    self.setup_clean_chain = True
    self.num_nodes = 4
def setup_network(self, split=False):
    """Start the nodes and connect them 0-1, 1-2, 0-2 (a triangle) plus a
    0-3 spur, then sync.

    The *split* flag is accepted for the framework signature but ignored.
    """
    self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)

    connect_nodes_bi(self.nodes,0,1)
    connect_nodes_bi(self.nodes,1,2)
    connect_nodes_bi(self.nodes,0,2)
    connect_nodes_bi(self.nodes,0,3)

    self.is_network_split=False
    self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid bitdeal address")
except JSONRPCException as e:
assert("changeAddress must be a valid bitdeal address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0];
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
if __name__ == '__main__':
    # Entry point: run the fundrawtransaction functional test suite
    # (RawTransactionsTest is defined earlier in this file).
    RawTransactionsTest().main()
| |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, IdlArrayOrSequenceType, IdlNullableType
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
################################################################################
# V8-specific handling of IDL types
################################################################################
# Interface types that do not use a generated ScriptWrappable wrapper class;
# they need hand-written conversion code instead.
NON_WRAPPER_TYPES = frozenset([
    'CompareHow',
    'EventHandler',
    'EventListener',
    'NodeFilter',
    'SerializedScriptValue',
])
# Typed-array interfaces. Values are (cpp_type, v8_type) pairs consumed by the
# constructor templates; None marks buffer types with no element type.
TYPED_ARRAYS = {
    # (cpp_type, v8_type), used by constructor templates
    'ArrayBuffer': None,
    'ArrayBufferView': None,
    'Float32Array': ('float', 'v8::kExternalFloatArray'),
    'Float64Array': ('double', 'v8::kExternalDoubleArray'),
    'Int8Array': ('signed char', 'v8::kExternalByteArray'),
    'Int16Array': ('short', 'v8::kExternalShortArray'),
    'Int32Array': ('int', 'v8::kExternalIntArray'),
    'Uint8Array': ('unsigned char', 'v8::kExternalUnsignedByteArray'),
    'Uint8ClampedArray': ('unsigned char', 'v8::kExternalPixelArray'),
    'Uint16Array': ('unsigned short', 'v8::kExternalUnsignedShortArray'),
    'Uint32Array': ('unsigned int', 'v8::kExternalUnsignedIntArray'),
}
# True when the type names one of the typed-array element interfaces above.
IdlType.is_typed_array_element_type = property(
    lambda self: self.base_type in TYPED_ARRAYS)
# True for interface types that get a generated V8 wrapper class.
IdlType.is_wrapper_type = property(
    lambda self: (self.is_interface_type and
                  self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
# IDL types whose C++ spelling is identical to the IDL spelling.
CPP_TYPE_SAME_AS_IDL_TYPE = set([
    'double',
    'float',
    'long long',
    'unsigned long long',
])
# Integer IDL types represented as plain C++ 'int'.
CPP_INT_TYPES = set([
    'byte',
    'long',
    'short',
])
# Integer IDL types represented as C++ 'unsigned'; membership here also adds
# the 'u' suffix in literal_cpp_value().
CPP_UNSIGNED_TYPES = set([
    'octet',
    'unsigned int',
    'unsigned long',
    'unsigned short',
])
# One-off IDL type -> C++ type mappings that fit no general rule.
CPP_SPECIAL_CONVERSION_RULES = {
    'CompareHow': 'Range::CompareHow',
    'Date': 'double',
    'EventHandler': 'EventListener*',
    'Promise': 'ScriptPromise',
    'ScriptValue': 'ScriptValue',
    'boolean': 'bool',
    'unrestricted double': 'double',
    'unrestricted float': 'float',
}
def cpp_type_initializer(idl_type):
    """Return the C++ initializer suffix (e.g. ' = 0') for |idl_type|.

    |idl_type| argument is of type IdlType. An empty string means the C++
    type default-constructs to a sensible value and needs no initializer.
    """
    base = idl_type.base_type
    # Arrays/sequences become WTF::Vector, which default-constructs empty.
    if idl_type.native_array_element_type:
        return ''
    if idl_type.is_numeric_type:
        return ' = 0'
    if base == 'boolean':
        return ' = false'
    needs_no_initializer = (
        base in NON_WRAPPER_TYPES or
        base in CPP_SPECIAL_CONVERSION_RULES or
        base == 'any' or
        idl_type.is_string_type or
        idl_type.is_enum)
    return '' if needs_no_initializer else ' = nullptr'
def cpp_type_union(idl_type, extended_attributes=None, raw_type=False):
    """Union types have no C++ representation yet; always returns None.

    # FIXME: Need to revisit the design of union support.
    # http://crbug.com/240176
    """
    return None
def cpp_type_initializer_union(idl_type):
    """Lazily yield the C++ initializer string for each union member type."""
    return (member.cpp_type_initializer for member in idl_type.member_types)
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
# Unions expose the same helpers; cpp_type is None until union support lands.
IdlUnionType.cpp_type = property(cpp_type_union)
IdlUnionType.cpp_type_initializer = property(cpp_type_initializer_union)
IdlUnionType.cpp_type_args = cpp_type_union
# Arrays/sequences expose their element type for native (WTF::Vector) handling.
IdlArrayOrSequenceType.native_array_element_type = property(
    lambda self: self.element_type)
def cpp_template_type(template, inner_type):
    """Return the C++ template specialized to |inner_type|.

    A space is inserted before the closing bracket when the inner type
    itself ends with '>', so the output never contains the '>>' token
    (required by pre-C++11 parsers).
    """
    gap = ' ' if inner_type.endswith('>') else ''
    return '%s<%s%s>' % (template, inner_type, gap)
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
#   Foo.idl: interface Foo {attribute Bar bar};
#   Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}


def implemented_as(idl_type):
    """Return the C++ class name implementing this interface type.

    Types without an [ImplementedAs] registration are implemented under
    their own name.
    """
    base_idl_type = idl_type.base_type
    return IdlType.implemented_as_interfaces.get(base_idl_type, base_idl_type)


IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
    lambda cls, new_implemented_as_interfaces:
        cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
    """Return the include set for a generated binding class header."""
    header = class_name + '.h'
    return set([posixpath.join('bindings', relative_dir_posix, header)])
# Hand-maintained include sets for types whose headers cannot be derived
# mechanically from the type name; consulted first by includes_for_type().
INCLUDES_FOR_TYPE = {
    'object': set(),
    'CompareHow': set(),
    'EventHandler': set(['bindings/core/v8/V8AbstractEventListener.h',
                         'bindings/core/v8/V8EventListenerList.h']),
    'EventListener': set(['bindings/core/v8/BindingSecurity.h',
                          'bindings/core/v8/V8EventListenerList.h',
                          'core/frame/LocalDOMWindow.h']),
    'NodeList': set(['bindings/core/v8/V8NodeList.h',
                     'core/dom/NodeList.h',
                     'core/dom/StaticNodeList.h']),
    'Promise': set(['bindings/core/v8/ScriptPromise.h']),
    'SerializedScriptValue': set(['bindings/core/v8/SerializedScriptValue.h']),
    'ScriptValue': set(['bindings/core/v8/ScriptValue.h']),
}
def includes_for_type(idl_type):
    """Return the set of headers needed to use |idl_type| in bindings code."""
    idl_type = idl_type.preprocessed_type
    base_idl_type = idl_type.base_type
    # Hand-maintained special cases take precedence.
    if base_idl_type in INCLUDES_FOR_TYPE:
        return INCLUDES_FOR_TYPE[base_idl_type]
    if idl_type.is_basic_type:
        return set()
    if idl_type.is_typed_array_element_type:
        return set(['bindings/core/v8/custom/V8%sCustom.h' % base_idl_type])
    if base_idl_type.endswith('ConstructorConstructor'):
        # FIXME: rename to NamedConstructor
        # FIXME: replace with a [NamedConstructorAttribute] extended attribute
        # A named constructor is emitted inside the generated bindings of its
        # interface, so it has no header file of its own.
        return set()
    if base_idl_type.endswith('Constructor'):
        # FIXME: replace with a [ConstructorAttribute] extended attribute
        base_idl_type = idl_type.constructor_type_name
    if base_idl_type not in component_dir:
        return set()
    component = component_dir[base_idl_type]
    return set(['bindings/%s/v8/V8%s.h' % (component, base_idl_type)])


IdlType.includes_for_type = property(includes_for_type)
IdlUnionType.includes_for_type = property(
    lambda self: set.union(*[member_type.includes_for_type
                             for member_type in self.member_types]))
IdlArrayOrSequenceType.includes_for_type = property(
    lambda self: self.element_type.includes_for_type)
def add_includes_for_type(idl_type):
    # Merge this type's headers into the global include set for the file
    # currently being generated (|includes| comes from v8_globals).
    includes.update(idl_type.includes_for_type)
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
    # Headers needed to reference an interface given only its name.
    return IdlType(interface_name).includes_for_type
def add_includes_for_interface(interface_name):
    # Convenience wrapper: merge an interface's headers into the global set.
    includes.update(includes_for_interface(interface_name))
def impl_should_use_nullable_container(idl_type):
    """True when null needs an explicit wrapper (the C++ type has no null)."""
    return not idl_type.cpp_type_has_null_value
# Property form: idl_type.impl_should_use_nullable_container
IdlTypeBase.impl_should_use_nullable_container = property(
    impl_should_use_nullable_container)
def impl_includes_for_type(idl_type, interfaces_info):
    """Return headers the implementation (impl) side needs for |idl_type|.

    Recurses into array/sequence element types; |interfaces_info| maps
    interface names to metadata including their 'include_path'.
    """
    result = set()
    if idl_type.impl_should_use_nullable_container:
        result.add('bindings/nullable.h')
    idl_type = idl_type.preprocessed_type
    element_type = idl_type.native_array_element_type
    if element_type:
        # Arrays/sequences are stored in a WTF::Vector of the element type.
        result.add('wtf/Vector.h')
        result |= impl_includes_for_type(element_type, interfaces_info)
    if idl_type.is_string_type:
        result.add('wtf/text/WTFString.h')
    interface_info = interfaces_info.get(idl_type.name)
    if interface_info is not None:
        result.add(interface_info['include_path'])
    return result
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
# Maps interface name -> component (e.g. 'core', 'modules'); populated by the
# code generator before any bindings are emitted, read by includes_for_type().
component_dir = {}


def set_component_dirs(new_component_dirs):
    """Register the interface-name-to-component mapping for include paths."""
    component_dir.update(new_component_dirs)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
    """Map IDL types onto the types actually used at the binding layer."""
    if idl_type.is_enum:
        # Enumerations are internally DOMStrings
        return IdlType('DOMString')
    if idl_type.name == 'Any' or idl_type.is_callback_function:
        return IdlType('ScriptValue')
    # Everything else passes through unchanged.
    return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
    """Return (idl_type, cpp_value) with preliminary type conversions applied."""
    idl_type = idl_type.preprocessed_type
    if idl_type.name == 'Promise':
        idl_type = IdlType('ScriptValue')
    if idl_type.base_type in ('long long', 'unsigned long long'):
        # 64-bit integers are not representable in ECMAScript;
        # we represent them as doubles.
        was_nullable = idl_type.is_nullable
        idl_type = IdlType('double')
        if was_nullable:
            idl_type = IdlNullableType(idl_type)
        cpp_value = 'static_cast<double>(%s)' % cpp_value
    # HTML5 says that unsigned reflected attributes should be in the range
    # [0, 2^31). Out-of-range values fall back to a default value (or 0).
    extended_attributes = extended_attributes or {}
    if ('Reflect' in extended_attributes and
            idl_type.base_type in ('unsigned long', 'unsigned short')):
        cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
                                      'getIntegralAttribute')
        cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
    return idl_type, cpp_value
# 'release' flags whether conversion should transfer ownership (release());
# true for interface types, and for unions a parallel flag per member type.
IdlType.release = property(lambda self: self.is_interface_type)
IdlUnionType.release = property(
    lambda self: [member_type.is_interface_type
                  for member_type in self.member_types])
def literal_cpp_value(idl_type, idl_literal):
    """Convert an IDL literal into a valid C++ literal for this type."""
    # FIXME: add validation that idl_type and idl_literal are compatible
    literal_value = str(idl_literal)
    # Unsigned integer types need the 'u' suffix to avoid signedness warnings.
    suffix = 'u' if idl_type.base_type in CPP_UNSIGNED_TYPES else ''
    return literal_value + suffix


IdlType.literal_cpp_value = literal_cpp_value
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
    """True when the C++ type itself can encode IDL null.

    - String types (String/AtomicString) represent null as a null string,
      i.e. one for which String::isNull() returns true.
    - Wrapper types (raw pointer or RefPtr/PassRefPtr) represent null as
      a null pointer.
    """
    return idl_type.is_string_type or idl_type.is_wrapper_type
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
    """Nullable type whose corresponding C++ type already supports null."""
    return idl_type.is_nullable and idl_type.cpp_type_has_null_value


def is_explicit_nullable(idl_type):
    """Nullable type that is not implicitly nullable (see above).

    Such types use Nullable<T> or a similar explicit container for null.
    """
    return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
# Unions always use an explicit container for null, never an implicit value.
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
| |
import codecs
import datetime
import os
import shutil
import tempfile
import unittest
from io import StringIO
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.contrib.staticfiles import storage
from django.contrib.staticfiles.management.commands import (
collectstatic, runserver,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.test import override_settings
from django.test.utils import extend_sys_path
from django.utils import timezone
from django.utils._os import symlinks_supported
from django.utils.functional import empty
from .cases import CollectionTestCase, StaticFilesTestCase, TestDefaults
from .settings import TEST_ROOT, TEST_SETTINGS
from .storage import DummyStorage
class TestNoFilesCreated:
    # Mixin for collectstatic test cases whose run must not write anything
    # to STATIC_ROOT (e.g. --dry-run).
    def test_no_files_created(self):
        """
        Make sure no files were created in the destination directory.
        """
        self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestRunserver(StaticFilesTestCase):
    """Checks on the runserver command's static-files handler."""

    @override_settings(MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
    def test_middleware_loaded_only_once(self):
        # Building the handler must instantiate each middleware exactly once.
        cmd = runserver.Command()
        with mock.patch('django.middleware.common.CommonMiddleware') as middleware_mock:
            cmd.get_handler(use_static_handler=True, insecure_serving=True)
            self.assertEqual(middleware_mock.call_count, 1)
class TestFindStatic(TestDefaults, CollectionTestCase):
    """
    Tests for the ``findstatic`` management command.
    """
    def _get_file(self, filepath):
        # Locate the first match via findstatic and return its contents.
        path = call_command('findstatic', filepath, all=False, verbosity=0, stdout=StringIO())
        with codecs.open(path, "r", "utf-8") as f:
            return f.read()

    def test_all_files(self):
        """
        Without --first and at -v1, findstatic lists every candidate file.
        """
        out = call_command('findstatic', 'test/file.txt', verbosity=1, stdout=StringIO())
        found = [line.strip() for line in out.split('\n')]
        # Three lines: the "Found <file> here" header plus both candidates.
        self.assertEqual(len(found), 3)
        self.assertIn('project', found[1])
        self.assertIn('apps', found[2])

    def test_all_files_less_verbose(self):
        """
        Without --first and at -v0, findstatic lists only the candidates.
        """
        out = call_command('findstatic', 'test/file.txt', verbosity=0, stdout=StringIO())
        found = [line.strip() for line in out.split('\n')]
        self.assertEqual(len(found), 2)
        self.assertIn('project', found[0])
        self.assertIn('apps', found[1])

    def test_all_files_more_verbose(self):
        """
        At -v2, findstatic lists every candidate file and also reports the
        locations that were searched.
        """
        out = call_command('findstatic', 'test/file.txt', verbosity=2, stdout=StringIO())
        found = [line.strip() for line in out.split('\n')]
        self.assertIn('project', found[1])
        self.assertIn('apps', found[2])
        self.assertIn("Looking in the following locations:", found[3])
        searched_locations = ', '.join(found[4:])
        # AppDirectoriesFinder searched locations
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'test', 'static'), searched_locations)
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'no_label', 'static'), searched_locations)
        # FileSystemFinder searched locations
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][1][1], searched_locations)
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][0], searched_locations)
        # DefaultStorageFinder searched locations
        self.assertIn(
            os.path.join('staticfiles_tests', 'project', 'site_media', 'media'),
            searched_locations
        )
class TestConfiguration(StaticFilesTestCase):
    def test_location_empty(self):
        # collectstatic must refuse to run when STATIC_ROOT is empty or None.
        msg = 'without having set the STATIC_ROOT setting to a filesystem path'
        err = StringIO()
        for root in ['', None]:
            with override_settings(STATIC_ROOT=root):
                with self.assertRaisesMessage(ImproperlyConfigured, msg):
                    call_command('collectstatic', interactive=False, verbosity=0, stderr=err)

    def test_local_storage_detection_helper(self):
        # is_local_storage() must track the *current* staticfiles storage,
        # whether swapped via settings or by patching the module attribute.
        staticfiles_storage = storage.staticfiles_storage
        try:
            # Reset the lazy wrapper so the setting is re-evaluated.
            storage.staticfiles_storage._wrapped = empty
            with self.settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage'):
                command = collectstatic.Command()
                self.assertTrue(command.is_local_storage())
            storage.staticfiles_storage._wrapped = empty
            with self.settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
                command = collectstatic.Command()
                self.assertFalse(command.is_local_storage())
            # Patching the module-level attribute directly must also be seen.
            collectstatic.staticfiles_storage = storage.FileSystemStorage()
            command = collectstatic.Command()
            self.assertTrue(command.is_local_storage())
            collectstatic.staticfiles_storage = DummyStorage()
            command = collectstatic.Command()
            self.assertFalse(command.is_local_storage())
        finally:
            # Restore module-level state so later tests see the real storage.
            staticfiles_storage._wrapped = empty
            collectstatic.staticfiles_storage = staticfiles_storage
            storage.staticfiles_storage = staticfiles_storage
class TestCollectionHelpSubcommand(AdminScriptTestCase):
    @override_settings(STATIC_ROOT=None)
    def test_missing_settings_dont_prevent_help(self):
        """
        `manage.py help collectstatic` works even when the STATIC_ROOT
        setting is not set.
        """
        self.write_settings('settings.py', apps=['django.contrib.staticfiles'])
        stdout, stderr = self.run_manage(['help', 'collectstatic'])
        self.assertNoOutput(stderr)
class TestCollection(TestDefaults, CollectionTestCase):
    """
    Tests for the ``collectstatic`` management command.
    """
    def test_ignore(self):
        """
        Files matching -i patterns are not collected.
        """
        self.assertFileNotFound('test/test.ignoreme')

    def test_common_ignore_patterns(self):
        """
        Default ignore patterns (*~, .*, CVS) are not collected.
        """
        for ignored in ('test/.hidden', 'test/backup~', 'test/CVS'):
            self.assertFileNotFound(ignored)
class TestCollectionClear(CollectionTestCase):
    """
    Test the ``--clear`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        """Seed STATIC_ROOT with a stale file, then collect with --clear."""
        clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
        with open(clear_filepath, 'w') as f:
            f.write('should be cleared')
        # Bug fix: forward **kwargs so options passed by callers are no
        # longer silently dropped.
        super().run_collectstatic(clear=True, **kwargs)

    def test_cleared_not_found(self):
        # The stale file seeded above must be removed by --clear.
        self.assertFileNotFound('cleared.txt')

    def test_dir_not_exists(self, **kwargs):
        # --clear must cope with STATIC_ROOT not existing at all.
        shutil.rmtree(settings.STATIC_ROOT)
        super().run_collectstatic(clear=True)

    @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.PathNotImplementedStorage')
    def test_handle_path_notimplemented(self):
        # Storages without path() support must still be clearable.
        self.run_collectstatic()
        self.assertFileNotFound('cleared.txt')
class TestInteractiveMessages(CollectionTestCase):
    # Prompts/messages emitted by collectstatic; asserted against below.
    overwrite_warning_msg = "This will overwrite existing files!"
    delete_warning_msg = "This will DELETE ALL FILES in this location!"
    files_copied_msg = "static files copied"

    @staticmethod
    def mock_input(stdout):
        # Build an input() replacement that records the prompt into |stdout|
        # and always confirms with 'yes'.
        def _input(msg):
            stdout.write(msg)
            return 'yes'
        return _input

    def test_warning_when_clearing_staticdir(self):
        # --clear on a populated STATIC_ROOT warns about deletion only.
        stdout = StringIO()
        self.run_collectstatic()
        with mock.patch('builtins.input', side_effect=self.mock_input(stdout)):
            call_command('collectstatic', interactive=True, clear=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertIn(self.delete_warning_msg, output)

    def test_warning_when_overwriting_files_in_staticdir(self):
        # A second collect into a populated STATIC_ROOT warns about
        # overwriting only.
        stdout = StringIO()
        self.run_collectstatic()
        with mock.patch('builtins.input', side_effect=self.mock_input(stdout)):
            call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)

    def test_no_warning_when_staticdir_does_not_exist(self):
        # No prompt at all when STATIC_ROOT does not exist yet.
        stdout = StringIO()
        shutil.rmtree(settings.STATIC_ROOT)
        call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)
        self.assertIn(self.files_copied_msg, output)

    def test_no_warning_for_empty_staticdir(self):
        # An existing but empty STATIC_ROOT also needs no confirmation.
        stdout = StringIO()
        with tempfile.TemporaryDirectory(prefix='collectstatic_empty_staticdir_test') as static_dir:
            with override_settings(STATIC_ROOT=static_dir):
                call_command('collectstatic', interactive=True, stdout=stdout)
        output = stdout.getvalue()
        self.assertNotIn(self.overwrite_warning_msg, output)
        self.assertNotIn(self.delete_warning_msg, output)
        self.assertIn(self.files_copied_msg, output)
class TestCollectionExcludeNoDefaultIgnore(TestDefaults, CollectionTestCase):
    """
    Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
    ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        # Disable the built-in ignore patterns for every test in this class.
        super().run_collectstatic(use_default_ignore_patterns=False)

    def test_no_common_ignore_patterns(self):
        """
        With --no-default-ignore, the common ignore patterns (*~, .*, CVS)
        are collected rather than skipped.
        """
        for name in ('test/.hidden', 'test/backup~', 'test/CVS'):
            self.assertFileContains(name, 'should be ignored')
@override_settings(INSTALLED_APPS=[
    'staticfiles_tests.apps.staticfiles_config.IgnorePatternsAppConfig',
    'staticfiles_tests.apps.test',
])
class TestCollectionCustomIgnorePatterns(CollectionTestCase):
    def test_custom_ignore_patterns(self):
        """
        A custom ignore_patterns list, ['*.css'] in this case, can be specified
        in an AppConfig definition.
        """
        # *.css is skipped per the AppConfig; the custom list replaces the
        # defaults, so a normally-ignored dotfile is collected.
        self.assertFileNotFound('test/nonascii.css')
        self.assertFileContains('test/.hidden', 'should be ignored')
class TestCollectionDryRun(TestNoFilesCreated, CollectionTestCase):
    """
    Test the ``--dry-run`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        # With --dry-run nothing may be written; the TestNoFilesCreated
        # mixin asserts that STATIC_ROOT stays empty.
        super().run_collectstatic(dry_run=True)
class TestCollectionFilesOverride(CollectionTestCase):
    """
    Test overriding duplicated files by ``collectstatic`` management command.

    Check for proper handling of apps order in installed apps even if file
    modification dates are in different order:
        'staticfiles_test_app',
        'staticfiles_tests.apps.no_label',
    """
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        # get modification and access times for no_label/static/file2.txt
        self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
        self.orig_mtime = os.path.getmtime(self.orig_path)
        self.orig_atime = os.path.getatime(self.orig_path)
        # prepare duplicate of file2.txt from a temporary app
        # this file will have modification time older than no_label/static/file2.txt
        # anyway it should be taken to STATIC_ROOT because the temporary app is before
        # 'no_label' app in installed apps
        self.temp_app_path = os.path.join(self.temp_dir, 'staticfiles_test_app')
        self.testfile_path = os.path.join(self.temp_app_path, 'static', 'file2.txt')
        # Make the temporary directory an importable Python package.
        os.makedirs(self.temp_app_path)
        with open(os.path.join(self.temp_app_path, '__init__.py'), 'w+'):
            pass
        os.makedirs(os.path.dirname(self.testfile_path))
        with open(self.testfile_path, 'w+') as f:
            f.write('duplicate of file2.txt')
        # Back-date the duplicate one second so it is strictly OLDER than the
        # original; app ordering, not mtime, must decide which file wins.
        os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
        self.settings_with_test_app = self.modify_settings(
            INSTALLED_APPS={'prepend': 'staticfiles_test_app'})
        # enable() must run while temp_dir is importable so the app loads.
        with extend_sys_path(self.temp_dir):
            self.settings_with_test_app.enable()
        super().setUp()

    def tearDown(self):
        super().tearDown()
        self.settings_with_test_app.disable()

    def test_ordering_override(self):
        """
        Test if collectstatic takes files in proper order
        """
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')
        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')
# The collectstatic test suite already has conflicting files since both
# project/test/file.txt and apps/test/static/test/file.txt are collected. To
# properly test for the warning not happening unless we tell it to explicitly,
# we remove the project directory and will add back a conflicting file later.
@override_settings(STATICFILES_DIRS=[])
class TestCollectionOverwriteWarning(CollectionTestCase):
    """
    Test warning in ``collectstatic`` output when a file is skipped because a
    previous file was already written to the same path.
    """
    # Presence of this string in the output means the warning was emitted.
    warning_string = 'Found another file'

    def _collectstatic_output(self, **kwargs):
        """
        Run collectstatic at maximum verbosity and return everything it
        printed; BaseCollectionTestCase.run_collectstatic() doesn't let us
        raise the verbosity, so call the command directly.
        """
        out = StringIO()
        call_command('collectstatic', interactive=False, verbosity=3, stdout=out, **kwargs)
        return out.getvalue()

    def test_no_warning(self):
        """
        There isn't a warning if there isn't a duplicate destination.
        """
        self.assertNotIn(self.warning_string, self._collectstatic_output(clear=True))

    def test_warning(self):
        """
        There is a warning when there are duplicate destinations.
        """
        with tempfile.TemporaryDirectory() as static_dir:
            duplicate = os.path.join(static_dir, 'test', 'file.txt')
            os.mkdir(os.path.dirname(duplicate))
            with open(duplicate, 'w+') as f:
                f.write('duplicate of file.txt')
            with self.settings(STATICFILES_DIRS=[static_dir]):
                self.assertIn(self.warning_string, self._collectstatic_output(clear=True))
            os.remove(duplicate)
            # Make sure the warning went away again.
            with self.settings(STATICFILES_DIRS=[static_dir]):
                self.assertNotIn(self.warning_string, self._collectstatic_output(clear=True))
@override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage')
class TestCollectionNonLocalStorage(TestNoFilesCreated, CollectionTestCase):
    """
    Tests for a Storage that implements get_modified_time() but not path()
    (#15035).
    """
    def test_storage_properties(self):
        # Properties of the Storage as described in the ticket.
        storage = DummyStorage()
        epoch = datetime.datetime(1970, 1, 1, tzinfo=timezone.utc)
        self.assertEqual(storage.get_modified_time('name'), epoch)
        msg = "This backend doesn't support absolute paths."
        with self.assertRaisesMessage(NotImplementedError, msg):
            storage.path('name')
class TestCollectionNeverCopyStorage(CollectionTestCase):

    @override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.NeverCopyRemoteStorage')
    def test_skips_newer_files_in_remote_storage(self):
        """
        collectstatic skips newer files in a remote storage.
        run_collectstatic() in setUp() copies the static files, then files are
        always skipped after NeverCopyRemoteStorage is activated since
        NeverCopyRemoteStorage.get_modified_time() returns a datetime in the
        future to simulate an unmodified file.
        """
        out = StringIO()
        self.run_collectstatic(stdout=out, verbosity=2)
        self.assertIn("Skipping 'test.txt' (not modified)", out.getvalue())
@unittest.skipUnless(symlinks_supported(), "Must be able to symlink to run this test.")
class TestCollectionLinks(TestDefaults, CollectionTestCase):
"""
Test ``--link`` option for ``collectstatic`` management command.
Note that by inheriting ``TestDefaults`` we repeat all
the standard file resolving tests here, to make sure using
``--link`` does not change the file-selection semantics.
"""
def run_collectstatic(self, clear=False, link=True, **kwargs):
super().run_collectstatic(link=link, clear=clear, **kwargs)
def test_links_created(self):
"""
With ``--link``, symbolic links are created.
"""
self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))
def test_broken_symlink(self):
"""
Test broken symlink gets deleted.
"""
path = os.path.join(settings.STATIC_ROOT, 'test.txt')
os.unlink(path)
self.run_collectstatic()
self.assertTrue(os.path.islink(path))
def test_symlinks_and_files_replaced(self):
"""
Running collectstatic in non-symlink mode replaces symlinks with files,
while symlink mode replaces files with symlinks.
"""
path = os.path.join(settings.STATIC_ROOT, 'test.txt')
self.assertTrue(os.path.islink(path))
self.run_collectstatic(link=False)
self.assertFalse(os.path.islink(path))
self.run_collectstatic(link=True)
self.assertTrue(os.path.islink(path))
def test_clear_broken_symlink(self):
"""
With ``--clear``, broken symbolic links are deleted.
"""
nonexistent_file_path = os.path.join(settings.STATIC_ROOT, 'nonexistent.txt')
broken_symlink_path = os.path.join(settings.STATIC_ROOT, 'symlink.txt')
os.symlink(nonexistent_file_path, broken_symlink_path)
self.run_collectstatic(clear=True)
self.assertFalse(os.path.lexists(broken_symlink_path))
| |
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database is updated several times a year.
OLSON_VERSION = '2015d'
VERSION = '2015.4' # Switching to pip compatible version numbering.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
]
import sys, datetime, os.path, gettext
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
# Python 2/3 compatibility shim: define unicode and an ascii() helper.
# Probing the `unicode` builtin (absent on Python 3) selects which
# implementation of ascii() is installed.
try:
    unicode
except NameError:  # Python 3.x
    # Python 3.x doesn't have unicode(), making writing code
    # for Python 2.3 and Python 3.x a pain.
    unicode = str

    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        s.encode('US-ASCII')  # Raise an exception if not ASCII
        return s  # But return the original string - not a byte string.
else:  # Python 2.x
    def ascii(s):
        r"""
        >>> ascii('Hello')
        'Hello'
        >>> ascii(u'Hello')
        'Hello'
        >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        UnicodeEncodeError: ...
        """
        return s.encode('US-ASCII')
def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    Uses the pkg_resources module if available and no standard file
    found at the calculated location.
    """
    # Reject path traversal before touching the filesystem.
    parts = name.lstrip('/').split('/')
    for part in parts:
        if part == os.path.pardir or os.path.sep in part:
            raise ValueError('Bad path segment: %r' % part)
    filename = os.path.join(os.path.dirname(__file__), 'zoneinfo', *parts)
    if resource_stream is not None and not os.path.exists(filename):
        # http://bugs.launchpad.net/bugs/383171 - we avoid using this
        # unless absolutely necessary to help when a broken version of
        # pkg_resources is installed.
        return resource_stream(__name__, 'zoneinfo/' + name)
    return open(filename, 'rb')
def resource_exists(name):
    """Return true if the given resource exists"""
    try:
        handle = open_resource(name)
        handle.close()
    except IOError:
        return False
    return True
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
_tzinfo_cache = {}
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    '''
    # BUGFIX: a None zone used to escape as AttributeError on .upper();
    # report it as an unknown zone instead (matches later upstream pytz).
    if zone is None:
        raise UnknownTimeZoneError(None)
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # Parse the tzfile once and memoize the resulting tzinfo.
            fp = open_resource(zone)
            try:
                _tzinfo_cache[zone] = build_tzinfo(zone, fp)
            finally:
                fp.close()
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
    """UTC

    Optimized UTC implementation. It unpickles using the single module global
    instance defined beneath this class declaration.
    """
    zone = "UTC"

    _utcoffset = ZERO
    _dst = ZERO
    _tzname = zone

    def fromutc(self, dt):
        if dt.tzinfo is None:
            return self.localize(dt)
        # utc.__class__ (not the name UTC) because UTC is rebound to the
        # singleton instance right after this class body executes.
        return super(utc.__class__, self).fromutc(dt)

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __reduce__(self):
        # Unpickle through the _UTC factory so every pickle resolves to
        # the single module-global instance.
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p) - len(naive_p)
    17
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc
# Allow pickle protocols < 2 to call this factory on unpickling.
_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)
# Allow pickle protocols < 2 to call this factory on unpickling.
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.

    iso3166_code is the two letter code used to identify the country.

    >>> def print_list(list_of_strings):
    ...     'We use a helper so doctests work under Python 2.3 -> 3.x'
    ...     for s in list_of_strings:
    ...         print(s)

    >>> print_list(country_timezones['nz'])
    Pacific/Auckland
    Pacific/Chatham
    >>> print_list(country_timezones['ch'])
    Europe/Zurich
    >>> print_list(country_timezones['CH'])
    Europe/Zurich
    >>> print_list(country_timezones[unicode('ch')])
    Europe/Zurich
    >>> print_list(country_timezones['XXX'])
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'

    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::

    >>> print_list(country_timezones('nz'))
    Pacific/Auckland
    Pacific/Chatham
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]

    def _fill(self):
        # Parse zone.tab lazily on first access; LazyDict serves all
        # subsequent lookups from self.data.
        data = {}
        zone_tab = open_resource('zone.tab')
        try:
            for line in zone_tab:
                line = line.decode('US-ASCII')
                if line.startswith('#'):  # skip comment lines
                    continue
                # Columns: country code, coordinates, zone name[, comments];
                # split with maxsplit so trailing comments don't break rows.
                code, coordinates, zone = line.split(None, 4)[:3]
                if zone not in all_timezones_set:
                    continue
                try:
                    data[code].append(zone)
                except KeyError:
                    data[code] = [zone]
            self.data = data
        finally:
            zone_tab.close()

country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.

    >>> print(country_names['au'])
    Australia
    '''
    def _fill(self):
        # Parse iso3166.tab lazily on first access.
        data = {}
        zone_tab = open_resource('iso3166.tab')
        try:
            for line in zone_tab.readlines():
                line = line.decode('US-ASCII')
                if line.startswith('#'):  # skip comment lines
                    continue
                code, name = line.split(None, 1)
                data[code] = name.strip()
            self.data = data
        finally:
            zone_tab.close()

country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
    """Fixed offset from UTC, in minutes east.

    Instances should be created through the FixedOffset() factory below,
    which caches them and special-cases offset 0; __reduce__ points back
    at the factory so unpickling also yields the cached instance.
    """

    zone = None  # to match the standard pytz API

    def __init__(self, minutes):
        # Offsets of a full day or more are invalid per datetime.tzinfo.
        if abs(minutes) >= 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def __reduce__(self):
        # Unpickle via the factory so cached instances stay singletons.
        return FixedOffset, (self._minutes, )

    def dst(self, dt):
        # A fixed offset never observes DST.
        return ZERO

    def tzname(self, dt):
        return None

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)
# NOTE: the mutable default `_tzinfos` is intentional — it is the
# module-lifetime cache guaranteeing one instance per offset.
def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)
    >>> one.dst(datetime.datetime.now())
    datetime.timedelta(0)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)
    >>> two.dst(datetime.datetime.now())
    datetime.timedelta(0)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

    >>> FixedOffset(1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """
    if offset == 0:
        return UTC
    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. we need to save it.
        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return info
# Allow pickle protocols < 2 to call this factory on unpickling.
FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run the module's doctests against an importable pytz package."""
    import doctest
    import os
    import sys
    # Make the parent directory importable so `import pytz` finds us.
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)


if __name__ == '__main__':
    _test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
# Restrict the curated common_timezones names to those actually present in
# the all_timezones list for this tzdata release; evaluated lazily.
common_timezones = LazyList(
    tz for tz in common_timezones if tz in all_timezones)
# Set view of the same names for O(1) membership tests.
common_timezones_set = LazySet(common_timezones)
| |
"""
A file full of bit twidling helpers
"""
import struct
MAX_WORD = 32 # usually no more than 8, 16 is for SIMD register support
# Masks to use for unsigned anding to size
u_maxes = [ (2 ** (8*i)) - 1 for i in range(MAX_WORD+1) ]
u_maxes[0] = 0 # powers of 0 are 1, but we need 0
bu_maxes = [ (2 ** (i)) - 1 for i in range(8*MAX_WORD+1) ]
# Masks of just the sign bit for different sizes
sign_bits = [ (2 ** (8*i)) >> 1 for i in range(MAX_WORD+1) ]
sign_bits[0] = 0 # powers of 0 are 1, but we need 0
bsign_bits = [ (2 ** i)>>1 for i in range(8*MAX_WORD+1) ]
# Max *signed* masks (all but top bit )
s_maxes = [ u_maxes[i] ^ sign_bits[i] for i in range(len(u_maxes))]
s_maxes[0] = 0
# bit width masks
b_masks = [ (2**i)-1 for i in range(MAX_WORD*8) ]
b_masks[0] = 0
def unsigned(value, size):
    """
    Truncate *value* to an unsigned integer of *size* bytes.
    """
    mask = u_maxes[size]
    return value & mask
def signed(value, size):
    """
    Interpret *value* as a two's-complement signed integer of *size* bytes.
    """
    result = unsigned(value, size)
    # If the sign bit is set, pull the value down into the negative range.
    if result & sign_bits[size]:
        result -= u_maxes[size] + 1
    return result
def is_signed(value, size):
    # True when the sign bit of the size-byte value is set.
    return bool(unsigned(value, size) & sign_bits[size])
def sign_extend(value, cursize, newsize):
    """
    Extend *value* from *cursize* bytes to *newsize* bytes, filling the
    new space with copies of the high-order (sign) bit.
    """
    result = unsigned(value, cursize)
    if cursize == newsize:
        return result
    # Negative (sign bit set): fill the added bytes with 1 bits.
    if result & sign_bits[cursize]:
        result |= u_maxes[newsize - cursize] << (8 * cursize)
    return result
def bsign_extend(value, cursize, newsize):
    # Bit-granular sign extension (sizes are in bits, not bytes).
    result = value
    if cursize != newsize and (result & bsign_bits[cursize]):
        result |= bu_maxes[newsize - cursize] << cursize
    return result
def is_parity(val):
    # Even-parity test: True when the number of set bits in val is even.
    ones = 0
    while val:
        val &= val - 1  # clear the lowest set bit
        ones += 1
    return ones % 2 == 0
# Precomputed even-parity flag for every byte value 0-255 so hot paths can
# do a table lookup instead of looping over bits.
parity_table = []
for i in range(256):
    parity_table.append(is_parity(i))
def is_parity_byte(bval):
    """
    An "optimized" parity checker that looks up the result by index.
    Only the low byte of *bval* is considered.
    """
    return parity_table[bval & 0xff]
def lsb(value):
    # Least significant bit of value (0 or 1).
    return value & 0x1
def msb(value, size):
    # Most significant (sign) bit of a size-byte value: 1 if set, else 0.
    return 1 if value & sign_bits[size] else 0
def is_signed_half_carry(value, size, src):
    '''
    BCD carry/borrow in the second most important nibble:
        32bit - bit 27
        16bit - bit 11
        8bit  - bit 3
    '''
    probe = 1 << ((size << 3) - 5)
    # Carry/borrow occurred iff the two operands differ at that bit.
    return ((value ^ src) & probe) != 0
def is_signed_carry(value, size, src):
    # Signed carry: the result crossed past the signed max (or min)
    # while the source operand had not.
    smax = s_maxes[size]
    return (value > smax > src) or (value < -smax < -src)
def is_signed_overflow(value, size):
    # True when value no longer fits in a signed size-byte integer.
    smax = s_maxes[size]
    return value > smax or value < -smax
def is_unsigned_carry(value, size):
    # Unsigned overflow (past the max) or underflow (below zero).
    return value > u_maxes[size] or value < 0
def is_aux_carry(src, dst):
    # Auxiliary (half) carry: adding the low nibbles overflows past 0xf.
    return ((dst & 0xf) + (src & 0xf)) > 0xf
def is_aux_carry_sub(src, dst):
    # Auxiliary borrow on subtract: low nibble of src exceeds that of dst.
    return (src & 0xf) > (dst & 0xf)
# struct format strings indexed by byte width (little/big endian);
# None marks widths struct cannot unpack directly.
le_fmt_chars = (None,"B","<H",None,"<I",None,None,None,"<Q")
be_fmt_chars = (None,"B",">H",None,">I",None,None,None,">Q")
def parsebytes(bytes, offset, size, sign=False, bigend=False):
    """
    Parse an integer of *size* bytes from *bytes* at *offset*.

    Mostly for pulling immediates out of strings.  Falls back to
    slowparsebytes() for widths struct cannot handle directly
    (size > 8 or sizes with no format char, e.g. 3).
    """
    if size > 8:
        return slowparsebytes(bytes, offset, size, sign=sign, bigend=bigend)
    if bigend:
        f = be_fmt_chars[size]
    else:
        f = le_fmt_chars[size]
    # 'is None' instead of '== None': identity test is the Python idiom.
    if f is None:
        return slowparsebytes(bytes, offset, size, sign=sign, bigend=bigend)
    d = bytes[offset:offset+size]
    x = struct.unpack(f, d)[0]
    if sign:
        x = signed(x, size)
    return x
def slowparsebytes(bytes, offset, size, sign=False, bigend=False):
    """
    Byte-at-a-time integer parse for widths struct cannot handle.
    """
    # Walk the bytes most-significant first: forward for big endian,
    # backward for little endian.
    if bigend:
        indexes = range(offset, offset + size)
    else:
        indexes = range(offset + size - 1, offset - 1, -1)
    ret = 0
    for idx in indexes:
        ret = (ret << 8) | ord(bytes[idx])
    if sign:
        ret = signed(ret, size)
    return ret
def buildbytes(value, size, bigend=False):
    """
    Pack *value* into its *size*-byte string representation.

    Raises:
        Exception: for widths with no direct struct format char
                   (a slow path is not implemented yet).
    """
    value = unsigned(value, size)
    if bigend:
        f = be_fmt_chars[size]
    else:
        f = le_fmt_chars[size]
    # 'is None' instead of '== None': identity test is the Python idiom.
    if f is None:
        raise Exception("envi.bits.buildbytes needs slowbuildbytes")
    return struct.pack(f, value)
def byteswap(value, size):
    """
    Reverse the byte order of a *size*-byte integer,
    e.g. byteswap(0x1234, 2) == 0x3412.

    Bug fix: the previous loop shifted *after* ORing in each byte, so the
    final iteration left the result one byte (a factor of 0x100) too large.
    Shifting before the OR yields the correct swapped value.
    """
    ret = 0
    for i in range(size):
        ret = (ret << 8) | ((value >> (8*i)) & 0xff)
    return ret
# Zero-padded hex format strings keyed by byte width (2 hex digits per byte).
hex_fmt = {
    0:'0x%.1x',
    1:"0x%.2x",
    2:"0x%.4x",
    4:"0x%.8x",
    8:"0x%.16x",
}
def intwidth(val):
    # Number of bytes needed to represent abs(val); 0 for val == 0.
    val = abs(val)
    width = 0
    while val:
        width += 1
        val >>= 8
    return width
def hex(value, size=None):
    """
    Format *value* as a 0x-prefixed hex string zero-padded to *size* bytes
    (size inferred from the value when not given).

    NOTE: intentionally shadows the builtin hex() inside this module;
    the name is kept for API compatibility.

    Fixes: the fallback path used '0x%.s', whose zero string precision
    always rendered an empty string (so e.g. 3-byte values printed as
    just '0x'); the duplicated final return was unreachable and is gone.
    """
    if size is None:
        size = intwidth(value)
    fmt = hex_fmt.get(size)
    if fmt is not None:
        return fmt % value
    # No canned format for this width: build the string byte by byte.
    x = []
    while value:
        x.append('%.2x' % (value & 0xff))
        value = value >> 8
    x.reverse()
    return '0x%s' % ''.join(x)
def binrepr(intval, bitwidth=None):
    '''
    Return a string of one's and zero's for the given value,
    left-padded with zeros to *bitwidth* when given.
    '''
    digits = []
    while intval:
        digits.append('1' if intval & 1 else '0')
        intval >>= 1
    binstr = ''.join(reversed(digits))
    if bitwidth is not None:
        binstr = binstr.rjust(bitwidth, '0')
    return binstr
def binary(binstr):
    '''
    Decode a binary string of 1/0's (e.g. '101') into a python number.
    '''
    return int(binstr,2)
def binbytes(binstr):
    '''
    Decode a binary string of 1/0's into a python binary
    string, 8 bits per output character.
    '''
    if len(binstr) % 8:
        raise Exception('Byte padded binary strings only for now!')
    chunks = [binstr[i:i + 8] for i in range(0, len(binstr), 8)]
    return ''.join(chr(binary(chunk)) for chunk in chunks)
def parsebits(bytes, offset, bitoff, bitsize):
    '''
    Parse *bitsize* bits starting *bitoff* bits into the byte at
    *offset*, most-significant bit first.

    Example:
        parsebits('\\x0f\\x00', 0, 4, 8) == 0xf0

    Fix: byte index now uses floor division (//) so it stays an integer
    under Python 3 ('/' would yield a float index); behavior on
    Python 2 is unchanged.
    '''
    val = 0
    for cnt in range(bitsize):
        addbit = bitoff + cnt
        addoff = offset + (addbit // 8)
        modoff = addbit % 8
        o = ord(bytes[addoff])
        val = (val << 1) + ((o >> (7 - modoff)) & 1)
    return val
def masktest(s):
    '''
    Build a tester callback from a bit-mask specification such as
    '110100xxx00xx': fixed 1/0 positions must match exactly, while 'x'
    positions are ignored.

    example:
        opcode = 0x4388e234
        if masktest('1011xxxx0000')(opcode):
            print 'MATCHED!'

    NOTE: For performance reasons, it is recommended that masktest be
    used to initialize a static list of tests that are re-used rather
    than reconstructed.
    '''
    # Bits we care about (1/0 positions) and the value they must hold.
    care = binary(s.replace('0', '1').replace('x', '0'))
    want = binary(s.replace('x', '0'))
    def tester(testval):
        return (testval & care) == want
    return tester
#if __name__ == '__main__':
#print hex(parsebits('\x0f\x00', 0, 4, 8))
#print hex(parsebits('\x0f\x0f', 0, 4, 12))
#print hex(parsebits('\x0f\x0f\xf0', 1, 4, 4))
| |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains base REST classes for constructing REST servlets. """
from synapse.api.errors import SynapseError, Codes
import logging
import simplejson
logger = logging.getLogger(__name__)
def parse_integer(request, name, default=None, required=False):
    """Parse an integer parameter from the request query string.

    Thin wrapper around parse_integer_from_args using request.args.

    Args:
        request: the twisted HTTP request.
        name (str): the name of the query parameter.
        default (int|None): value to use if the parameter is absent,
            defaults to None.
        required (bool): whether to raise a 400 SynapseError if the
            parameter is absent, defaults to False.
    Returns:
        int|None: An int value or the default.
    Raises:
        SynapseError: if the parameter is absent and required, or if the
            parameter is present and not an integer.
    """
    return parse_integer_from_args(request.args, name, default, required)
def parse_integer_from_args(args, name, default=None, required=False):
    """Parse an integer from a dict of query args (see parse_integer).

    Raises:
        SynapseError: if the parameter is absent and required, or present
            but not an integer.
    """
    # Guard clause: absent parameter.
    if name not in args:
        if required:
            message = "Missing integer query parameter %r" % (name,)
            raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
        return default
    try:
        return int(args[name][0])
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        message = "Query parameter %r must be an integer" % (name,)
        raise SynapseError(400, message)
def parse_boolean(request, name, default=None, required=False):
    """Parse a boolean parameter from the request query string.

    Thin wrapper around parse_boolean_from_args using request.args.

    Args:
        request: the twisted HTTP request.
        name (str): the name of the query parameter.
        default (bool|None): value to use if the parameter is absent,
            defaults to None.
        required (bool): whether to raise a 400 SynapseError if the
            parameter is absent, defaults to False.
    Returns:
        bool|None: A bool value or the default.
    Raises:
        SynapseError: if the parameter is absent and required, or if the
            parameter is present and not one of "true" or "false".
    """
    return parse_boolean_from_args(request.args, name, default, required)
def parse_boolean_from_args(args, name, default=None, required=False):
    """Parse a boolean from a dict of query args (see parse_boolean).

    Raises:
        SynapseError: if the parameter is absent and required, or present
            but not "true"/"false".
    """
    # Guard clause: absent parameter.
    if name not in args:
        if required:
            message = "Missing boolean query parameter %r" % (name,)
            raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
        return default
    try:
        return {
            "true": True,
            "false": False,
        }[args[name][0]]
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        message = (
            "Boolean query parameter %r must be one of"
            " ['true', 'false']"
        ) % (name,)
        raise SynapseError(400, message)
def parse_string(request, name, default=None, required=False,
                 allowed_values=None, param_type="string"):
    """Parse a string parameter from the request query string.

    Thin wrapper around parse_string_from_args using request.args.

    Args:
        request: the twisted HTTP request.
        name (str): the name of the query parameter.
        default (str|None): value to use if the parameter is absent,
            defaults to None.
        required (bool): whether to raise a 400 SynapseError if the
            parameter is absent, defaults to False.
        allowed_values (list[str]): List of allowed values for the string,
            or None if any value is allowed, defaults to None.
        param_type (str): name of the expected type, used in error messages.
    Returns:
        str|None: A string value or the default.
    Raises:
        SynapseError if the parameter is absent and required, or if the
            parameter is present, must be one of a list of allowed values
            and is not one of those allowed values.
    """
    return parse_string_from_args(
        request.args, name, default, required, allowed_values, param_type,
    )
def parse_string_from_args(args, name, default=None, required=False,
                           allowed_values=None, param_type="string"):
    """Parse a string from a dict of query args (see parse_string)."""
    # Guard clause: absent parameter.
    if name not in args:
        if required:
            message = "Missing %s query parameter %r" % (param_type, name)
            raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
        return default
    value = args[name][0]
    if allowed_values is not None and value not in allowed_values:
        message = "Query parameter %r must be one of [%s]" % (
            name, ", ".join(repr(v) for v in allowed_values)
        )
        raise SynapseError(400, message)
    return value
def parse_json_value_from_request(request):
    """Parse a JSON value from the body of a twisted HTTP request.

    Args:
        request: the twisted HTTP request.
    Returns:
        The JSON value.
    Raises:
        SynapseError if the request body couldn't be read or decoded as
            JSON.
    """
    try:
        content_bytes = request.content.read()
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        raise SynapseError(400, "Error reading JSON content.")
    try:
        content = simplejson.loads(content_bytes)
    except simplejson.JSONDecodeError:
        raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
    return content
def parse_json_object_from_request(request):
    """Parse a JSON object from the body of a twisted HTTP request.

    Args:
        request: the twisted HTTP request.
    Returns:
        dict: the decoded JSON object.
    Raises:
        SynapseError if the request body couldn't be decoded as JSON or
            if it wasn't a JSON object.
    """
    content = parse_json_value_from_request(request)
    # isinstance instead of ``type(content) != dict``: the idiomatic type
    # check (also accepts dict subclasses, which are still JSON objects).
    if not isinstance(content, dict):
        message = "Content must be a JSON object."
        raise SynapseError(400, message, errcode=Codes.BAD_JSON)
    return content
class RestServlet(object):
    """A Synapse REST servlet.

    A subclass either overrides ``register`` itself, or provides a
    ``PATTERNS`` class attribute containing a pre-compiled regular
    expression; the default ``register`` then wires up any of the
    instance methods ``on_GET``, ``on_PUT``, ``on_POST``, ``on_DELETE``
    or ``on_OPTIONS`` for the matching HTTP method.

    CodeMessageExceptions thrown by those methods are automatically
    turned into the appropriate HTTP response.
    """
    def register(self, http_server):
        """ Register this servlet with the given HTTP server. """
        if not hasattr(self, "PATTERNS"):
            raise NotImplementedError("RestServlet must register something.")
        patterns = self.PATTERNS
        for verb in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
            handler = getattr(self, "on_%s" % (verb,), None)
            if handler is not None:
                http_server.register_paths(verb, patterns, handler)
| |
# Jaikrishna Initial Date: June 24, 2013 Last Updated: June 24, 2013
#
# These files have been made available online through a Creative Commons Attribution-S$
# (http://creativecommons.org/licenses/by-sa/3.0/)
#
# http://www.dexterindustries.com/
# This code is for testing the BrickPi with a Lego Motor
from BrickPi import * #import BrickPi.py file to use BrickPi operations
import numpy
from numpy import *
import math
# Motor and sensor port assignments.
LEFT_MOTOR = PORT_A
RIGHT_MOTOR = PORT_C
LEFT_TOUCHSENSOR = PORT_3
RIGHT_TOUCHSENSOR = PORT_1
SONAR_SENSOR = PORT_2
AVE_SPEED = 180  # nominal speed value (motor units)
# Dead-reckoned pose, updated by forward()/turn(); theta is in degrees.
current_x = 0
current_y = 0
current_theta = 0.0
def bumper_left_hit():
    # Nonzero when the left touch sensor is pressed.
    return BrickPi.Sensor[LEFT_TOUCHSENSOR]
def bumper_right_hit():
    # Nonzero when the right touch sensor is pressed.
    return BrickPi.Sensor[RIGHT_TOUCHSENSOR]
def get_distance():
    # Latest ultrasonic sensor reading (valid after BrickPiUpdateValues()).
    return BrickPi.Sensor[SONAR_SENSOR]
def get_speed():
    # Printable "[left, right]" string of the current motor speeds.
    return "[" + str(BrickPi.MotorSpeed[LEFT_MOTOR]) + ", " + str(BrickPi.MotorSpeed[RIGHT_MOTOR]) + "]"
def direction():
    # Steering indicator: left speed minus right speed plus 5, where the +5
    # cancels the 85/90 straight-line trim set by move_forward(), so ~0 means
    # straight, positive means turning right, negative means turning left.
    return BrickPi.MotorSpeed[LEFT_MOTOR] - BrickPi.MotorSpeed[RIGHT_MOTOR] + 5
def move_forward():
    # Drive forward with a slight left/right trim (85 vs 90) to track straight.
    BrickPi.MotorEnable[LEFT_MOTOR] = 1    # Enable the left motor
    BrickPi.MotorEnable[RIGHT_MOTOR] = 1   # Enable the right motor
    BrickPi.MotorSpeed[LEFT_MOTOR] = 85    # Set the speed of the left motor (-255 to 255)
    BrickPi.MotorSpeed[RIGHT_MOTOR] = 90   # Set the speed of the right motor (-255 to 255)
def updatable_move(leftSpeed, rightSpeed):
    """Update either motor's speed; pass None to leave that motor unchanged."""
    # 'is not None' instead of 'not x == None': identity is the Python idiom,
    # and it also avoids treating speed 0 specially.
    if leftSpeed is not None:
        BrickPi.MotorSpeed[LEFT_MOTOR] = leftSpeed
    if rightSpeed is not None:
        BrickPi.MotorSpeed[RIGHT_MOTOR] = rightSpeed
def change_speed(Speed):
    # Set both motors to Speed, giving the left motor a +5 trim.
    BrickPi.MotorSpeed[LEFT_MOTOR] = Speed + 5  # Set the speed of the left motor (-255 to 255)
    BrickPi.MotorSpeed[RIGHT_MOTOR] = Speed     # Set the speed of the right motor (-255 to 255)
def stop():
    # Halt both motors.
    BrickPi.MotorSpeed[LEFT_MOTOR] = 0   # stop the motor
    BrickPi.MotorSpeed[RIGHT_MOTOR] = 0  # stop the motor
def forward_40cm():
    # Drive 40cm forward using 750 degrees of wheel rotation on each motor
    # (timed-loop alternative kept below for reference).
    motorRotateDegree([105,100],[750,750],[LEFT_MOTOR,RIGHT_MOTOR])
    # BrickPi.MotorSpeed[PORT_A] = 180  #Set the speed of MotorA (-255 to 255)
    # BrickPi.MotorSpeed[PORT_B] = 173  #Set the speed of MotorB (-255 to 255)
    # ot = time.time()
    # while(time.time() - ot < 1.26):   #running while loop for 3 seconds
    #     BrickPiUpdateValues()         # Ask BrickPi to update values for sensors/motors
    #     time.sleep(.1)                # sleep for 500 ms
    # time.sleep(1.5)
def left_90deg():
    # Turn 90 degrees left: wheels rotate 125 degrees in opposite directions
    # (timed-loop alternative kept below for reference).
    motorRotateDegree([103,103],[-125,125],[LEFT_MOTOR,RIGHT_MOTOR])
    # BrickPi.MotorSpeed[PORT_A] = -83  #Set the speed of MotorA (-255 to 255)
    # BrickPi.MotorSpeed[PORT_B] = 83   #Set the speed of MotorB (-255 to 255)
    # ot = time.time()
    # while(time.time() - ot < 0.875):  #running while loop for 1 seconds
    #     BrickPiUpdateValues()         # Ask BrickPi to update values for sensors/motors
    #     time.sleep(.1)                # sleep for 500 ms
    # time.sleep(1.5)
def right_90deg():
    # Turn 90 degrees right (mirror of left_90deg).
    motorRotateDegree([103,103],[125,-125],[LEFT_MOTOR,RIGHT_MOTOR])
def reverse():
    # Back up: both wheels rotate 225 degrees backwards.
    motorRotateDegree([103,103],[-225,-225],[LEFT_MOTOR,RIGHT_MOTOR])
def run_oneRound():
    # Drive a square: four 40cm legs, turning left 90 degrees after each.
    for _ in range(4):
        forward_40cm()
        left_90deg()
def run_touchSensor():
    # Roam forever: drive forward until a bumper is pressed, then stop,
    # back up, turn away from the obstacle, and resume driving.
    hit = False
    move_forward()
    while not hit:
        result = BrickPiUpdateValues()  # Ask BrickPi to update values for motors
        if not result:  # zero/falsy result means the update succeeded
            if (bumper_left_hit() or bumper_right_hit()):
                hit = True
        time.sleep(.1)
        if hit:
            stop()  # stop the motor
            if not (bumper_left_hit() and bumper_right_hit()) :
                # Only one bumper pressed: turn away from that side.
                if (bumper_left_hit()):
                    reverse()
                    right_90deg()
                elif (bumper_right_hit()):
                    reverse()
                    left_90deg()
            else :
                # Both bumpers pressed (head-on): back up and turn left.
                reverse()
                left_90deg()
            # Clear the flag and keep roaming.
            hit = False
            move_forward()
def init_sonarSensor():
    # Take 5 quick ultrasonic readings (20ms apart) into a numpy int array,
    # intended for median filtering by the callers.
    distances = zeros(5,int)
    #print distances
    for i in range(5):
        BrickPiUpdateValues()
        distances[i] = get_distance()
        time.sleep(.02)
    return distances
def run_sonarSensor():
    # Hold a ~30 distance-unit standoff from whatever is ahead: drive
    # forward when farther than 30, reverse when closer, proportional to
    # the error. Loops forever.
    count = 0
    distances = init_sonarSensor()
    move_forward()
    while True:
        BrickPiUpdateValues()
        # Rolling buffer of the last 5 readings, reused cyclically.
        if count < 4:
            distances[count] = get_distance()
            count += 1
        elif count == 4:
            count = 0
            distances[count] = get_distance()
            count += 1
        distance = median(distances)
        # NOTE(review): this second assignment discards the rolling-buffer
        # median above in favor of a fresh 5-sample burst — confirm intended.
        distance = median(init_sonarSensor())
        print distance
        if distance > 30:
            # Proportional forward speed, capped by the +70 base term.
            change_speed(int(110*(distance-30)/70)+70)
            print "forward"
        elif distance < 30:
            change_speed(int(-110*(30-distance)/70)-70)
            print "reverse"
        else:
            change_speed(0)
        print get_speed()
        #time.sleep(.1)
def run_walkalongwall():
    # Follow a wall at a ~30 distance-unit standoff, steering by small
    # left/right speed offsets, until either bumper is pressed.
    move_forward()
    after_dis = median(init_sonarSensor())
    count = 0
    while not (bumper_left_hit() or bumper_right_hit()):
        BrickPiUpdateValues()
        # Track the change in distance between successive median readings.
        pre_dis = after_dis
        after_dis = median(init_sonarSensor())
        diff_dis = after_dis - pre_dis
        #distance = median(distances)
        #if after_dis > 30 and diff_dis > 0:
        if direction() > 2:  # if it is turning right
            if after_dis > 30:  # if the distance is greater than 30
                if diff_dis > 0:  # if distance is increasing (turning right too much)
                    rightspeed = int(90 + (after_dis - 30)*1.5)
                    updatable_move(85,rightspeed)
                else:
                    pass
            if after_dis < 30:
                # begin to turn left
                rightspeed = int(90 + (30 - after_dis)*1.5)
                updatable_move(85,rightspeed)
        elif direction() < -2:  # if it is turning left
            if after_dis > 30:  # if the distance is greater than 30
                leftspeed = int(85 + (after_dis - 30)*1.5)
                updatable_move(leftspeed, 90)
            elif after_dis < 30:
                if diff_dis < 0:
                    # Stronger correction (x2) when still closing in.
                    rightspeed = int(90 + (30 - after_dis)*2)
                    updatable_move(85,rightspeed)
                pass
        else:
            # Driving roughly straight.
            if after_dis > 30:  # if the distance is greater than 30
                # begin to turn right
                leftspeed = int(85 + (after_dis - 30)*1.5)
                updatable_move(leftspeed, 90)
            elif after_dis < 30:
                # begin to turn left
                rightspeed = int(90 + (30 - after_dis)*1.5)
                updatable_move(85,rightspeed)
        print after_dis
        print get_speed()
def forward(distance):
    # Drive straight *distance* units (18.75 wheel-degrees per unit) and
    # update the dead-reckoned pose along the current heading.
    global current_x,current_y,current_theta
    motorRotateDegree([160,160],[18.75*distance,18.75*distance],[LEFT_MOTOR,RIGHT_MOTOR])
    current_x = current_x + distance*math.cos(current_theta*math.pi/180)
    current_y = current_y + distance*math.sin(current_theta*math.pi/180)
def turn(degrees):
    # Rotate in place by *degrees* (1.95 wheel-degrees per robot-degree,
    # positive = counter-clockwise) and keep the stored heading normalized
    # to the (-180, 180] range.
    global current_x,current_y,current_theta
    motorRotateDegree([123,123],[-1.950*degrees,1.950*degrees],[LEFT_MOTOR,RIGHT_MOTOR])
    current_theta = current_theta + degrees
    if current_theta > 180:
        current_theta -= 360
    elif current_theta < -180:
        current_theta += 360
def navigateToWaypoint(x,y):
    # Turn toward (x, y), then drive there in a straight line, updating the
    # dead-reckoned pose via turn() and forward().
    print [x,y]
    global current_x,current_y,current_theta
    print [current_x,current_y]
    distance = pow(pow(x-current_x,2)+pow(y-current_y,2),0.5)
    print x == current_x
    # Rounded deltas to avoid float noise around the vertical case.
    dif_y = float(round(y,3)-round(current_y,3))
    dif_x = float(round(x,3)-round(current_x,3))
    if dif_x == 0:
        # Straight up or down: atan would divide by zero.
        if y > current_y:
            degrees = 90 - current_theta
            print "positive y"
        elif y < current_y:
            degrees = -90 - current_theta
            print "negative y"
    else:
        degrees = math.atan(dif_y/dif_x)/math.pi*180
        # Shift atan's (-90, 90) result into the correct quadrant.
        if dif_y > 0 and dif_x > 0:
            degrees = degrees - current_theta
        elif dif_y > 0 and dif_x < 0:
            degrees = degrees - current_theta + 180
        elif dif_y < 0 and dif_x > 0:
            degrees = degrees - current_theta
        elif dif_y < 0 and dif_x < 0:
            degrees = degrees - current_theta - 180
    # Normalize the turn to (-180, 180] so we take the short way around.
    if degrees > 180:
        degrees -= 360
    elif degrees < -180:
        degrees += 360
    print "current location: " + str(current_x)+" "+str(current_y)
    print "current direction: " + str(current_theta)
    print "turn" + str(degrees)
    turn(degrees)
    time.sleep(1)
    print "forward" + str(distance)
    forward(distance)
    print "current location: " + str(current_x)+" "+str(current_y)
    print "current direction: " + str(current_theta)
# Main
# set up sensors
BrickPiSetup()  # setup the serial port for communication
BrickPi.MotorEnable[LEFT_MOTOR] = 1   # Enable the left motor
BrickPi.MotorEnable[RIGHT_MOTOR] = 1  # Enable the right motor
BrickPi.SensorType[LEFT_TOUCHSENSOR] = TYPE_SENSOR_TOUCH   # Set the type of sensor
BrickPi.SensorType[RIGHT_TOUCHSENSOR] = TYPE_SENSOR_TOUCH  # Set the type of sensor
BrickPi.SensorType[SONAR_SENSOR] = TYPE_SENSOR_ULTRASONIC_CONT  # Set up ultrasonic sensor
BrickPiSetupSensors()  # Send the properties of sensors to BrickPi
# test touch sensor
#left_90deg()
#right_90deg()
#run_touchSensor()
# test ultrasonic sensor
#run_sonarSensor()
#print get_distance()
#run_walkalongwall()
'''move_forward()
while True:
    BrickPiUpdateValues()'''
# Drive a triangle of waypoints and return to the origin.
print "action1"
navigateToWaypoint(50.0,50.0)
time.sleep(0.5)
print "action2"
navigateToWaypoint(50.0,-20.0)
time.sleep(0.5)
print "action3"
navigateToWaypoint(0.0,0.0)
time.sleep(0.5)
#turn(45)
#time.sleep(0.5)
#turn(180)
#while not (bumper_left_hit() or bumper_right_hit()):
#for i in range(10):
#    BrickPiUpdateValues()
#    print BrickPi.Sensor[SONAR_SENSOR]
#    time.sleep(0.05)
'''def test_turn(degrees):
    motorRotateDegree([123,123],[-1.950*degrees,1.950*degrees],[LEFT_MOTOR,RIGHT_MOTOR])
test_turn(180)'''
| |
import logging
from django.apps import apps as django_apps
from django.conf import settings
from django.db import transaction
from orchestra.core.errors import WorkflowError
from orchestra.models import Certification
from orchestra.models import Step
from orchestra.models import TodoListTemplate
from orchestra.models import Workflow
from orchestra.models import WorkflowVersion
from orchestra.workflow.defaults import get_default_assignment_policy
from orchestra.workflow.defaults import get_default_creation_policy
from orchestra.workflow.defaults import get_default_review_policy
from orchestra.workflow.directory import parse_workflow_directory
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def get_workflow_version_slugs():
    """
    Map each workflow slug declared by the ORCHESTRA_WORKFLOWS apps to
    its app label and an iterable of its version slugs.

    Raises:
        WorkflowError: if two apps declare the same workflow slug.
    """
    versions = {}
    for app_name in settings.ORCHESTRA_WORKFLOWS:
        # App label is the last part of the app name by default.
        app_label = app_name.split('.')[-1]
        app_path = django_apps.get_app_config(app_label).path
        data = parse_workflow_directory(app_path)
        slug = data['workflow']['slug']
        existing = versions.get(slug)
        if existing is not None:
            raise WorkflowError('Workflow {} present in multiple apps: {}, {}'
                                .format(slug,
                                        existing['app_label'],
                                        app_label))
        versions[slug] = {
            'app_label': app_label,
            'versions': (version['slug'] for version in data['versions'])
        }
    return versions
@transaction.atomic
def load_workflow(app_label, version_slug, force=False):
    """Load one version of a workflow from an app's workflow directory.

    Creates/updates the Workflow, its Certifications and their dependency
    links, then delegates to load_workflow_version. Runs in a single DB
    transaction, so a raised WorkflowError rolls everything back.

    Args:
        app_label (str): Django app label containing the workflow files.
        version_slug (str): slug of the version to load.
        force (bool): allow updating an already-loaded version.

    Raises:
        WorkflowError: for missing certifications or an invalid version.
    """
    workflow_directory = django_apps.get_app_config(app_label).path
    data = parse_workflow_directory(workflow_directory)
    # Create the workflow object if it doesn't exist
    workflow_data = data['workflow']
    workflow, workflow_created = Workflow.objects.update_or_create(
        slug=workflow_data['slug'],
        defaults={
            'name': workflow_data['name'],
            'description': workflow_data['description'],
            'code_directory': workflow_directory,
            'sample_data_load_function': workflow_data.get(
                'sample_data_load_function')
        }
    )
    # Create all certifications for the workflow
    for certification_data in workflow_data['certifications']:
        Certification.objects.update_or_create(
            slug=certification_data['slug'],
            workflow=workflow,
            defaults={
                'name': certification_data['name'],
                'description': certification_data['description'],
            }
        )
    # Create the certification dependencies once all certs are in the db
    # Allow updating these over time so that a workflow's certifications can
    # evolve. This means that the user is responsible for ensuring that all
    # workers have the proper certifications after updating these dependencies.
    for certification_data in workflow_data['certifications']:
        certification = Certification.objects.get(
            slug=certification_data['slug'],
            workflow=workflow
        )
        required_certification_slugs = certification_data.get(
            'required_certifications', [])
        required_certifications = Certification.objects.filter(
            workflow=workflow,
            slug__in=required_certification_slugs
        )
        # Count mismatch means at least one referenced slug doesn't exist.
        if required_certifications.count() != len(
                required_certification_slugs):
            raise WorkflowError(
                'Certification {} requires non-existent certification.'
                .format(certification_data['slug']))
        certification.required_certifications.set(
            list(required_certifications))
    # Load the desired versions
    desired_versions = [version_data for version_data in data['versions']
                        if version_data['slug'] == version_slug]
    if len(desired_versions) != 1:
        raise WorkflowError('Invalid version requested: {}'
                            .format(version_slug))
    load_workflow_version(desired_versions[0], workflow, force=force)
def load_workflow_version(version_data, workflow, force=False):
    """Create or (with force=True) update one version of a workflow.

    Args:
        version_data (dict): parsed version description (slug, name,
            steps, ...).
        workflow (Workflow): the parent workflow object.
        force (bool): allow updating an existing version, subject to the
            topology checks below.

    Raises:
        WorkflowError: if the version exists and force is False, or if the
            update would remove steps or change step dependencies.
    """
    # Create the version object
    version, version_created = WorkflowVersion.objects.update_or_create(
        slug=version_data['slug'],
        workflow=workflow,
        defaults={
            'name': version_data['name'],
            'description': version_data['description'],
            'sanity_checks': version_data.get('sanity_checks', {}),
            'abort_completion_function': version_data.get(
                'abort_completion_function', {}),
        }
    )
    if not version_created:
        if not force:
            # It is safe to error out after modifying the DB because
            # all of this code is wrapped in a transaction by load_workflow.
            raise WorkflowError('Version {} already exists'
                                .format(version_data['slug']))
        # Check that the versions are safe to merge
        new_step_slugs = set(step['slug'] for step in version_data['steps'])
        old_step_slugs = set(
            Step.objects
            .filter(workflow_version=version)
            .values_list('slug', flat=True)
        )
        if old_step_slugs - new_step_slugs:
            raise WorkflowError(
                'Even with --force, you cannot remove steps from a workflow.'
                'Drop and recreate the database to reset, or create a new '
                'version for your workflow.')
    # Create or update the version steps.
    old_creation_dependencies = {}
    old_submission_dependencies = {}
    for step_data in version_data['steps']:
        is_human = step_data.get('is_human', True)
        completion_ends_project = step_data.get(
            'completion_ends_project', False)
        step, step_created = Step.objects.update_or_create(
            slug=step_data['slug'],
            workflow_version=version,
            defaults={
                'name': step_data['name'],
                'description': step_data['description'],
                'is_human': is_human,
                'completion_ends_project': completion_ends_project,
                'detailed_description_function': step_data.get(
                    'detailed_description_function', {}),
                'execution_function': step_data.get('execution_function', {}),
                'review_policy': step_data.get(
                    'review_policy',
                    get_default_review_policy(is_human)),
                'assignment_policy': step_data.get(
                    'assignment_policy',
                    get_default_assignment_policy(is_human)),
                'creation_policy': step_data.get(
                    'creation_policy',
                    get_default_creation_policy()),
                'user_interface': step_data.get('user_interface', {}),
                'assignable_hours_function': step_data.get(
                    'assignable_hours_function', {}),
            }
        )
        if not step_created:
            # Remember the previous dependency topology so we can verify
            # below that a forced update did not change it.
            old_creation_dependencies[step_data['slug']] = set(
                step.creation_depends_on.values_list('slug', flat=True))
            old_submission_dependencies[step_data['slug']] = set(
                step.submission_depends_on.values_list('slug', flat=True))
        # Don't prevent updates to these, because we want to allow
        # certifications to evolve over the lifetime of a workflow.
        _set_step_relations(step, step_data, 'required_certifications',
                            Certification, workflow=workflow)
    # Set up step dependencies once the steps objects are in the DB.
    for step_data in version_data['steps']:
        step_slug = step_data['slug']
        step = Step.objects.get(
            slug=step_slug,
            workflow_version=version
        )
        # Set step creation dependencies.
        # NOTE(review): only creation dependencies are verified against the
        # old topology here; submission dependencies are collected above but
        # not checked — confirm this is intended.
        _verify_dependencies_not_updated(
            step_data,
            'creation_depends_on',
            old_creation_dependencies.get(step_slug)
        )
        _set_step_relations(step, step_data, 'creation_depends_on', Step,
                            workflow_version=version)
        _set_step_relations(step, step_data, 'submission_depends_on', Step,
                            workflow_version=version)
        _set_step_relations(step, step_data, 'todolist_templates_to_apply',
                            TodoListTemplate)
def _verify_dependencies_not_updated(step_data, dependency_attr,
old_dependencies):
new_dependencies = set(step_data.get(dependency_attr, []))
old_set = set(old_dependencies or [])
new_set = set(new_dependencies)
if old_dependencies is not None and (new_set - old_set):
raise WorkflowError(
'Even with --force, you cannot change the topology of a workflow. '
'Drop and recreate the database to reset, or create a new '
'version for your workflow.')
if new_set != old_set:
logger.warn(
('Step `%s` changed dependencies from %s to %s. You '
'will manually have to re-run task creation logic if you '
'want existing projects to receive new tasks.'),
step_data['slug'], old_set, new_set)
def _set_step_relations(step, step_data, relation_attr, relation_model,
**model_filters):
relation_slugs = set(step_data.get(relation_attr, []))
relations = list(relation_model.objects.filter(
slug__in=relation_slugs, **model_filters))
if len(relations) != len(relation_slugs):
raise WorkflowError(
'{}.{} contains a non-existent slug.'
.format(step_data['slug'], relation_attr))
getattr(step, relation_attr).set(relations)
| |
"""
Support pre-0.12 series pickle compatibility.
"""
import sys
import pandas # noqa
import copy
import pickle as pkl
from pandas import compat, Index
from pandas.compat import u, string_types # noqa
def load_reduce(self):
    """Compat replacement for the pickle REDUCE opcode handler.

    Tries the normal ``func(*args)`` call first; on failure, applies a
    series of fallbacks for pickles written by older pandas/numpy
    versions, and finally re-raises when nothing worked.
    """
    stack = self.stack
    args = stack.pop()
    func = stack[-1]
    # Grab the class name for debugging; intentionally unused otherwise.
    if len(args) and type(args[0]) is type:
        n = args[0].__name__  # noqa
    try:
        stack[-1] = func(*args)
        return
    except Exception as e:
        # If we have a deprecated function,
        # try to replace and try again.
        msg = '_reconstruct: First argument must be a sub-type of ndarray'
        if msg in str(e):
            try:
                # Reconstruction rejected the class: build an empty
                # instance directly and let pickle restore its state.
                cls = args[0]
                stack[-1] = object.__new__(cls)
                return
            except TypeError:
                pass
        # Try to re-encode the string arguments (str -> bytes) when an
        # explicit encoding was supplied, then retry the call.
        if getattr(self, 'encoding', None) is not None:
            args = tuple(arg.encode(self.encoding)
                         if isinstance(arg, string_types)
                         else arg for arg in args)
            try:
                stack[-1] = func(*args)
                return
            except TypeError:
                pass
        # Unknown exception: re-raise, optionally dumping diagnostics.
        if getattr(self, 'is_verbose', None):
            print(sys.exc_info())
            print(func, args)
        raise
# If classes are moved, provide compat here.
# Maps (old_module, old_name) -> (new_module, new_name); consulted by
# Unpickler.find_class so that pickles created against earlier pandas
# layouts still resolve.  The bare numeric comments are GitHub issue
# numbers for the corresponding moves.
_class_locations_map = {
    ('pandas.core.sparse.array', 'SparseArray'):
        ('pandas.core.arrays', 'SparseArray'),

    # 15477
    ('pandas.core.base', 'FrozenNDArray'):
        ('pandas.core.indexes.frozen', 'FrozenNDArray'),
    ('pandas.core.base', 'FrozenList'):
        ('pandas.core.indexes.frozen', 'FrozenList'),

    # 10890
    ('pandas.core.series', 'TimeSeries'):
        ('pandas.core.series', 'Series'),
    ('pandas.sparse.series', 'SparseTimeSeries'):
        ('pandas.core.sparse.series', 'SparseSeries'),

    # 12588, extensions moving
    ('pandas._sparse', 'BlockIndex'):
        ('pandas._libs.sparse', 'BlockIndex'),
    ('pandas.tslib', 'Timestamp'):
        ('pandas._libs.tslib', 'Timestamp'),

    # 18543 moving period
    ('pandas._period', 'Period'): ('pandas._libs.tslibs.period', 'Period'),
    ('pandas._libs.period', 'Period'):
        ('pandas._libs.tslibs.period', 'Period'),

    # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
    ('pandas.tslib', '__nat_unpickle'):
        ('pandas._libs.tslibs.nattype', '__nat_unpickle'),
    ('pandas._libs.tslib', '__nat_unpickle'):
        ('pandas._libs.tslibs.nattype', '__nat_unpickle'),

    # 15998 top-level dirs moving
    ('pandas.sparse.array', 'SparseArray'):
        ('pandas.core.arrays.sparse', 'SparseArray'),
    ('pandas.sparse.series', 'SparseSeries'):
        ('pandas.core.sparse.series', 'SparseSeries'),
    ('pandas.sparse.frame', 'SparseDataFrame'):
        ('pandas.core.sparse.frame', 'SparseDataFrame'),
    ('pandas.indexes.base', '_new_Index'):
        ('pandas.core.indexes.base', '_new_Index'),
    ('pandas.indexes.base', 'Index'):
        ('pandas.core.indexes.base', 'Index'),
    ('pandas.indexes.numeric', 'Int64Index'):
        ('pandas.core.indexes.numeric', 'Int64Index'),
    ('pandas.indexes.range', 'RangeIndex'):
        ('pandas.core.indexes.range', 'RangeIndex'),
    ('pandas.indexes.multi', 'MultiIndex'):
        ('pandas.core.indexes.multi', 'MultiIndex'),
    ('pandas.tseries.index', '_new_DatetimeIndex'):
        ('pandas.core.indexes.datetimes', '_new_DatetimeIndex'),
    ('pandas.tseries.index', 'DatetimeIndex'):
        ('pandas.core.indexes.datetimes', 'DatetimeIndex'),
    ('pandas.tseries.period', 'PeriodIndex'):
        ('pandas.core.indexes.period', 'PeriodIndex'),

    # 19269, arrays moving
    ('pandas.core.categorical', 'Categorical'):
        ('pandas.core.arrays', 'Categorical'),

    # 19939, add timedeltaindex, float64index compat from 15998 move
    ('pandas.tseries.tdi', 'TimedeltaIndex'):
        ('pandas.core.indexes.timedeltas', 'TimedeltaIndex'),
    ('pandas.indexes.numeric', 'Float64Index'):
        ('pandas.core.indexes.numeric', 'Float64Index'),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat

if compat.PY3:

    class Unpickler(pkl._Unpickler):
        def find_class(self, module, name):
            """Resolve (module, name), remapping relocated classes first."""
            # override superclass
            key = (module, name)
            module, name = _class_locations_map.get(key, key)
            return super(Unpickler, self).find_class(module, name)

else:

    class Unpickler(pkl.Unpickler):
        def find_class(self, module, name):
            """Resolve (module, name), remapping relocated classes first."""
            # override superclass
            key = (module, name)
            module, name = _class_locations_map.get(key, key)
            # Python 2 Unpickler resolves classes by importing manually.
            __import__(module)
            mod = sys.modules[module]
            klass = getattr(mod, name)
            return klass


# Copy the dispatch table before mutating it, so the stdlib Unpickler's
# own table is left untouched; then route REDUCE to the compat handler.
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
    """Compat handler for the pickle NEWOBJ opcode.

    Index subclasses are created uninitialized via ``object.__new__``
    (compat path); everything else goes through the class's own
    ``__new__`` with the pickled constructor args.
    """
    ctor_args = self.stack.pop()
    klass = self.stack[-1]
    if issubclass(klass, Index):
        # compat
        instance = object.__new__(klass)
    else:
        instance = klass.__new__(klass, *ctor_args)
    self.stack[-1] = instance
# Route the NEWOBJ opcode (pickle protocol 2) to the compat handler.
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
    """Compat handler for the pickle NEWOBJ_EX opcode (protocol 4).

    Mirrors ``load_newobj`` but also pops keyword arguments; Index
    subclasses are created uninitialized via ``object.__new__``.
    """
    kw = self.stack.pop()
    positional = self.stack.pop()
    klass = self.stack.pop()
    # compat
    if issubclass(klass, Index):
        instance = object.__new__(klass)
    else:
        instance = klass.__new__(klass, *positional, **kw)
    self.append(instance)
try:
    Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
    # pkl.NEWOBJ_EX does not exist on older Pythons (the opcode arrived
    # with pickle protocol 4); silently skip wiring the handler there.
    pass
def load(fh, encoding=None, compat=False, is_verbose=False):
    """load a pickle, with a provided encoding

    if compat is True:
       fake the old class hierarchy
       if it works, then return the new type objects

    Parameters
    ----------
    fh : a filelike object
    encoding : an optional encoding
    compat : provide Series compatibility mode, boolean, default False
        NOTE(review): not referenced in this function body -- presumably
        vestigial or consumed elsewhere; confirm before removing.
    is_verbose : show exception output
    """
    # The original wrapped this body in
    # ``try: ... except (ValueError, TypeError): raise`` -- a handler
    # that only re-raises is a no-op, so it has been removed.
    fh.seek(0)
    if encoding is not None:
        up = Unpickler(fh, encoding=encoding)
    else:
        up = Unpickler(fh)
    # load_reduce() consults this flag when printing diagnostics.
    up.is_verbose = is_verbose
    return up.load()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``tickets`` app.

    Creates (and, in :meth:`backwards`, drops) the Department,
    UserProfile, Team, SubscriberType, Type, Reason, Urgence, Ticket and
    ChangeLog tables.  Auto-generated by South; do not hand-tune the
    table definitions.
    """

    def forwards(self, orm):
        """Create all ``tickets`` tables."""
        # Adding model 'Department'
        db.create_table(u'tickets_department', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'tickets', ['Department'])

        # Adding model 'UserProfile'
        db.create_table(u'tickets_userprofile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
            ('department', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Department'])),
        ))
        db.send_create_signal(u'tickets', ['UserProfile'])

        # Adding model 'Team'
        db.create_table(u'tickets_team', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('department', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Department'])),
            ('days_off', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ))
        db.send_create_signal(u'tickets', ['Team'])

        # Adding model 'SubscriberType'
        db.create_table(u'tickets_subscribertype', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'tickets', ['SubscriberType'])

        # Adding model 'Type'
        db.create_table(u'tickets_type', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'tickets', ['Type'])

        # Adding model 'Reason'
        db.create_table(u'tickets_reason', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'tickets', ['Reason'])

        # Adding model 'Urgence'
        db.create_table(u'tickets_urgence', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('color', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal(u'tickets', ['Urgence'])

        # Adding model 'Ticket'
        db.create_table(u'tickets_ticket', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
            ('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Type'])),
            ('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Team'])),
            ('urgence', self.gf('django.db.models.fields.related.ForeignKey')(default=2, to=orm['tickets.Urgence'])),
            ('subscriber_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.SubscriberType'])),
            ('account', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('price', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('address', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('technical_data', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('solution', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('reason', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Reason'], null=True, blank=True)),
            ('user_created', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ticket_created', to=orm['auth.User'])),
            ('user_modified', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ticket_modified', to=orm['auth.User'])),
            # NOTE(review): a timedelta default on a TimeField is unusual;
            # confirm it behaves as intended on the target database.
            ('time', self.gf('django.db.models.fields.TimeField')(default=datetime.timedelta(0, 3600))),
            ('date_assigned', self.gf('django.db.models.fields.DateTimeField')()),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal(u'tickets', ['Ticket'])

        # Adding model 'ChangeLog'
        db.create_table(u'tickets_changelog', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Ticket'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('action', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'tickets', ['ChangeLog'])

    def backwards(self, orm):
        """Drop every table created by :meth:`forwards`."""
        # Deleting model 'Department'
        db.delete_table(u'tickets_department')

        # Deleting model 'UserProfile'
        db.delete_table(u'tickets_userprofile')

        # Deleting model 'Team'
        db.delete_table(u'tickets_team')

        # Deleting model 'SubscriberType'
        db.delete_table(u'tickets_subscribertype')

        # Deleting model 'Type'
        db.delete_table(u'tickets_type')

        # Deleting model 'Reason'
        db.delete_table(u'tickets_reason')

        # Deleting model 'Urgence'
        db.delete_table(u'tickets_urgence')

        # Deleting model 'Ticket'
        db.delete_table(u'tickets_ticket')

        # Deleting model 'ChangeLog'
        db.delete_table(u'tickets_changelog')

    # South's frozen ORM snapshot of every model this migration touches;
    # auto-generated -- keep in sync with the models at generation time.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'tickets.changelog': {
            'Meta': {'object_name': 'ChangeLog'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Ticket']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'tickets.department': {
            'Meta': {'object_name': 'Department'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.reason': {
            'Meta': {'object_name': 'Reason'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.subscribertype': {
            'Meta': {'object_name': 'SubscriberType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.team': {
            'Meta': {'object_name': 'Team'},
            'days_off': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Department']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.ticket': {
            'Meta': {'object_name': 'Ticket'},
            'account': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'date_assigned': ('django.db.models.fields.DateTimeField', [], {}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'reason': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Reason']", 'null': 'True', 'blank': 'True'}),
            'solution': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'subscriber_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.SubscriberType']"}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Team']"}),
            'technical_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.timedelta(0, 3600)'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Type']"}),
            'urgence': ('django.db.models.fields.related.ForeignKey', [], {'default': '2', 'to': u"orm['tickets.Urgence']"}),
            'user_created': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_created'", 'to': u"orm['auth.User']"}),
            'user_modified': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_modified'", 'to': u"orm['auth.User']"})
        },
        u'tickets.type': {
            'Meta': {'object_name': 'Type'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.urgence': {
            'Meta': {'object_name': 'Urgence'},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Department']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['tickets']
| |
"""
Tests for the gocookies code.
"""
import os
import shutil
import tempfile
import unittest
import urllib.request
import pyrfc3339
from juju.client.gocookies import GoCookieJar
# cookie_content holds the JSON contents of a Go-produced
# cookie file (reformatted so it's not all on one line but
# otherwise unchanged).
cookie_content = """
[
{
"CanonicalHost": "bar.com",
"Creation": "2017-11-17T08:53:55.088820092Z",
"Domain": "bar.com",
"Expires": "2345-11-15T18:16:08Z",
"HostOnly": true,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088822562Z",
"Name": "bar",
"Path": "/",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088822562Z",
"Value": "bar-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088814857Z",
"Domain": "x.foo.com",
"Expires": "2345-11-15T18:16:05Z",
"HostOnly": true,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088884015Z",
"Name": "foo",
"Path": "/path",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088814857Z",
"Value": "foo-path-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088814857Z",
"Domain": "foo.com",
"Expires": "2345-11-15T18:16:06Z",
"HostOnly": false,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088919437Z",
"Name": "foo4",
"Path": "/path",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088814857Z",
"Value": "foo4-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088790709Z",
"Domain": "x.foo.com",
"Expires": "2345-11-15T18:16:01Z",
"HostOnly": true,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088884015Z",
"Name": "foo",
"Path": "/",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088790709Z",
"Value": "foo-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088790709Z",
"Domain": "foo.com",
"Expires": "2345-11-15T18:16:02Z",
"HostOnly": false,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088919437Z",
"Name": "foo1",
"Path": "/",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088790709Z",
"Value": "foo1-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088790709Z",
"Domain": "x.foo.com",
"Expires": "2345-11-15T18:16:03Z",
"HostOnly": true,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088850252Z",
"Name": "foo2",
"Path": "/",
"Persistent": true,
"Secure": true,
"Updated": "2017-11-17T08:53:55.088790709Z",
"Value": "foo2-value"
},
{
"CanonicalHost": "x.foo.com",
"Creation": "2017-11-17T08:53:55.088790709Z",
"Domain": "foo.com",
"Expires": "2345-11-15T18:16:04Z",
"HostOnly": false,
"HttpOnly": false,
"LastAccess": "2017-11-17T08:53:55.088919437Z",
"Name": "foo3",
"Path": "/",
"Persistent": true,
"Secure": false,
"Updated": "2017-11-17T08:53:55.088790709Z",
"Value": "foo3-value"
}
]
"""
# cookie_content_queries holds a set of queries
# that were automatically generated by running
# the queries on the above cookie_content data
# and printing the results.
# Each entry is (url, [(cookie_name, cookie_value), ...]) giving the
# cookies the jar is expected to present for that URL.
cookie_content_queries = [
    ('http://x.foo.com', [
        ('foo', 'foo-value'),
        ('foo1', 'foo1-value'),
        ('foo3', 'foo3-value'),
    ]),
    ('https://x.foo.com', [
        ('foo', 'foo-value'),
        ('foo1', 'foo1-value'),
        ('foo2', 'foo2-value'),
        ('foo3', 'foo3-value'),
    ]),
    ('http://arble.foo.com', [
        ('foo1', 'foo1-value'),
        ('foo3', 'foo3-value'),
    ]),
    ('http://arble.com', [
    ]),
    ('http://x.foo.com/path/x', [
        ('foo', 'foo-path-value'),
        ('foo4', 'foo4-value'),
        ('foo', 'foo-value'),
        ('foo1', 'foo1-value'),
        ('foo3', 'foo3-value'),
    ]),
    ('http://arble.foo.com/path/x', [
        ('foo4', 'foo4-value'),
        ('foo1', 'foo1-value'),
        ('foo3', 'foo3-value'),
    ]),
    ('http://foo.com/path/x', [
        ('foo4', 'foo4-value'),
        ('foo1', 'foo1-value'),
        ('foo3', 'foo3-value'),
    ]),
]
class TestGoCookieJar(unittest.TestCase):
    """Tests for GoCookieJar's reading/writing of Go-format cookie files."""

    def setUp(self):
        # Fresh scratch directory for each test's cookie files.
        self.dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.dir)

    def test_readcookies(self):
        """Loading a Go cookie file yields the expected cookies per URL."""
        jar = self.load_jar(cookie_content)
        self.assert_jar_queries(jar, cookie_content_queries)

    def test_roundtrip(self):
        """Saving and re-loading a jar preserves all cookie behavior."""
        jar = self.load_jar(cookie_content)
        filename2 = os.path.join(self.dir, 'cookies2')
        jar.save(filename=filename2)
        jar = GoCookieJar()
        jar.load(filename=filename2)
        self.assert_jar_queries(jar, cookie_content_queries)

    def test_expiry_time(self):
        """The RFC3339 "Expires" field is parsed into a Unix timestamp."""
        content = '''[
            {
                "CanonicalHost": "bar.com",
                "Creation": "2017-11-17T08:53:55.088820092Z",
                "Domain": "bar.com",
                "Expires": "2345-11-15T18:16:08Z",
                "HostOnly": true,
                "HttpOnly": false,
                "LastAccess": "2017-11-17T08:53:55.088822562Z",
                "Name": "bar",
                "Path": "/",
                "Persistent": true,
                "Secure": false,
                "Updated": "2017-11-17T08:53:55.088822562Z",
                "Value": "bar-value"
            }
        ]'''
        jar = self.load_jar(content)
        got_expires = tuple(jar)[0].expires
        want_expires = int(pyrfc3339.parse('2345-11-15T18:16:08Z').timestamp())
        self.assertEqual(got_expires, want_expires)

    def load_jar(self, content):
        """Write *content* to a fresh file and load it into a GoCookieJar."""
        filename = os.path.join(self.dir, 'cookies')
        with open(filename, 'x') as f:
            f.write(content)
        jar = GoCookieJar()
        jar.load(filename=filename)
        return jar

    def assert_jar_queries(self, jar, queries):
        '''Assert that all the given queries (see cookie_content_queries)
        are satisfied when run on the given cookie jar.

        :param jar CookieJar: the cookie jar to query
        :param queries: the queries to run.
        '''
        for url, want_cookies in queries:
            req = urllib.request.Request(url)
            jar.add_cookie_header(req)
            # We can't use SimpleCookie to find out what cookies
            # have been presented, because SimpleCookie
            # only allows one cookie with a given name,
            # so we naively parse the cookies ourselves, which
            # is OK because we know we don't have to deal
            # with any complex cases.
            cookie_header = req.get_header('Cookie')
            got_cookies = []
            if cookie_header is not None:
                got_cookies = [
                    tuple(part.split('='))
                    for part in cookie_header.split('; ')
                ]
                got_cookies.sort()
            want_cookies = list(want_cookies)
            want_cookies.sort()
            self.assertEqual(got_cookies, want_cookies, msg='query {}; got {}; want {}'.format(url, got_cookies, want_cookies))
| |
"""
#;+
#; NAME:
#; abssys_utils
#; Version 1.0
#;
#; PURPOSE:
#; Module for Absorption Systems
#; 23-Oct-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from astropy.io import ascii, fits
from astropy import units as u
from xastropy.igm.abs_sys.ionic_clm import Ions_Clm, Ionic_Clm_File
from xastropy.xutils import xdebug as xdb
from xastropy import spec as xspec
from xastropy import kinematics as xkin
from astropy.coordinates import SkyCoord
###################### ######################
###################### ######################
###################### ######################
# Class for Absorption Line System
class Absline_System(object):
    """An absorption line system.

    Attributes
    ----------
    name : str
        'J'-style name built from the coordinates
    coord : SkyCoord
        Sky coordinates of the system
    zabs : float
        Absorption redshift
    NHI : float
        Log10 of the HI column density
    sigNHI : np.array(2)
        Log10 error of the HI column density (-/+)
    ions : Ions_Clm
        Ionic column densities (set by get_ions)
    """
    # NOTE(review): __metaclass__ only takes effect under Python 2; on
    # Python 3 this class is NOT actually abstract -- confirm intent.
    __metaclass__ = ABCMeta

    # Init
    def __init__(self, abs_type, zabs=0., NHI=0., MH=0., dat_file=None, tree=None):
        """ Initiator

        Parameters
        ----------
        abs_type : string
          Type of Abs Line System, e.g. MgII, DLA, LLS, CGM
        dat_file : string
          ASCII .dat file summarizing the system
        """
        self.zabs = zabs
        self.NHI = NHI
        self.MH = MH
        self.coord = None
        # Abs type ('NONE' stands in for a missing type)
        if abs_type == None:
            self.abs_type = 'NONE'
        else:
            self.abs_type = abs_type
        # Tree (root path prepended to data-file names)
        if tree == None: tree = ''
        self.tree = tree
        # Lines
        self.lines = {}  # Dict of Spectra_Line classes
        self.absid_file = None
        # Kinematics
        self.kin = {}
        # Fill in from the .dat file, if provided
        if dat_file != None:
            print('absys_utils: Reading {:s} file'.format(dat_file))
            self.parse_dat_file(dat_file)
            self.dat_file = dat_file
        # Initialize coord to a dummy position if the .dat file gave none
        if self.coord is None:
            ras, decs = ('00 00 00', '+00 00 00')
            self.coord = SkyCoord(ras, decs, 'icrs', unit=(u.hour, u.deg))

    # Read a .dat file
    def parse_dat_file(self, dat_file, verbose=False, flg_out=None):
        '''Parse a "<value> ! <key>" formatted .dat file into attributes.

        Parameters
        ----------
        dat_file : str
            Path of the .dat file
        verbose : bool
            Print the parsed dictionary when True
        flg_out : int
            1: Return the dictionary
        '''
        # Define
        datdict = OrderedDict()
        # Open
        f = open(dat_file, 'r')
        for line in f:
            # Lines look like "<value> ! <key>"
            tmp = line.split('! ')
            # tmp=line.split(' ! ')
            tkey = tmp[1].strip()
            key = tkey
            # key=tkey.replace(' ','')
            val = tmp[0].strip()
            datdict[key] = val
        f.close()
        # pdb.set_trace()
        self.datdict = datdict
        # #########
        # Pull attributes
        # RA/DEC
        try:
            ras, decs = (datdict['RA (2000)'], datdict['DEC (2000)'])
            # print(datdict['RA(2000)'], datdict['DEC(2000)'])
            # pdb.set_trace()
        except:
            # NOTE(review): bare except; missing keys fall back to a dummy
            # position, but so would any other error -- consider KeyError.
            ras, decs = ('00 00 00', '+00 00 00')
        self.coord = SkyCoord(ras, decs, 'icrs', unit=(u.hour, u.deg))
        # Name
        self.name = ('J' +
                     self.coord.ra.to_string(unit=u.hour, sep='', pad=True) +
                     self.coord.dec.to_string(sep='', pad=True, alwayssign=True))
        # zabs
        try:
            self.zabs = float(datdict['zabs'])
        except: self.zabs = 0.
        # NHI
        try:
            self.NHI = float(datdict['NHI'])  # DLA format
        except:
            try:
                self.NHI = float(datdict['NHI tot'])  # LLS format
            except: self.NHI = 0.
        # NHIsig
        try:
            key_sigNHI = datdict['sig(NHI)']  # DLA format
        except:
            try:
                key_sigNHI = datdict['NHI sig']  # LLS format
            except:
                key_sigNHI = '0.0 0.0'
        # NOTE(review): np.array(map(...)) only works under Python 2; on
        # Python 3 map() returns an iterator and this yields a 0-d object
        # array -- wrap in list() if py3 support is needed.
        self.sigNHI = np.array(map(float, key_sigNHI.split()))
        # Abund file
        try:
            key_clmfil = datdict['Abund file']  # DLA format
        except:
            key_clmfil = ''
        self.clm_fil = key_clmfil.strip()
        # xdb.set_trace()
        # Finish
        if verbose: print(datdict)
        if flg_out != None:
            if (flg_out % 2) == 1: ret_val = [datdict]
            else: ret_val = [0]
            return ret_val

    # Write a .dat file
    def write_dat_file(self):
        """Write self.datdict back to self.dat_file as "value ! key" rows."""
        # Assuming an OrderedDict
        f = open(self.dat_file, 'w')
        for key in self.datdict:
            sv = '{:60s}! {:s}\n'.format(self.datdict[key], key)
            f.write(str(sv))  # Avoids unicode
        f.close()
        print('abssys_utils.write_dat_file: Wrote {:s}'.format(self.dat_file))

    # ##
    # Parse AbsID file
    def parse_absid_file(self, abs_fil):
        """Read an AbsID FITS binary table and populate self.lines.

        Parameters
        ----------
        abs_fil : str
            Path of the AbsID FITS file
        """
        # FITS binary table
        hdu = fits.open(abs_fil)
        table = hdu[1].data
        newz = table[0]['ZABS']
        if (self.zabs > 0.) & (np.abs(self.zabs - newz) > 1e-4):
            print('WARNING: Updating zabs from {:s}'.format(abs_fil))
        self.zabs = newz
        self.absid_file = abs_fil
        # Load up lines
        for row in table:
            self.lines[row['WREST']] = xspec.analysis.Spectral_Line(row['WREST'])
            # Velocity limits and flags ('DV' is the legacy column name)
            try:
                self.lines[row['WREST']].analy['VLIM'] = row['VLIM']
            except KeyError:
                self.lines[row['WREST']].analy['VLIM'] = row['DV']
            self.lines[row['WREST']].analy['FLG_ANLY'] = row['FLG_ANLY']
            self.lines[row['WREST']].analy['FLG_EYE'] = row['FLG_EYE']
            self.lines[row['WREST']].analy['FLG_LIMIT'] = row['FLG_LIMIT']
            self.lines[row['WREST']].analy['DATFIL'] = row['DATFIL']
            self.lines[row['WREST']].analy['IONNM'] = row['IONNM']

    # ##
    # Write AbsID file
    def write_absid_file(self, outfil=None):
        """Write the line list plus analysis columns to an AbsID FITS file."""
        from astropy.table import Column
        from astropy.table.table import Table
        # NOTE(review): .keys() returns a view on Python 3, which has no
        # .sort() -- this method is Python 2 only as written; confirm
        # before porting.
        wrest = self.lines.keys()
        wrest.sort()
        if outfil is None:
            outfil = self.absid_file
        # Columns
        cols = [Column(np.array(wrest), name='WREST')]
        clm_nms = self.lines[wrest[0]].analy.keys()
        for clm_nm in clm_nms:
            clist = [self.lines[iwrest].analy[clm_nm] for iwrest in wrest]
            cols.append(Column(np.array(clist), name=clm_nm))
        cols.append(Column(np.ones(len(cols[0])) * self.zabs, name='ZABS'))
        table = Table(cols)
        prihdr = fits.Header()
        prihdr['COMMENT'] = "Above are the data sources"
        prihdu = fits.PrimaryHDU(header=prihdr)
        table_hdu = fits.BinTableHDU.from_columns(np.array(table.filled()))
        thdulist = fits.HDUList([prihdu, table_hdu])
        # NOTE(review): 'clobber' was renamed 'overwrite' in newer astropy.
        thdulist.writeto(outfil, clobber=True)
        print('Wrote AbsID file: {:s}'.format(outfil))

    # #################
    # Parse the ion files
    def get_ions(self, skip_ions=False, fill_lines=False):
        """Read the .clm and .all files, setting self.clm_analy / self.ions."""
        # Read .clm file
        clm_fil = self.tree + self.clm_fil
        self.clm_analy = Ionic_Clm_File(clm_fil)
        if fill_lines is True:
            self.lines = self.clm_analy.clm_lines
        # Read .all file
        ion_fil = self.tree + self.clm_analy.ion_fil  # Should check for existence
        all_fil = ion_fil.split('.ion')[0] + '.all'
        if skip_ions is False:
            self.ions = Ions_Clm(all_fil, trans_file=ion_fil)

    # #################
    # Load low_ion kinematics
    def load_low_kin(self):
        """Unfinished stub for loading low-ion kinematics.

        NOTE(review): drops into the debugger and then references ``spec``
        and ``vmnx``, which are undefined in this scope -- incomplete.
        """
        # Grab spectrum from ions
        xdb.set_trace()
        out_kin = xkin.orig_kin(spec, vmnx)

    @abstractmethod
    def print_abs_type(self):
        """Return a string representing the type of system this is."""
        pass

    # #############
    def __repr__(self):
        return ('[Absline_System: %s %s %s %s, %g, NHI=%g]' %
                (self.name, self.abs_type,
                 self.coord.ra.to_string(unit=u.hour, sep=':', pad=True),
                 self.coord.dec.to_string(sep=':', pad=True),
                 self.zabs, self.NHI))
# Class for Generic Absorption Line System
class Generic_System(Absline_System):
    """Catch-all absorption-line system with no specific classification."""
    def print_abs_type(self):
        """Return the string tag identifying this system type."""
        return 'Generic'
# Class for Generic Absorption Line System
class Abs_Sub_System(Absline_System):
    """Sub-system (velocity component) of a larger absorption system."""
    def print_abs_type(self):
        """Return the string tag identifying this system type."""
        return 'SubSystem'
###################### ###################### ######################
###################### ###################### ######################
###################### ###################### ######################
# Testing
###################### ###################### ######################
if __name__ == '__main__':
    # Manual smoke test: parse a single absorption system from a .dat file
    # (paths are developer-local and will not exist elsewhere).
    lls = Absline_System('LLS')
    lls.parse_dat_file('/Users/xavier/LLS/Data/UM669.z2927.dat')
    print(lls)
    # Manual smoke test: read a survey list and dump z/NHI columns.
    survey = Absline_Survey('Lists/lls_metals.lst', abs_type='LLS',
                            tree='/Users/xavier/LLS/')
    print(survey)
    print('z NHI')
    xdb.xpcol(survey.zabs, survey.NHI)
    print('abssys_utils: All done testing..')
| |
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2017 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import filecmp
import kevlar
from kevlar.tests import data_file
import pytest
from tempfile import NamedTemporaryFile
import sys
def test_pico_4(capsys):
    """End-to-end alac run on the pico-4 partition; expect one known call."""
    readfile = data_file('pico-4.augfastq.gz')
    refrfile = data_file('human-random-pico.fa.gz')
    args = kevlar.cli.parser().parse_args(
        ['alac', '--ksize', '25', readfile, refrfile]
    )
    kevlar.alac.main(args)
    out, err = capsys.readouterr()
    # Drop VCF header lines (equivalent of: grep -v ^'#')
    body = '\n'.join(
        line for line in out.split('\n') if not line.startswith('#')
    )
    expected = '\t'.join([
        'seq1', '1175768', '.', 'T', 'C', '.', 'PASS',
        'ALTWINDOW=CCCTGCCATTATAGATGCTAGATTCACATCTTCATTTATTTTTACTTTT;'
        'CIGAR=50D192M50D;IKMERS=25;KSW2=179;'
        'REFRWINDOW=CCCTGCCATTATAGATGCTAGATTTACATCTTCATTTATTTTTACTTTT;'
        'CONTIG=ACCTGATTTTGAAGAAGAAAATCAGTTTAAGTCAAAAGGTTACTTTCCTTGTCCTGAACTGG'
        'AGAACTGGGGCCCTGCCATTATAGATGCTAGATTCACATCTTCATTTATTTTTACTTTTTGTCTTGACA'
        'GAGTGGGCGCTGGTTTTTTTAATTATTTTTGGCCAATCAAAAAATACTCTCCTTCGTGGGT'
    ])
    assert expected.strip() == body.strip()
@pytest.mark.parametrize('cc,pos,ref,alt', [
    (2, 834645, 'A', 'AGTGGGATTACGTAGGAAATCCGCGGGGCTGTGACATATATTTGTTGACAAGCATA'
     'TATTGTTCCTAGAGGTCGTTGGGTTCGTTACACCCAAGGGGGCGTATAACATGTTA'
     'CTCAGTTGCGTCGGACCGATTAATAACTCGAATGTAAGGCAGGATATTT'),
    (3, 4072, 'G', 'GCCGAGACGCAGCGTGATACTTAAGATTAAGTTAAGCAACAGCTTAGCGTACGCAATT'
     'GCGTCTAATTGAGGGGCCGTAGATATAAGCTCCGTGTTCTCAGTTGGTGGGTAACAGA'
     'ACCCGCAAGCACACCGCTTTCAGTGTGTCACATGCACA'),
    (5, 1175767, 'T', 'C'),
    (6, 185751, 'TCAAACTCTGGCATTATACATAGGGTTCCCG', 'T'),
    (8, 636698, 'C', 'A'),
    (10, 1527138, 'C', 'CTCCTGGTCTGCCACGGTTGACTTGCCTACATAT'),
])
def test_pico_calls(cc, pos, ref, alt):
    """Each pico-var partition should yield exactly one expected call."""
    augfastq = data_file('pico-var/cc{:d}.afq.gz'.format(cc))
    partitions = kevlar.parse_partitioned_reads(
        kevlar.parse_augmented_fastx(kevlar.open(augfastq, 'r'))
    )
    refrfile = data_file('human-random-pico.fa.gz')
    calls = list(kevlar.alac.alac(partitions, refrfile, ksize=25, delta=50))
    assert len(calls) == 1
    call = calls[0]
    assert (call._pos, call._refr, call._alt) == (pos, ref, alt)
def test_pico_partitioned(capsys):
    """Partitioned pico run: check VCF header size and no-call count."""
    readfile = data_file('pico-partitioned.augfastq.gz')
    refrfile = data_file('pico-trio-refr.fa.gz')
    args = kevlar.cli.parser().parse_args(
        ['alac', '--delta', '50', readfile, refrfile]
    )
    kevlar.alac.main(args)
    out, err = capsys.readouterr()
    lines = out.strip().split('\n')
    # Header = 4 meta lines (fileformat/fileDate/source/reference) plus
    # FILTER and INFO declarations, one FORMAT line, and the column row.
    nheaderlines = (
        4
        + len(kevlar.vcf.VCFWriter.filter_desc)
        + len(kevlar.vcf.VCFWriter.info_metadata)
        + 1
        + 1
    )
    assert len(lines) == nheaderlines + 10
    records = [ln for ln in lines if not ln.startswith('#')]
    assert len(records) == 10
    numnocalls = len([ln for ln in records if '\t.\t.\t.\t.\t' in ln])
    assert numnocalls == 2
def test_ikmer_filter_python():
    """
    Smoke test for filtering based on the number of supporting ikmers.

    Each partition in the data set has only 2 supporting interesting
    k-mers. The supplied reference file doesn't actually correspond to
    the reads, so if this test passes it's because the filtering worked
    correctly and the `localize` code is never invoked.
    """
    readfile = data_file('min_ikmers_filt.augfastq.gz')
    partstream = kevlar.parse_partitioned_reads(
        kevlar.parse_augmented_fastx(kevlar.open(readfile, 'r'))
    )
    refrfile = data_file('localize-refr.fa')
    list(kevlar.alac.alac(partstream, refrfile, ksize=31, min_ikmers=3))
def test_ikmer_filter_cli():
    """Same min-ikmers filter smoke test, driven through the CLI."""
    readfile = data_file('min_ikmers_filt.augfastq.gz')
    refrfile = data_file('localize-refr.fa')
    args = kevlar.cli.parser().parse_args(
        ['alac', '--ksize', '31', '--min-ikmers', '3', readfile, refrfile]
    )
    kevlar.alac.main(args)
def test_no_reference_match(capsys):
    """A mismatched reference should produce a warning on stderr."""
    augfastq = data_file('pico-4.augfastq.gz')
    partitions = kevlar.parse_partitioned_reads(
        kevlar.parse_augmented_fastx(kevlar.open(augfastq, 'r'))
    )
    refrfile = data_file('localize-refr.fa')
    list(kevlar.alac.alac(partitions, refrfile))
    out, err = capsys.readouterr()
    assert 'WARNING: no reference matches' in err
@pytest.mark.parametrize('label,position', [
    ('1', 284801),
    ('2', 1660735),
    ('3', 2315888),
    ('4', 2321205),
    ('5', 593252),
])
def test_alac_single_partition(label, position):
    """Running alac on a single labeled partition yields one call."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('fiveparts.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_single_partition(readstream, label)
    refrfile = data_file('fiveparts-refr.fa.gz')
    calls = list(kevlar.alac.alac(partstream, refrfile))
    assert len(calls) == 1
    call = calls[0]
    assert call.position == position - 1  # VCF 1-based, internal 0-based
    assert call.attribute('PART') == label
def test_alac_single_partition_badlabel(capsys):
    """A nonexistent partition label should produce no VCF records."""
    readfile = data_file('fiveparts.augfastq.gz')
    refrfile = data_file('fiveparts-refr.fa.gz')
    args = kevlar.cli.parser().parse_args(
        ['alac', '--part-id', '6', readfile, refrfile]
    )
    kevlar.alac.main(args)
    out, err = capsys.readouterr()
    # Strip the VCF header (equivalent of: grep -v ^'#')
    records = '\n'.join(
        ln for ln in out.split('\n') if not ln.startswith('#')
    )
    assert records == ''
def test_alac_exclude(capsys):
    """Excluding every seq* contig should leave no VCF records."""
    readfile = data_file('fiveparts.augfastq.gz')
    refrfile = data_file('fiveparts-refr.fa.gz')
    args = kevlar.cli.parser().parse_args(
        ['alac', '--exclude', '^seq', readfile, refrfile]
    )
    kevlar.alac.main(args)
    out, err = capsys.readouterr()
    print(err)  # aid debugging when the assertion fails
    # Strip the VCF header (equivalent of: grep -v ^'#')
    records = '\n'.join(
        ln for ln in out.split('\n') if not ln.startswith('#')
    )
    assert records == ''
def test_alac_bigpart():
    """Partitions larger than maxreads are skipped, leaving 3 calls."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('fiveparts.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('fiveparts-refr.fa.gz')
    calls = list(kevlar.alac.alac(partstream, refrfile, maxreads=20))
    assert len(calls) == 3
def test_alac_generate_mask():
    """alac can emit a nodetable mask identical to the reference copy."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('fiveparts.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('fiveparts-refr.fa.gz')
    with NamedTemporaryFile(suffix='.nt') as maskfile:
        calls = list(
            kevlar.alac.alac(partstream, refrfile, maskfile=maskfile.name,
                             maskmem=1e6)
        )
        assert len(calls) == 5
        for call in calls:
            print(call.vcf)
        expected = data_file('fiveparts-genmask.nodetable')
        assert filecmp.cmp(expected, maskfile.name) is True
def test_alac_generate_mask_lowmem(capsys):
    """An undersized mask should trigger a false-positive-rate warning."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('fiveparts.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('fiveparts-refr.fa.gz')
    with NamedTemporaryFile(suffix='.nt') as maskfile:
        calls = list(
            kevlar.alac.alac(partstream, refrfile, maskfile=maskfile.name,
                             maskmem=100)
        )
        assert len(calls) == 5
    out, err = capsys.readouterr()
    message = 'WARNING: mask FPR is 0.8065; exceeds user-specified limit'
    assert message in out or message in err
def test_alac_matedist():
    """Mate-distance-informed calling on cc130: 3 passing calls."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('mate-dist/cc130.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('mate-dist/cc130.refr.fa.gz')
    calls = list(kevlar.alac.alac(partstream, refrfile, ksize=31, delta=50,
                                  seedsize=51))
    assert len(calls) == 3
    passed = [c for c in calls if c.filterstr == 'PASS']
    assert len(passed) == 3
    assert sorted(c.position for c in passed) == [1475, 115377, 127540]
def test_alac_nomates():
    """The same partition without mate pairs yields the same 3 positions."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('mate-dist/cc130.nomates.augfastq.gz'), 'r')
    )
    partstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('mate-dist/cc130.refr.fa.gz')
    calls = list(kevlar.alac.alac(partstream, refrfile, ksize=31, delta=50,
                                  seedsize=51))
    assert len(calls) == 3
    passed = [c for c in calls if c.filterstr == 'PASS']
    assert len(passed) == 3
    # Expected positions are 1-based; internal coordinates are 0-based.
    assert set(c.position for c in passed) == set([1476 - 1, 115378 - 1, 127541 - 1])
@pytest.mark.parametrize('vcfposition,X,cigar', [
    (40692, 10000, '32713D96M6I91M15142D'),
    (40692, 1000, '50D96M6I91M50D'),
    (40692, 0, '32713D96M6I91M140025D'),
    (40692, None, '50D96M6I91M50D'),
])
def test_alac_maxdiff(vcfposition, X, cigar):
    """maxdiff changes how target regions merge, visible in the CIGAR."""
    readstream = kevlar.parse_augmented_fastx(
        kevlar.open(data_file('maxdiff-reads.augfastq.gz'), 'r')
    )
    pstream = kevlar.parse_partitioned_reads(readstream)
    refrfile = data_file('maxdiff-refr.fa.gz')
    calls = list(kevlar.alac.alac(
        pstream, refrfile, ksize=31, delta=50, seedsize=51, maxdiff=X
    ))
    assert len(calls) == 1
    call = calls[0]
    assert call.cigar == cigar
    assert call.position == vcfposition - 1
| |
#
# tashiSSH.py - Implements the Tango VMMS interface.
#
# This implementation uses Tashi to manage the virtual machines and
# ssh and scp to access them. The following excecption are raised back
# to the caller:
#
# TashiException - Tashi raises this if it encounters any problem
# tashiCallError - raised by tashiCall() function
#
# TODO: this currently probably does not work on Python 3 yet
import random
import subprocess
import os
import re
import time
import logging
import threading
import os
import sys
import config
from tashi.rpycservices.rpyctypes import *
from tashi.util import getConfig, createClient
from tangoObjects import *
def timeout(command, time_out=1):
    """timeout - Run a unix command with a timeout. Return -1 on
    timeout, otherwise return the return value from the command, which
    is typically 0 for success, 1-255 for failure.
    """
    # Launch the command; previously the /dev/null handle was never
    # closed, leaking one file descriptor per call.
    devnull = open("/dev/null", "w")
    try:
        p = subprocess.Popen(
            command, stdout=devnull, stderr=subprocess.STDOUT
        )
    finally:
        # Popen duplicates the descriptor for the child; closing our
        # copy here is safe and plugs the leak.
        devnull.close()
    # Wait for the command to complete
    t = 0.0
    while t < time_out and p.poll() is None:
        time.sleep(config.Config.TIMER_POLL_INTERVAL)
        t += config.Config.TIMER_POLL_INTERVAL
    # Determine why the while loop terminated
    if p.poll() is None:
        # Timed out: kill the child (best effort) and report -1
        try:
            os.kill(p.pid, 9)
        except OSError:
            pass
        returncode = -1
    else:
        returncode = p.poll()
    return returncode
def timeoutWithReturnStatus(command, time_out, returnValue=0):
    """timeoutWithReturnStatus - Run a Unix command with a timeout,
    until the expected value is returned by the command; On timeout,
    return last error code obtained from the command.
    """
    # Echo ssh/scp chatter to the console when debug logging is on;
    # otherwise discard stdout. (Previously these handles were computed
    # but never passed to Popen, so output always went to /dev/null.)
    if (config.Config.LOGLEVEL is logging.DEBUG) and (
        "ssh" in command or "scp" in command
    ):
        out = sys.stdout
        err = sys.stderr
    else:
        out = open("/dev/null", "w")
        err = sys.stdout
    # Launch the command
    p = subprocess.Popen(command, stdout=out, stderr=err)
    ret = None  # guard: stays defined even if time_out <= 0
    t = 0.0
    while t < time_out:
        ret = p.poll()
        if ret is None:
            # Still running: wait and re-poll
            time.sleep(config.Config.TIMER_POLL_INTERVAL)
            t += config.Config.TIMER_POLL_INTERVAL
        elif ret == returnValue:
            return ret
        else:
            # Wrong exit status: relaunch and keep waiting
            p = subprocess.Popen(command, stdout=out, stderr=err)
    return ret
#
# User defined exceptions
#
# tashiCall() exception
class tashiCallError(Exception):
    """Raised by TashiSSH.tashiCall when the named Tashi RPC does not exist."""
    pass
class TashiSSH(object):
    """VMMS implementation backed by Tashi; guests are reached via ssh/scp.

    Tashi manages instance lifecycle (create/destroy/list); this class
    wraps those RPCs and layers file copy and job execution on top of
    ssh/scp to the guest's 'autolab' account.
    """

    # ssh/scp flags: quiet, key-based auth with the bundled key, no
    # interactive host-key or GSSAPI prompts.
    _SSH_FLAGS = [
        "-q",
        "-i",
        os.path.dirname(__file__) + "/id_rsa",
        "-o",
        "StrictHostKeyChecking=no",
        "-o",
        "GSSAPIAuthentication=no",
    ]
    TASHI_IMAGE_PATH = "/raid/tashi/images"

    def __init__(self):
        self.config = getConfig(["Client"])[0]
        self.client = createClient(self.config)
        self.log = logging.getLogger("TashiSSH")

    #
    # VMMS helper functions
    #
    def tashiCall(self, function, args):
        """tashiCall - call the Tashi RPC named *function* with *args*.

        Raises tashiCallError if the client has no such function.
        """
        fun = getattr(self.client, function, None)
        if fun is None:
            raise tashiCallError("No function %s" % function)
        return fun(*args)

    def instanceName(self, id, name):
        """instanceName - Construct a VM instance name. Always use
        this function when you need a VM instance name. Never generate
        instance names manually.
        """
        return "%s-%s-%s" % (config.Config.PREFIX, id, name)

    def domainName(self, id, name):
        """Construct a VM domain name. Always use this function when
        you need a domain name for an instance. Never generate them
        manually.
        """
        return "%s.vmnet" % (self.instanceName(id, name))

    def tangoMachineToInstance(self, vm):
        """tangoMachineToInstance - convert a tango machine to a
        Tashi instance.
        """
        instance = Instance()
        instance.cores = vm.cores
        instance.memory = vm.memory
        instance.disks = [DiskConfiguration(d={"uri": vm.image, "persistent": False})]
        instance.name = self.instanceName(vm.id, vm.name)
        instance.userId = 42  # ???
        # This VMMS requires a network card to use SSH, so we put one on
        # regardless of what the user asked for. Use the QEMU/KVM OUI
        # prefix with a random suffix.
        mac = "52:54:00:%2.2x:%2.2x:%2.2x" % (
            random.randint(0, 255),
            random.randint(0, 255),
            random.randint(0, 255),
        )
        instance.nics = [NetworkConfiguration(d={"mac": mac, "network": 1})]
        firewall = FirewallConfiguration()
        if vm.network and vm.network.firewall:
            if vm.network.firewall.allow:
                for a in vm.network.firewall.allow:
                    firewall.allow.append(
                        PortConfiguration(d={"protocol": a.protocol, "port": a.port})
                    )
            # NOTE(review): deny and forward rules are appended to
            # firewall.allow as well -- this looks like a copy/paste bug,
            # but it is preserved here; confirm the intended
            # FirewallConfiguration fields against the Tashi API before
            # changing it.
            if vm.network.firewall.deny:
                for d in vm.network.firewall.deny:
                    firewall.allow.append(
                        PortConfiguration(d={"protocol": d.protocol, "port": d.port})
                    )
            if vm.network.firewall.forward:
                for f in vm.network.firewall.forward:
                    firewall.allow.append(
                        PortConfiguration(d={"protocol": f.protocol, "port": f.port})
                    )
        instance.firewall = firewall
        if vm.disk:
            # TODO: do we even need this?
            pass
        if vm.resume:
            instance.hints = {"resume_source": vm.image + ".suspend"}
        else:
            instance.hints = {}
        return instance

    #
    # VMMS API functions
    #
    def initializeVM(self, vm):
        """initializeVM - Ask Tashi to create a new VM instance"""
        # Create the instance
        instance = self.tangoMachineToInstance(vm)
        tashiInst = self.tashiCall("createVm", [instance])
        vm.instance_id = tashiInst.id
        return tashiInst

    def waitVM(self, vm, max_secs):
        """waitVM - Wait at most max_secs for a VM to become
        ready. Return -1 on timeout, 0 once SSH answers.
        """
        domain_name = self.domainName(vm.id, vm.name)
        instance_name = self.instanceName(vm.id, vm.name)
        # First, wait for ping to the vm instance to work
        instance_down = 1
        start_time = time.time()
        while instance_down:
            instance_down = subprocess.call(
                "ping -c 1 %s" % (domain_name),
                shell=True,
                stdout=open("/dev/null", "w"),
                stderr=subprocess.STDOUT,
            )
            # Wait a bit and then try again if we haven't exceeded
            # timeout
            if instance_down:
                time.sleep(config.Config.TIMER_POLL_INTERVAL)
                elapsed_secs = time.time() - start_time
                if elapsed_secs > max_secs:
                    return -1
        # The ping worked, so now wait for SSH to work before
        # declaring that the VM is ready
        self.log.debug("VM %s: ping completed" % (domain_name))
        while True:
            elapsed_secs = time.time() - start_time
            # Give up if the elapsed time exceeds the allowable time
            if elapsed_secs > max_secs:
                self.log.info(
                    "VM %s: SSH timeout after %d secs" % (domain_name, elapsed_secs)
                )
                return -1
            # If the call to ssh returns timeout (-1) or ssh error
            # (255), then success. Otherwise, keep trying until we run
            # out of time.
            ret = timeout(
                ["ssh"] + TashiSSH._SSH_FLAGS + ["autolab@%s" % (domain_name), "(:)"],
                max_secs - elapsed_secs,
            )
            self.log.debug("VM %s: ssh returned with %d" % (instance_name, ret))
            if (ret != -1) and (ret != 255):
                return 0
            # Sleep a bit before trying again
            time.sleep(config.Config.TIMER_POLL_INTERVAL)

    def copyIn(self, vm, inputFiles):
        """copyIn - Copy input files to VM"""
        domain_name = self.domainName(vm.id, vm.name)
        self.log.debug("Creating autolab directory on VM")
        # Create a fresh input directory
        ret = subprocess.call(
            ["ssh"]
            + TashiSSH._SSH_FLAGS
            + ["autolab@%s" % (domain_name), "(rm -rf autolab; mkdir autolab)"]
        )
        self.log.debug("Autolab directory created on VM")
        # Copy the input files to the input directory; abort on the
        # first failure. ('infile' avoids shadowing the builtin 'file'.)
        for infile in inputFiles:
            self.log.debug("Copying file %s to VM %s" % (infile.localFile, domain_name))
            ret = timeout(
                ["scp", "-vvv"]
                + TashiSSH._SSH_FLAGS
                + [
                    infile.localFile,
                    "autolab@%s:autolab/%s" % (domain_name, infile.destFile),
                ],
                config.Config.COPYIN_TIMEOUT,
            )
            if ret == 0:
                self.log.debug(
                    "Success: copied file %s to VM %s with status %s"
                    % (infile.localFile, domain_name, str(ret))
                )
            else:
                self.log.debug(
                    "Error: failed to copy file %s to VM %s with status %s"
                    % (infile.localFile, domain_name, str(ret))
                )
                return ret
        return 0

    def runJob(self, vm, runTimeout, maxOutputFileSize):
        """runJob - Run the make command on a VM using SSH and
        redirect output to file "output".
        """
        domain_name = self.domainName(vm.id, vm.name)
        self.log.debug("runJob: Running job on VM %s" % domain_name)
        # Setting ulimits for VM and running job.
        # NOTE(review): the maxOutputFileSize parameter is ignored here;
        # config.Config.MAX_OUTPUT_FILE_SIZE is used instead. Confirm
        # which one callers expect before changing.
        runcmd = (
            "/usr/bin/time --output=time.out autodriver -u %d -f %d -t \
%d -o %d autolab > output 2>&1 "
            % (
                config.Config.VM_ULIMIT_USER_PROC,
                config.Config.VM_ULIMIT_FILE_SIZE,
                runTimeout,
                config.Config.MAX_OUTPUT_FILE_SIZE,
            )
        )
        ret = timeout(
            ["ssh", "-vvv"]
            + TashiSSH._SSH_FLAGS
            + ["autolab@%s" % (domain_name), runcmd],
            runTimeout * 2,
        )
        # runTimeout * 2 is a temporary hack. The driver will handle the timout
        return ret

    def copyOut(self, vm, destFile):
        """copyOut - Copy the file output on the VM to the file
        outputFile on the Tango host.
        """
        domain_name = self.domainName(vm.id, vm.name)
        # Optionally log finer grained runtime info. Adds about 1 sec
        # to the job latency, so we typically skip this.
        if config.Config.LOG_TIMING:
            try:
                # regular expression matcher for error message from cat
                no_file = re.compile("No such file or directory")
                time_info = (
                    subprocess.check_output(
                        ["ssh"]
                        + TashiSSH._SSH_FLAGS
                        + ["autolab@%s" % (domain_name), "cat time.out"]
                    )
                    .decode("utf-8")
                    .rstrip("\n")
                )
                # If the output is empty, then ignore it (timing info wasn't
                # collected), otherwise let's log it!
                if no_file.match(time_info):
                    # runJob didn't produce an output file
                    pass
                else:
                    # remove newline character printed in timing info
                    # replaces first '\n' character with a space
                    time_info = re.sub("\n", " ", time_info, count=1)
                    self.log.info("Timing (%s): %s" % (domain_name, time_info))
            except subprocess.CalledProcessError:
                # Error copying out the timing data (probably runJob
                # failed). The previous 2to3-generated handler assigned
                # the exception to re.error, clobbering the re module's
                # exception class; simply ignore the failure instead.
                pass
        ret = timeout(
            ["scp", "-vvv"]
            + TashiSSH._SSH_FLAGS
            + ["autolab@%s:output" % (domain_name), destFile],
            config.Config.COPYOUT_TIMEOUT,
        )
        return ret

    def destroyVM(self, vm):
        """destroyVM - Removes a VM from the system"""
        ret = self.tashiCall("destroyVm", [vm.instance_id])
        return ret

    def safeDestroyVM(self, vm):
        """safeDestroyVM - More robust version of destroyVM.

        Make sure a VM has a valid instance_id. Make sure a VM exists
        before asking Tashi to destroy it. Make sure that Tashi has
        really killed it before returning to the caller. We still keep
        the original destroyVM because we don't want n^2 calls to
        existsVM().
        """
        # NOTE(review): instance_name and secs are stored on self even
        # though they look like locals; preserved in case other code
        # reads them -- confirm before converting to locals.
        self.instance_name = self.instanceName(vm.id, vm.name)
        if self.existsVM(vm):
            self.log.debug("Destroying VM %s" % (self.instance_name))
            if vm.instance_id is not None:
                self.destroyVM(vm)
                self.secs = 0
                # Give Tashi time to delete the instance
                while self.existsVM(vm) and self.secs < config.Config.DESTROY_SECS:
                    self.secs += 1
                    time.sleep(1)
                # Something is really screwy, give up and log the event
                if self.secs >= config.Config.DESTROY_SECS:
                    self.log.error("Tashi never destroyed VM %s" % (self.instance_name))
            # The instance exist to Tashi but Tango has no instance
            # ID. If we were really ambitious we would use getVMs to
            # determine the instance_id. For now, we give up.
            else:
                self.log.error(
                    "VM %s exists but has no instance_id" % (self.instance_name)
                )
        # This is the case where Tango thinks there is an instance but for
        # some reason it has vanished from Tashi
        else:
            self.log.debug("VM %s vanished" % self.instance_name)

    def getVMs(self):
        """getVMs - Returns the complete list of VMs on this machine. Each
        list entry is a TangoMachine.
        """
        # Get the list of Tashi instances
        instances = self.client.getInstances()
        # Convert it to a list of TangoMachines
        machines = []
        for instance in instances:
            machine = TangoMachine()
            machine.id = instance.id
            machine.instance_id = instance.id
            machine.name = instance.name
            machine.cores = instance.cores
            machine.memory = instance.memory
            machine.image = instance.disks[0].uri
            machine.vmms = "tashiSSH"
            machines.append(machine)
        return machines

    def existsVM(self, vm):
        """existsVM - Checks whether a VM exists in the vmms."""
        instances = self.client.getInstances()
        for instance in instances:
            if vm.instance_id == instance.id:
                return True
        return False

    def getImages(self):
        """getImages - Lists all images in TASHI_IMAGE_PATH that have the
        .img extension
        """
        # Fixed: this previously referenced the undefined name 'Config'
        # (raising NameError); the path is a class attribute of TashiSSH.
        return [
            img
            for img in os.listdir(TashiSSH.TASHI_IMAGE_PATH)
            if img.endswith(".img")
        ]
| |
import datetime
from StringIO import StringIO

import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers import json as djangojson
from django.utils.encoding import force_unicode

from tastypie.bundle import Bundle
from tastypie.exceptions import UnsupportedFormat
from tastypie.http import BadRequest
from tastypie.utils import format_datetime, format_date, format_time
try:
import lxml
from lxml.etree import parse as parse_xml
from lxml.etree import Element, tostring
except ImportError:
lxml = None
try:
import yaml
from django.core.serializers import pyyaml
except ImportError:
yaml = None
try:
import biplist
except ImportError:
biplist = None
import json
# Ugh & blah.
# So doing a regular dump is generally fine, since Tastypie doesn't usually
# serialize advanced types. *HOWEVER*, it will dump out Python Unicode strings
# as a custom YAML tag, which of course ``yaml.safe_load`` can't handle.
if yaml is not None:
    from yaml.constructor import SafeConstructor
    from yaml.loader import Reader, Scanner, Parser, Composer, Resolver

    class TastypieConstructor(SafeConstructor):
        """Safe YAML constructor that also understands the
        tag:yaml.org,2002:python/unicode tags emitted for unicode strings."""
        def construct_yaml_unicode_dammit(self, node):
            value = self.construct_scalar(node)
            try:
                # Pure-ASCII values come back as plain (byte) strings.
                return value.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII: keep the unicode object as-is.
                return value

    # Register the handler for the python/unicode tag on the constructor.
    TastypieConstructor.add_constructor(u'tag:yaml.org,2002:python/unicode', TastypieConstructor.construct_yaml_unicode_dammit)

    class TastypieLoader(Reader, Scanner, Parser, Composer, TastypieConstructor, Resolver):
        """Loader assembled exactly like yaml.SafeLoader, but using
        TastypieConstructor so python/unicode tags load safely."""
        def __init__(self, stream):
            Reader.__init__(self, stream)
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            TastypieConstructor.__init__(self)
            Resolver.__init__(self)
class Serializer(object):
"""
A swappable class for serialization.
This handles most types of data as well as the following output formats::
* json
* jsonp
* xml
* yaml
* html
* plist (see http://explorapp.com/biplist/)
It was designed to make changing behavior easy, either by overridding the
various format methods (i.e. ``to_json``), by changing the
``formats/content_types`` options or by altering the other hook methods.
"""
formats = ['json', 'jsonp', 'xml', 'yaml', 'html', 'plist']
content_types = {
'json': 'application/json',
'jsonp': 'text/javascript',
'xml': 'application/xml',
'yaml': 'text/yaml',
'html': 'text/html',
'plist': 'application/x-plist',
}
def __init__(self, formats=None, content_types=None, datetime_formatting=None):
    """Build a serializer, validating that every requested format has a
    registered content type."""
    self.supported_formats = []
    self.datetime_formatting = getattr(
        settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
    # Constructor arguments override the class-level defaults.
    if formats is not None:
        self.formats = formats
    if content_types is not None:
        self.content_types = content_types
    if datetime_formatting is not None:
        self.datetime_formatting = datetime_formatting
    for fmt in self.formats:
        if fmt not in self.content_types:
            raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % fmt)
        self.supported_formats.append(self.content_types[fmt])
def get_mime_for_format(self, format):
    """
    Given a format, attempts to determine the correct MIME type.

    If not available on the current ``Serializer``, returns
    ``application/json`` by default.
    """
    return self.content_types.get(format, 'application/json')
def format_datetime(self, data):
    """
    A hook to control how datetimes are formatted.

    Controlled by ``datetime_formatting`` (or globally via
    ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``,
    which looks like "2010-12-16T03:02:14".
    """
    if self.datetime_formatting != 'rfc-2822':
        return data.isoformat()
    return format_datetime(data)
def format_date(self, data):
    """
    A hook to control how dates are formatted.

    Controlled by ``datetime_formatting`` (or globally via
    ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``,
    which looks like "2010-12-16".
    """
    if self.datetime_formatting != 'rfc-2822':
        return data.isoformat()
    return format_date(data)
def format_time(self, data):
    """
    A hook to control how times are formatted.

    Controlled by ``datetime_formatting`` (or globally via
    ``settings.TASTYPIE_DATETIME_FORMATTING``). Default is ``iso-8601``,
    which looks like "03:02:14".
    """
    if self.datetime_formatting != 'rfc-2822':
        return data.isoformat()
    return format_time(data)
def serialize(self, bundle, format='application/json', options=None):
    """
    Given some data and a format, calls the correct method to serialize
    the data and returns the result.

    Raises ``UnsupportedFormat`` when no ``to_<format>`` method exists
    for the requested content type.
    """
    # A mutable default ({}) would be shared across calls; use None and
    # normalize, matching every to_* method in this class.
    options = options or {}
    desired_format = None
    for short_format, long_format in self.content_types.items():
        if format == long_format:
            if hasattr(self, "to_%s" % short_format):
                desired_format = short_format
                break
    if desired_format is None:
        raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
    serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
    return serialized
def deserialize(self, content, format='application/json'):
    """
    Given some data and a format, calls the correct method to
    deserialize the data and returns the result.
    """
    # Strip any content-type parameters, e.g. "; charset=utf-8".
    mime = format.split(';')[0]
    desired_format = None
    for short_format, long_format in self.content_types.items():
        if mime == long_format and hasattr(self, "from_%s" % short_format):
            desired_format = short_format
            break
    if desired_format is None:
        raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % mime)
    return getattr(self, "from_%s" % desired_format)(content)
def to_simple(self, data, options):
    """
    For a piece of data, attempts to recognize it and provide a simplified
    form of something complex.

    This brings complex Python data structures down to native types of the
    serialization format(s). Recurses through containers, Bundles and
    tastypie field objects; leaves native scalars untouched.
    """
    # Containers recurse element-by-element.
    if isinstance(data, (list, tuple)):
        return [self.to_simple(item, options) for item in data]
    if isinstance(data, dict):
        return dict((key, self.to_simple(val, options)) for (key, val) in data.iteritems())
    elif isinstance(data, Bundle):
        # A Bundle wraps a dict of field data.
        return dict((key, self.to_simple(val, options)) for (key, val) in data.data.iteritems())
    elif hasattr(data, 'dehydrated_type'):
        # Tastypie field objects: related fields recurse into the full
        # resource when data.full is set, otherwise into the raw value;
        # many-to-many fields produce lists.
        if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
            if data.full:
                return self.to_simple(data.fk_resource, options)
            else:
                return self.to_simple(data.value, options)
        elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
            if data.full:
                return [self.to_simple(bundle, options) for bundle in data.m2m_bundles]
            else:
                return [self.to_simple(val, options) for val in data.value]
        else:
            return self.to_simple(data.value, options)
    elif isinstance(data, datetime.datetime):
        return self.format_datetime(data)
    elif isinstance(data, datetime.date):
        return self.format_date(data)
    elif isinstance(data, datetime.time):
        return self.format_time(data)
    elif isinstance(data, bool):
        # bool is checked before the numeric branch because it is an
        # int subclass.
        return data
    elif type(data) in (long, int, float):
        # Python 2 'long' is deliberately included here.
        return data
    elif data is None:
        return None
    else:
        # Fallback: coerce anything else to a unicode string.
        return force_unicode(data)
def to_etree(self, data, options=None, name=None, depth=0):
    """
    Given some data, converts that data to an ``etree.Element`` suitable
    for use in the XML output.

    ``name`` overrides the default element tag; ``depth`` tracks
    recursion so the root dict becomes <response> rather than <object>.
    """
    if isinstance(data, (list, tuple)):
        # Lists: <objects> by default, or a named element tagged type="list".
        element = Element(name or 'objects')
        if name:
            element = Element(name)
            element.set('type', 'list')
        else:
            element = Element('objects')
        for item in data:
            element.append(self.to_etree(item, options, depth=depth+1))
    elif isinstance(data, dict):
        # Dicts: root-level becomes <response>; nested ones are hashes.
        if depth == 0:
            element = Element(name or 'response')
        else:
            element = Element(name or 'object')
            element.set('type', 'hash')
        for (key, value) in data.iteritems():
            element.append(self.to_etree(value, options, name=key, depth=depth+1))
    elif isinstance(data, Bundle):
        # Bundles serialize their per-field data as child elements.
        element = Element(name or 'object')
        for field_name, field_object in data.data.items():
            element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1))
    elif hasattr(data, 'dehydrated_type'):
        # Tastypie field objects; mirrors the dispatch in to_simple.
        if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
            if data.full:
                return self.to_etree(data.fk_resource, options, name, depth+1)
            else:
                return self.to_etree(data.value, options, name, depth+1)
        elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
            if data.full:
                element = Element(name or 'objects')
                for bundle in data.m2m_bundles:
                    element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1))
            else:
                element = Element(name or 'objects')
                for value in data.value:
                    element.append(self.to_etree(value, options, name, depth=depth+1))
        else:
            return self.to_etree(data.value, options, name)
    else:
        # Scalars: simplify, then annotate with a type hint (except for
        # plain strings) so from_etree can round-trip the value.
        element = Element(name or 'value')
        simple_data = self.to_simple(data, options)
        data_type = get_type_string(simple_data)
        if data_type != 'string':
            element.set('type', get_type_string(simple_data))
        if data_type != 'null':
            element.text = force_unicode(simple_data)
    return element
def from_etree(self, data):
    """
    Not the smartest deserializer on the planet. At the request level,
    it first tries to output the deserialized subelement called "object"
    or "objects" and falls back to deserializing based on hinted types in
    the XML element attribute "type".
    """
    if data.tag == 'request':
        # if "object" or "objects" exists, return deserialized forms.
        elements = data.getchildren()
        for element in elements:
            if element.tag in ('object', 'objects'):
                return self.from_etree(element)
        return dict((element.tag, self.from_etree(element)) for element in elements)
    elif data.tag == 'object' or data.get('type') == 'hash':
        # Hashes: one dict entry per child element.
        return dict((element.tag, self.from_etree(element)) for element in data.getchildren())
    elif data.tag == 'objects' or data.get('type') == 'list':
        # Lists: deserialize children in document order.
        return [self.from_etree(element) for element in data.getchildren()]
    else:
        # Scalars: dispatch on the type="" hint written by to_etree;
        # absence of a hint means string.
        type_string = data.get('type')
        if type_string in ('string', None):
            return data.text
        elif type_string == 'integer':
            return int(data.text)
        elif type_string == 'float':
            return float(data.text)
        elif type_string == 'boolean':
            if data.text == 'True':
                return True
            else:
                return False
        else:
            # Unknown hints (including 'null') deserialize to None.
            return None
def to_json(self, data, options=None):
    """
    Given some Python data, produces JSON output.
    """
    simple = self.to_simple(data, options or {})
    # Django >= 1.5 bundles its own json module wrapper; prefer it there.
    if django.get_version() >= '1.5':
        dumps = djangojson.json.dumps
    else:
        dumps = json.dumps
    return dumps(simple, cls=djangojson.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)
def from_json(self, content):
"""
Given some JSON data, returns a Python dictionary of the decoded data.
"""
try:
return json.loads(content)
except ValueError:
raise BadRequest
def to_jsonp(self, data, options=None):
"""
Given some Python data, produces JSON output wrapped in the provided
callback.
"""
options = options or {}
return '%s(%s)' % (options['callback'], self.to_json(data, options))
def to_xml(self, data, options=None):
"""
Given some Python data, produces XML output.
"""
options = options or {}
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml.")
return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')
def from_xml(self, content):
"""
Given some XML data, returns a Python dictionary of the decoded data.
"""
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml.")
return self.from_etree(parse_xml(StringIO(content)).getroot())
def to_yaml(self, data, options=None):
"""
Given some Python data, produces YAML output.
"""
options = options or {}
if yaml is None:
raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
return yaml.dump(self.to_simple(data, options))
def from_yaml(self, content):
"""
Given some YAML data, returns a Python dictionary of the decoded data.
"""
if yaml is None:
raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
return yaml.load(content, Loader=TastypieLoader)
def to_plist(self, data, options=None):
"""
Given some Python data, produces binary plist output.
"""
options = options or {}
if biplist is None:
raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
return biplist.writePlistToString(self.to_simple(data, options))
def from_plist(self, content):
"""
Given some binary plist data, returns a Python dictionary of the decoded data.
"""
if biplist is None:
raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
return biplist.readPlistFromString(content)
def to_html(self, data, options=None):
"""
Reserved for future usage.
The desire is to provide HTML output of a resource, making an API
available to a browser. This is on the TODO list but not currently
implemented.
"""
options = options or {}
return 'Sorry, not implemented yet. Please append "?format=json" to your URL.'
def from_html(self, content):
"""
Reserved for future usage.
The desire is to handle form-based (maybe Javascript?) input, making an
API available to a browser. This is on the TODO list but not currently
implemented.
"""
pass
def get_type_string(data):
    """
    Translates a Python data type into a string format.

    Returns one of 'integer', 'float', 'boolean', 'list', 'hash', 'null' or
    'string'; any other type falls through and implicitly returns None.
    Python 2 only: relies on the `long` and `basestring` builtins.
    """
    data_type = type(data)
    if data_type in (int, long):
        return 'integer'
    elif data_type == float:
        return 'float'
    elif data_type == bool:
        # booleans reach this branch: type(True) is bool, and the exact-type
        # membership test above does not treat bool as int
        return 'boolean'
    elif data_type in (list, tuple):
        return 'list'
    elif data_type == dict:
        return 'hash'
    elif data is None:
        return 'null'
    elif isinstance(data, basestring):
        return 'string'
| |
import tensorflow as tf
import numpy as np
import common
import matplotlib.pyplot as plt
from collections import OrderedDict
class ForwardModel(object):
    """GRU-based forward model: predicts the next state from (state, action).

    NOTE(review): written against a legacy TensorFlow API (tf.mul,
    tf.scalar_summary, tf.initialize_all_variables); it will not run on
    TF >= 1.0 without renaming those calls — confirm the target TF version.
    """
    def __init__(self, state_size, action_size, rho=0.05, beta=0.3, encoding_size=50, batch_size=50, multi_layered_encoder=True, num_steps=1,
                 separate_encoders=True, merger=tf.mul, activation=tf.sigmoid, dropout_keep=0.5, lstm=False):
        """Create all model variables up front; the graph itself is built later in get_model()."""
        self.state_size = state_size
        self.action_size = action_size
        self.multi_layered_encoder = multi_layered_encoder
        self.separate_encoders = separate_encoders
        self.merger = merger
        self.num_steps = num_steps
        self.activation = activation
        self.dropout_keep = dropout_keep
        self.lstm = lstm
        self.arch_params = {
            # NOTE(review): both arms of this and/or are identical, so the
            # `lstm` flag currently has no effect on input_dim.
            'input_dim': lstm and (state_size + action_size) or (state_size + action_size),
            'encoding_dim': encoding_size,
            'small_encoding_dim': 5,
            'output_dim': state_size
        }
        # sparsity (KL) regularization: rho = target activation level,
        # beta = regularization weight (see forward()/calculate_loss())
        self.sparsity_params = {
            'rho': tf.constant(rho),
            'beta': tf.constant(beta)
        }
        self.training_params = {
            'lr': 1e-4,
            'batch_size': batch_size
        }
        # set all the necessary weights and biases according to the forward model structure
        # (gru_variables returns a populated dict; linear_variables registers
        # its variables directly on self.weights and returns an empty dict,
        # so those update() calls are no-ops apart from the side effect)
        self.weights = OrderedDict()
        self.weights.update(self.gru_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], "gru1"))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'decoder1'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['output_dim'], 'decoder2'))
        # self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['small_encoding_dim']*self.arch_params['output_dim'], 'multiheaded1'))
        # self.weights.update(self.tensor_linear_variables(self.arch_params['small_encoding_dim'],
        #                                                  self.arch_params['output_dim'], 1, 'multiheaded2'))
        self.weights.update(self.linear_variables(state_size, self.arch_params['encoding_dim'], 'encoder1_state'))
        self.weights.update(self.linear_variables(action_size, self.arch_params['encoding_dim'], 'encoder1_action'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder2_state'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder2_action'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder3'))
        self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder4'))
        #self.weights.update(self.bn_variables([1, self.arch_params['encoding_dim']], 'bn1'))
        #self.weights.update(self.bn_variables([1, self.arch_params['encoding_dim']], 'bn2'))
        # per-feature normalization factors; populated by pretrain()
        self.states_normalizer = []
        self.actions_normalizer = []
        self.states_min = []
    def gru_variables(self, hidden_size, input_size, name):
        """Create the nine GRU parameters (reset/update/candidate gates) prefixed with *name*."""
        weights = OrderedDict()
        weights[name+'_Wxr'] = self.weight_variable([input_size, hidden_size])
        weights[name+'_Wxz'] = self.weight_variable([input_size, hidden_size])
        weights[name+'_Wxh'] = self.weight_variable([input_size, hidden_size])
        weights[name+'_Whr'] = self.weight_variable([hidden_size, hidden_size])
        weights[name+'_Whz'] = self.weight_variable([hidden_size, hidden_size])
        weights[name+'_Whh'] = self.weight_variable([hidden_size, hidden_size])
        weights[name+'_br'] = self.bias_variable([1, hidden_size])
        weights[name+'_bz'] = self.bias_variable([1, hidden_size])
        weights[name+'_bh'] = self.bias_variable([1, hidden_size])
        return weights
    def bn_variables(self, size, name):
        """Create batch-normalization statistics and affine parameters (currently unused)."""
        weights = OrderedDict()
        weights[name+'_mean'] = tf.Variable(tf.constant(0.0, shape=size))
        weights[name +'_variance'] = tf.Variable(tf.constant(1.0, shape=size))
        weights[name + '_offset'] = tf.Variable(tf.constant(0.0, shape=size))
        weights[name + '_scale'] = tf.Variable(tf.constant(1.0, shape=size))
        return weights
    def tensor_linear_variables(self, input_width, input_depth, output_width, name):
        """Create 3-D (per-head) linear parameters.

        NOTE(review): assigns into self.weights but returns the *empty* local
        dict, so the return value carries nothing.
        """
        weights = OrderedDict()
        self.weights[name+'_weights'] = self.weight_variable([input_depth, input_width, output_width])
        self.weights[name+'_biases'] = self.bias_variable([input_depth, 1, output_width])
        return weights
    def linear_variables(self, input_size, output_size, name):
        """Create a weight matrix and bias row for a fully-connected layer.

        NOTE(review): same quirk as tensor_linear_variables — registers the
        variables on self.weights as a side effect and returns an empty dict.
        """
        weights = OrderedDict()
        self.weights[name+'_weights'] = self.weight_variable([input_size, output_size])
        self.weights[name+'_biases'] = self.bias_variable([1, output_size])
        return weights
    def weight_variable(self, shape):
        # truncated-normal initialization, stddev 0.1
        initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
        return tf.Variable(initial)
    def bias_variable(self, shape):
        # small positive constant initialization
        initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
        return tf.Variable(initial)
    def kl_divergence(self, p, p_hat):
        # returns KL(p||p_hat) - the kl divergence between two Bernoulli probability vectors p and p_hat
        return p*(tf.log(p) - tf.log(p_hat)) + (1-p)*(tf.log(1-p)-tf.log(1-p_hat))
    def gru_layer(self, input, hidden, weights, name):
        """One GRU step: returns the new hidden state for (input, hidden)."""
        x, h_ = input, hidden
        # r: reset gate, z: update gate, h_hat: candidate hidden state
        r = tf.sigmoid(tf.matmul(x, weights[name+'_Wxr']) + tf.matmul(h_, weights[name+'_Whr']) + weights[name+'_br'])
        z = tf.sigmoid(tf.matmul(x, weights[name+'_Wxz']) + tf.matmul(h_, weights[name+'_Whz']) + weights[name+'_bz'])
        h_hat = tf.tanh(
            tf.matmul(x, weights[name+'_Wxh']) + tf.matmul(tf.mul(r, h_), weights[name+'_Whh']) + weights[name+'_bh'])
        output = tf.mul((1 - z), h_hat) + tf.mul(z, h_)
        return output
    def encode(self, input):
        """Encode (state, action, gru_state) into a merged embedding; returns (embedding, new_gru_state)."""
        state = tf.cast(input[0], tf.float32)
        action = tf.cast(input[1], tf.float32)
        gru_state = tf.cast(input[2], tf.float32)
        # returns an encoder
        state_embedder1 = tf.nn.relu(tf.matmul(state, self.weights["encoder1_state_weights"]) + self.weights["encoder1_state_biases"])
        gru_state = self.gru_layer(state_embedder1, gru_state, self.weights, 'gru1')
        state_embedder2 = tf.sigmoid(tf.matmul(gru_state, self.weights["encoder2_state_weights"]) + self.weights["encoder2_state_biases"])
        action_embedder1 = tf.nn.relu(tf.matmul(action, self.weights["encoder1_action_weights"]) + self.weights["encoder1_action_biases"])
        action_embedder2 = tf.sigmoid(tf.matmul(action_embedder1, self.weights["encoder2_action_weights"]) + self.weights["encoder2_action_biases"])
        # merge state and action streams (default merger is elementwise tf.mul)
        output = self.merger(state_embedder2, action_embedder2)
        hidden = tf.matmul(output, self.weights["encoder3_weights"]) + self.weights["encoder3_biases"]
        #bn = tf.nn.batch_normalization(hidden, mean=self.weights["bn1_mean"], variance=self.weights["bn1_variance"],
        #                               offset=self.weights["bn1_offset"], scale=self.weights["bn1_scale"], variance_epsilon=0.001)
        hidden_relu = tf.nn.relu(hidden)
        output = tf.nn.relu(tf.matmul(hidden_relu, self.weights["encoder4_weights"]) + self.weights["encoder4_biases"])
        gru_state = tf.cast(gru_state, tf.float32)
        return output, gru_state
    def decode(self, input):
        """Decode an embedding back to a predicted state vector."""
        # returns a decoder
        hidden = tf.matmul(input, self.weights["decoder1_weights"]) + self.weights["decoder1_biases"]
        hidden_relu = tf.nn.relu(hidden)
        # output is encoding_size x 1 x small_encoding_size
        # multiheaded_hidden = tf.matmul(input, self.weights["multiheaded1_weights"]) + self.weights["multiheaded1_biases"]
        # multiheaded_hidden = tf.reshape(multiheaded_hidden, [-1, self.arch_params['output_dim'], 1, self.arch_params['small_encoding_dim']])
        # multiheaded_hidden = tf.nn.relu(multiheaded_hidden)
        #
        # h = tf.scan(lambda a,x: tf.batch_matmul(x, self.weights["multiheaded2_weights"]), multiheaded_hidden,
        #             initializer=tf.Variable(tf.constant(0.0, shape=[self.arch_params['output_dim'],1,1])))
        # multiheaded_output = h + self.weights["multiheaded2_biases"]
        # output1 = tf.reshape(multiheaded_output, [-1, self.arch_params['output_dim']])
        output1 = tf.matmul(hidden_relu, self.weights["decoder2_weights"]) + self.weights["decoder2_biases"]
        output = output1
        return output
    def forward(self, input):
        """Full pass: encode then decode; returns (prediction, sparsity_loss, new_gru_state).

        NOTE(review): sparsity_loss is currently a placeholder empty list —
        the KL term below is commented out.
        """
        print("Forward Model Advanced")
        # run a forward pass
        encoding, gru_state = self.encode(input)
        output = self.decode(encoding)
        sparsity_loss = []
        #sparsity_loss = tf.reduce_sum(self.kl_divergence(self.sparsity_params['rho'], encoding))
        return output, sparsity_loss, gru_state
    def calculate_loss(self, output, target, sparsity_loss):
        """Return (l2_loss, l2_loss + beta * sparsity_loss), scaled by batch size."""
        target = tf.cast(target, dtype=tf.float32)
        l2_loss = tf.nn.l2_loss(self.states_normalizer*(output - target)) / float(self.training_params['batch_size'])
        return l2_loss, l2_loss + self.sparsity_params['beta']*sparsity_loss
    def get_model(self):
        """Assemble the graph; returns (input placeholders, target, output, loss, l2_loss, new_gru_state)."""
        input_state = tf.placeholder(tf.float32, shape=[None, self.state_size], name='input_state')
        input_action = tf.placeholder(tf.float32, shape=[None, self.action_size], name='input_action')
        gru_state = tf.placeholder(tf.float32, shape=[None, self.arch_params['encoding_dim']], name='gru_state')
        input = [input_state, input_action, gru_state]
        output, sparsity_loss, new_gru_state = self.forward(input)
        target = tf.placeholder(tf.float32, shape=[None, self.arch_params['output_dim']], name='target')
        l2_loss, loss = self.calculate_loss(output, target, sparsity_loss)
        return input, target, output, loss, l2_loss, new_gru_state
    def backward(self, loss):
        """Build Adam-based training ops; returns (apply_grads, mean |grad|, mean |weight|)."""
        # create an optimizer
        opt = tf.train.AdamOptimizer(learning_rate=self.training_params['lr'])
        # compute the gradients for a list of variables
        grads_and_vars = opt.compute_gradients(loss=loss, var_list=self.weights.values())
        mean_abs_grad, mean_abs_w = common.compute_mean_abs_norm(grads_and_vars)
        # apply the gradient
        apply_grads = opt.apply_gradients(grads_and_vars)
        return apply_grads, mean_abs_grad, mean_abs_w
    def train(self, objective):
        """Wire the training op and summaries for the given objective tensor."""
        self.loss = objective
        self.minimize, self.mean_abs_grad, self.mean_abs_w = self.backward(self.loss)
        self.loss_summary = tf.scalar_summary('loss_t', objective)
    def pretrain(self, opt, lr, batch_size, num_iterations, expert_er_path):
        """Pretrain on expert experience-replay trajectories loaded from *expert_er_path*.

        Normalizes states/actions by their (max - min) range, then runs
        `num_iterations` optimization steps of `opt` on the l2 loss.
        """
        er_expert = common.load_er(
            fname=expert_er_path,
            batch_size=batch_size,
            history_length=100,
            traj_length=2)
        self.states_normalizer = er_expert.states_max - er_expert.states_min
        self.actions_normalizer = er_expert.actions_max - er_expert.actions_min
        #self.states_normalizer = er_expert.states_std
        #self.actions_normalizer = er_expert.actions_std
        # guard against division by ~zero for constant features
        self.states_normalizer[self.states_normalizer < 0.0001] = 1
        self.actions_normalizer[self.actions_normalizer < 0.0001] = 1
        self.states_min=er_expert.states_min
        # get placeholders
        input_ph, target_ph, output, loss, l2_loss, new_gru_state = self.get_model()
        train_op = opt(learning_rate=lr).minimize(l2_loss)
        train_losses = []
        last_train_losses = []
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            for i in range(num_iterations):
                fetches = [train_op, output, l2_loss, loss, new_gru_state]
                # get a trajectory from the train / test set and preprocess it
                trajectory = er_expert.sample_trajectory(self.num_steps + 1)
                trajectory_states = (trajectory[0] - er_expert.states_min) / self.states_normalizer
                trajectory_actions = (trajectory[1] - er_expert.actions_min) / self.actions_normalizer
                # set inputs and targets
                # GRU state is re-initialized to ones at the start of each trajectory
                s = np.ones((1, self.arch_params['encoding_dim']))
                #o = trajectory_states[:batch_size, 0, :]
                target = []
                for step in range(self.num_steps):
                    # feed step t, predict (and fit) the state at step t+1
                    input = [trajectory_states[:batch_size, step, :], trajectory_actions[:batch_size, step, :]]
                    target = np.squeeze(trajectory_states[:batch_size, step+1, :])
                    _, o, l2, l, s = sess.run(fetches,
                        feed_dict={input_ph[0]: input[0], input_ph[1]: input[1], input_ph[2]: s, target_ph: target})
                if i % 50 == 0:
                    print("iteration " + str(i) + " l2 loss = " + str(l2))
                    print((self.states_normalizer*o[0])[:30])
                    print((self.states_normalizer*target[0])[:30])
                    print("***********************")
                # running mean over the last 50 per-step losses
                last_train_losses += [l2]
                if len(last_train_losses) > 50:
                    del last_train_losses[0]
                train_losses += [sum(last_train_losses) / float(len(last_train_losses))]
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystoneauth1 import loading as kaloading
from ironicclient import client as iroclient
from ironicclient.common import filecache
from ironicclient.common import http
from ironicclient import exc
from ironicclient.tests.unit import utils
from ironicclient.v1 import client as v1
class ClientTest(utils.BaseTestCase):
    """Tests for ironicclient.client.get_client argument handling.

    The keystoneauth loader/session machinery is mocked; the assertions
    check how user-supplied kwargs flow into the auth plugin loader, the
    session, endpoint discovery, and the Client constructor.
    """
    def test_get_client_with_auth_token_ironic_url(self):
        kwargs = {
            'ironic_url': 'http://ironic.example.org:6385/',
            'os_auth_token': 'USER_AUTH_TOKEN',
        }
        client = iroclient.get_client('1', **kwargs)
        self.assertEqual('USER_AUTH_TOKEN', client.http_client.auth_token)
        self.assertEqual('http://ironic.example.org:6385/',
                         client.http_client.endpoint)
    @mock.patch.object(filecache, 'retrieve_data', autospec=True)
    @mock.patch.object(kaloading.session, 'Session', autospec=True)
    @mock.patch.object(kaloading, 'get_plugin_loader', autospec=True)
    def _test_get_client(self, mock_ks_loader, mock_ks_session,
                         mock_retrieve_data, version=None,
                         auth='password', **kwargs):
        """Shared helper: run get_client(**kwargs) and verify the keystoneauth
        wiring (plugin loader, session options, endpoint lookup) plus the
        cached-version fallback behavior."""
        session = mock_ks_session.return_value.load_from_options.return_value
        session.get_endpoint.return_value = 'http://localhost:6385/v1/f14b4123'
        mock_ks_loader.return_value.load_from_options.return_value = 'auth'
        mock_retrieve_data.return_value = version
        client = iroclient.get_client('1', **kwargs)
        mock_ks_loader.assert_called_once_with(auth)
        mock_ks_session.return_value.load_from_options.assert_called_once_with(
            auth='auth', timeout=kwargs.get('timeout'),
            insecure=kwargs.get('insecure'), cert=kwargs.get('cert'),
            cacert=kwargs.get('cacert'), key=kwargs.get('key'))
        session.get_endpoint.assert_called_once_with(
            service_type=kwargs.get('os_service_type') or 'baremetal',
            interface=kwargs.get('os_endpoint_type') or 'publicURL',
            region_name=kwargs.get('os_region_name'))
        # an explicit API version bypasses the on-disk version cache
        if 'os_ironic_api_version' in kwargs:
            self.assertEqual(0, mock_retrieve_data.call_count)
        else:
            mock_retrieve_data.assert_called_once_with(
                host='localhost',
                port='6385')
            self.assertEqual(version or v1.DEFAULT_VER,
                             client.http_client.os_ironic_api_version)
    def test_get_client_no_auth_token(self):
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
        }
        self._test_get_client(**kwargs)
    def test_get_client_service_and_endpoint_type_defaults(self):
        # empty service/endpoint type strings should fall back to defaults
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
            'os_service_type': '',
            'os_endpoint_type': ''
        }
        self._test_get_client(**kwargs)
    def test_get_client_with_region_no_auth_token(self):
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_region_name': 'REGIONONE',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
        }
        self._test_get_client(**kwargs)
    def test_get_client_no_url(self):
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': '',
        }
        self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
                          '1', **kwargs)
        # test the alias as well to ensure backwards compatibility
        self.assertRaises(exc.AmbigiousAuthSystem, iroclient.get_client,
                          '1', **kwargs)
    def test_get_client_incorrect_auth_params(self):
        # missing password: there is no way to authenticate
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_auth_url': 'http://localhost:35357/v2.0',
        }
        self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
                          '1', **kwargs)
    def test_get_client_with_api_version_latest(self):
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
            'os_ironic_api_version': "latest",
        }
        self._test_get_client(**kwargs)
    def test_get_client_with_api_version_numeric(self):
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
            'os_ironic_api_version': "1.4",
        }
        self._test_get_client(**kwargs)
    def test_get_client_default_version_set_cached(self):
        version = '1.3'
        # Make sure we don't coincidentally succeed
        self.assertNotEqual(v1.DEFAULT_VER, version)
        kwargs = {
            'os_tenant_name': 'TENANT_NAME',
            'os_username': 'USERNAME',
            'os_password': 'PASSWORD',
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': '',
        }
        self._test_get_client(version=version, **kwargs)
    def test_get_client_with_auth_token(self):
        kwargs = {
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_auth_token': 'USER_AUTH_TOKEN',
        }
        self._test_get_client(auth='token', **kwargs)
    def test_get_client_with_region_name_auth_token(self):
        kwargs = {
            'os_auth_url': 'http://localhost:35357/v2.0',
            'os_region_name': 'REGIONONE',
            'os_auth_token': 'USER_AUTH_TOKEN',
        }
        self._test_get_client(auth='token', **kwargs)
    def test_get_client_only_session_passed(self):
        session = mock.Mock()
        session.get_endpoint.return_value = 'http://localhost:35357/v2.0'
        kwargs = {
            'session': session,
        }
        iroclient.get_client('1', **kwargs)
        session.get_endpoint.assert_called_once_with(service_type='baremetal',
                                                     interface='publicURL',
                                                     region_name=None)
    def test_get_client_incorrect_session_passed(self):
        session = mock.Mock()
        session.get_endpoint.side_effect = Exception('boo')
        kwargs = {
            'session': session,
        }
        self.assertRaises(exc.AmbiguousAuthSystem, iroclient.get_client,
                          '1', **kwargs)
    @mock.patch.object(kaloading.session, 'Session', autospec=True)
    @mock.patch.object(kaloading, 'get_plugin_loader', autospec=True)
    def _test_loader_arguments_passed_correctly(
            self, mock_ks_loader, mock_ks_session,
            passed_kwargs, expected_kwargs):
        """Shared helper: verify that `passed_kwargs` given to get_client are
        translated into `expected_kwargs` for the keystoneauth plugin loader."""
        session = mock_ks_session.return_value.load_from_options.return_value
        session.get_endpoint.return_value = 'http://localhost:6385/v1/f14b4123'
        mock_ks_loader.return_value.load_from_options.return_value = 'auth'
        iroclient.get_client('1', **passed_kwargs)
        mock_ks_loader.return_value.load_from_options.assert_called_once_with(
            **expected_kwargs)
        mock_ks_session.return_value.load_from_options.assert_called_once_with(
            auth='auth', timeout=passed_kwargs.get('timeout'),
            insecure=passed_kwargs.get('insecure'),
            cert=passed_kwargs.get('cert'),
            cacert=passed_kwargs.get('cacert'), key=passed_kwargs.get('key'))
        session.get_endpoint.assert_called_once_with(
            service_type=passed_kwargs.get('os_service_type') or 'baremetal',
            interface=passed_kwargs.get('os_endpoint_type') or 'publicURL',
            region_name=passed_kwargs.get('os_region_name'))
    def test_loader_arguments_token(self):
        passed_kwargs = {
            'os_auth_url': 'http://localhost:35357/v3',
            'os_region_name': 'REGIONONE',
            'os_auth_token': 'USER_AUTH_TOKEN',
        }
        expected_kwargs = {
            'auth_url': 'http://localhost:35357/v3',
            'project_id': None,
            'project_name': None,
            'user_domain_id': None,
            'user_domain_name': None,
            'project_domain_id': None,
            'project_domain_name': None,
            'token': 'USER_AUTH_TOKEN'
        }
        self._test_loader_arguments_passed_correctly(
            passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
    def test_loader_arguments_password_tenant_name(self):
        passed_kwargs = {
            'os_auth_url': 'http://localhost:35357/v3',
            'os_region_name': 'REGIONONE',
            'os_tenant_name': 'TENANT',
            'os_username': 'user',
            'os_password': '1234',
            'os_project_domain_id': 'DEFAULT',
            'os_user_domain_id': 'DEFAULT'
        }
        expected_kwargs = {
            'auth_url': 'http://localhost:35357/v3',
            'project_id': None,
            'project_name': 'TENANT',
            'user_domain_id': 'DEFAULT',
            'user_domain_name': None,
            'project_domain_id': 'DEFAULT',
            'project_domain_name': None,
            'username': 'user',
            'password': '1234'
        }
        self._test_loader_arguments_passed_correctly(
            passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
    def test_loader_arguments_password_project_id(self):
        passed_kwargs = {
            'os_auth_url': 'http://localhost:35357/v3',
            'os_region_name': 'REGIONONE',
            'os_project_id': '1000',
            'os_username': 'user',
            'os_password': '1234',
            'os_project_domain_name': 'domain1',
            'os_user_domain_name': 'domain1'
        }
        expected_kwargs = {
            'auth_url': 'http://localhost:35357/v3',
            'project_id': '1000',
            'project_name': None,
            'user_domain_id': None,
            'user_domain_name': 'domain1',
            'project_domain_id': None,
            'project_domain_name': 'domain1',
            'username': 'user',
            'password': '1234'
        }
        self._test_loader_arguments_passed_correctly(
            passed_kwargs=passed_kwargs, expected_kwargs=expected_kwargs)
    @mock.patch.object(iroclient, 'Client')
    @mock.patch.object(kaloading.session, 'Session', autospec=True)
    def test_correct_arguments_passed_to_client_constructor_noauth_mode(
            self, mock_ks_session, mock_client):
        # ironic_url + token: no keystone session should be created at all
        kwargs = {
            'ironic_url': 'http://ironic.example.org:6385/',
            'os_auth_token': 'USER_AUTH_TOKEN',
            'os_ironic_api_version': 'latest',
            'insecure': True,
            'max_retries': 10,
            'retry_interval': 10,
            'os_cacert': 'data'
        }
        iroclient.get_client('1', **kwargs)
        mock_client.assert_called_once_with(
            '1', 'http://ironic.example.org:6385/',
            **{
                'os_ironic_api_version': 'latest',
                'max_retries': 10,
                'retry_interval': 10,
                'token': 'USER_AUTH_TOKEN',
                'insecure': True,
                'ca_file': 'data',
                'cert_file': None,
                'key_file': None,
                'timeout': None,
                'session': None
            }
        )
        self.assertFalse(mock_ks_session.called)
    @mock.patch.object(iroclient, 'Client')
    @mock.patch.object(kaloading.session, 'Session', autospec=True)
    def test_correct_arguments_passed_to_client_constructor_session_created(
            self, mock_ks_session, mock_client):
        session = mock_ks_session.return_value.load_from_options.return_value
        kwargs = {
            'os_auth_url': 'http://localhost:35357/v3',
            'os_region_name': 'REGIONONE',
            'os_project_id': '1000',
            'os_username': 'user',
            'os_password': '1234',
            'os_project_domain_name': 'domain1',
            'os_user_domain_name': 'domain1'
        }
        iroclient.get_client('1', **kwargs)
        mock_client.assert_called_once_with(
            '1', session.get_endpoint.return_value,
            **{
                'os_ironic_api_version': None,
                'max_retries': None,
                'retry_interval': None,
                'session': session,
            }
        )
    @mock.patch.object(iroclient, 'Client')
    @mock.patch.object(kaloading.session, 'Session', autospec=True)
    def test_correct_arguments_passed_to_client_constructor_session_passed(
            self, mock_ks_session, mock_client):
        # a caller-provided session must be reused, not rebuilt
        session = mock.Mock()
        kwargs = {
            'session': session,
        }
        iroclient.get_client('1', **kwargs)
        mock_client.assert_called_once_with(
            '1', session.get_endpoint.return_value,
            **{
                'os_ironic_api_version': None,
                'max_retries': None,
                'retry_interval': None,
                'session': session,
            }
        )
        self.assertFalse(mock_ks_session.called)
    def test_safe_header_with_auth_token(self):
        # auth tokens must be redacted (SHA1 digest) before logging
        (name, value) = ('X-Auth-Token', u'3b640e2e64d946ac8f55615aff221dc1')
        expected_header = (u'X-Auth-Token',
                           '{SHA1}6de9fb3b0b89099030a54abfeb468e7b1b1f0f2b')
        client = http.HTTPClient('http://localhost/')
        header_redact = client._process_header(name, value)
        self.assertEqual(expected_header, header_redact)
    def test_safe_header_with_no_auth_token(self):
        name, value = ('Accept', 'application/json')
        header = ('Accept', 'application/json')
        client = http.HTTPClient('http://localhost/')
        header_redact = client._process_header(name, value)
        self.assertEqual(header, header_redact)
| |
# Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements all necessary Impala HiveServer 2 RPC functionality."""
# This work builds off of:
# 1. the Hue interface:
# hue/apps/beeswax/src/beeswax/server/dbms.py
# hue/apps/beeswax/src/beeswax/server/hive_server2_lib.py
# hue/desktop/core/src/desktop/lib/thrift_util.py
# 2. the Impala shell:
# Impala/shell/impala_shell.py
import datetime
import socket
import operator
import exceptions
import re
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport, TTransportException
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from impala.error import RPCError, err_if_rpc_not_ok
from impala.cli_service import TCLIService
from impala.cli_service.ttypes import (TOpenSessionReq, TFetchResultsReq,
TCloseSessionReq, TExecuteStatementReq, TGetInfoReq, TGetInfoType,
TTypeId, TFetchOrientation, TGetResultSetMetadataReq, TStatusCode,
TGetColumnsReq, TGetSchemasReq, TGetTablesReq, TGetFunctionsReq,
TGetOperationStatusReq, TOperationState, TCancelOperationReq,
TCloseOperationReq, TGetLogReq)
# mapping between Thrift TTypeId (in schema) and TColumnValue (in returned rows)
# helper object for converting from TRow to something friendlier
_TTypeId_to_TColumnValue_getters = {
    'BOOLEAN_TYPE': operator.attrgetter('boolVal'),
    'TINYINT_TYPE': operator.attrgetter('byteVal'),
    'SMALLINT_TYPE': operator.attrgetter('i16Val'),
    'INT_TYPE': operator.attrgetter('i32Val'),
    'BIGINT_TYPE': operator.attrgetter('i64Val'),
    # timestamps travel over the wire as strings; see _parse_timestamp
    'TIMESTAMP_TYPE': operator.attrgetter('stringVal'),
    'FLOAT_TYPE': operator.attrgetter('doubleVal'),
    'DOUBLE_TYPE': operator.attrgetter('doubleVal'),
    'STRING_TYPE': operator.attrgetter('stringVal')
}
# the type specifiers returned from GetColumns use the strings from
# com.cloudera.impala.catalog.PrimitiveType; here we map those strings to
# TTypeId strings specified in TCLIService
_PrimitiveType_to_TTypeId = {
    'BOOLEAN': 'BOOLEAN_TYPE',
    'TINYINT': 'TINYINT_TYPE',
    'SMALLINT': 'SMALLINT_TYPE',
    'INT': 'INT_TYPE',
    'BIGINT': 'BIGINT_TYPE',
    'TIMESTAMP': 'TIMESTAMP_TYPE',
    'FLOAT': 'FLOAT_TYPE',
    'DOUBLE': 'DOUBLE_TYPE',
    'STRING': 'STRING_TYPE',
}
# datetime only supports 6 digits of microseconds but Impala supports 9.
# If present, the trailing 3 digits will be ignored without warning.
# (the inner group deliberately captures at most 6 fractional digits)
_TIMESTAMP_PATTERN = re.compile(r'(\d+-\d+-\d+ \d+:\d+:\d+(\.\d{,6})?)')
def _parse_timestamp(value):
    """Convert an Impala timestamp string into a datetime.datetime.

    Falsy input (None, '') maps to None.  Impala emits up to 9 fractional
    digits; datetime supports only 6, so the regex captures at most 6 and
    any further digits are silently discarded.

    Raises Exception when the string does not look like a timestamp.
    """
    if not value:
        return None
    match = _TIMESTAMP_PATTERN.match(value)
    if not match:
        raise Exception(
            'Cannot convert "{}" into a datetime'.format(value))
    # Use the matched prefix in *both* branches.  The original only
    # truncated when fractional digits were present, so a whole-second
    # timestamp with trailing text leaked into strptime and raised
    # ValueError instead of parsing the valid prefix.
    if match.group(2):
        fmt = '%Y-%m-%d %H:%M:%S.%f'
    else:
        fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.datetime.strptime(match.group(), fmt)
# TODO: Add another decorator that runs the function in its own thread
def threaded(func):
    """Placeholder decorator; thread-based execution is not implemented yet."""
    raise NotImplementedError
def retry(func):
    """Decorator: retry an RPC up to three times over a fresh transport.

    `service` must be the first positional arg or be passed as the
    `service` kwarg so that the underlying thrift transport can be found.
    Socket/transport errors trigger a close-and-retry cycle; any other
    exception propagates immediately.
    """
    def wrapper(*args, **kwargs):
        # locate the thrift transport behind the service client
        if 'service' in kwargs:
            transport = kwargs['service']._iprot.trans
        elif len(args) > 0 and isinstance(args[0], TCLIService.Client):
            transport = args[0]._iprot.trans
        else:
            raise RPCError("RPC function does not have expected 'service' arg")
        last_error = None
        tries_left = 3
        while tries_left > 0:
            try:
                if not transport.isOpen():
                    transport.open()
                return func(*args, **kwargs)
            except socket.error as e:
                last_error = e
            except TTransportException as e:
                last_error = e
            except Exception:
                raise
            # transient transport failure: close and retry on a fresh connection
            transport.close()
            tries_left -= 1
        # out of retries: surface the last transport/socket error.
        # (The original used a bare `raise` here, which is illegal outside an
        # active except block on Python 3 — it raised RuntimeError instead of
        # the real error.)
        raise last_error
    # preserve the wrapped function's identity without new imports
    wrapper.__name__ = getattr(func, '__name__', 'wrapper')
    wrapper.__doc__ = func.__doc__
    return wrapper
# _get_socket and _get_transport based on the Impala shell impl
def _get_socket(host, port, use_ssl, ca_cert):
    """Build a (possibly SSL-wrapped) thrift socket for host:port."""
    if not use_ssl:
        return TSocket(host, port)
    from thrift.transport.TSSLSocket import TSSLSocket
    if ca_cert is None:
        # no CA bundle given: skip certificate validation
        return TSSLSocket(host, port, validate=False)
    return TSSLSocket(host, port, validate=True, ca_certs=ca_cert)
def _get_transport(sock, host, use_ldap, ldap_user, ldap_password, use_kerberos,
        kerberos_service_name):
    """Wrap *sock* in a buffered transport, or a SASL transport when LDAP
    (PLAIN) or Kerberos (GSSAPI) authentication is requested."""
    if not (use_ldap or use_kerberos):
        return TBufferedTransport(sock)
    # prefer saslwrapper, fall back to the plain sasl bindings
    try:
        import saslwrapper as sasl
    except ImportError:
        import sasl
    from impala.thrift_sasl import TSaslClientTransport
    def sasl_factory():
        client = sasl.Client()
        client.setAttr("host", host)
        if use_ldap:
            client.setAttr("username", ldap_user)
            client.setAttr("password", ldap_password)
        else:
            client.setAttr("service", kerberos_service_name)
        client.init()
        return client
    mechanism = "GSSAPI" if use_kerberos else "PLAIN"
    return TSaslClientTransport(sasl_factory, mechanism, sock)
def connect_to_impala(host, port, timeout=45, use_ssl=False, ca_cert=None,
        use_ldap=False, ldap_user=None, ldap_password=None, use_kerberos=False,
        kerberos_service_name='impala'):
    """Open a thrift connection to impalad and return a TCLIService client."""
    sock = _get_socket(host, port, use_ssl, ca_cert)
    # thrift timeouts are specified in milliseconds
    sock.setTimeout(timeout * 1000.)
    transport = _get_transport(sock, host, use_ldap, ldap_user, ldap_password,
                               use_kerberos, kerberos_service_name)
    transport.open()
    protocol = TBinaryProtocol(transport)
    return TCLIService.Client(protocol)
def close_service(service):
    """Close the thrift transport underlying *service*."""
    transport = service._iprot.trans
    transport.close()
def reconnect(service):
    """Cycle the thrift transport underlying *service*: close, then reopen."""
    transport = service._iprot.trans
    transport.close()
    transport.open()
@retry
def open_session(service, user, configuration=None):
    """Open an HS2 session for *user*; returns the new session handle."""
    request = TOpenSessionReq(username=user, configuration=configuration)
    response = service.OpenSession(request)
    err_if_rpc_not_ok(response)
    return response.sessionHandle
@retry
def close_session(service, session_handle):
    """Close the given HS2 session."""
    request = TCloseSessionReq(sessionHandle=session_handle)
    response = service.CloseSession(request)
    err_if_rpc_not_ok(response)
@retry
def execute_statement(service, session_handle, statement, configuration=None):
req = TExecuteStatementReq(sessionHandle=session_handle,
statement=statement, confOverlay=configuration)
resp = service.ExecuteStatement(req)
err_if_rpc_not_ok(resp)
return resp.operationHandle
@retry
def get_result_schema(service, operation_handle):
    '''Return [(column_name, type_name), ...] or None when there is no
    result set for *operation_handle*.'''
    if not operation_handle.hasResultSet:
        return None
    resp = service.GetResultSetMetadata(
        TGetResultSetMetadataReq(operationHandle=operation_handle))
    err_if_rpc_not_ok(resp)
    # Map each column to its name plus the symbolic primitive type name.
    return [(col.columnName,
             TTypeId._VALUES_TO_NAMES[col.typeDesc.types[0].primitiveEntry.type])
            for col in resp.schema.columns]
@retry
def fetch_results(service, operation_handle, schema=None, max_rows=100,
                  orientation=TFetchOrientation.FETCH_NEXT):
    '''Fetch up to *max_rows* rows for *operation_handle* as tuples.'''
    if not operation_handle.hasResultSet:
        return None
    # The schema tells us which TColumnValue member holds each value.
    if schema is None:
        schema = get_result_schema(service, operation_handle)
    resp = service.FetchResults(
        TFetchResultsReq(operationHandle=operation_handle,
                         orientation=orientation,
                         maxRows=max_rows))
    err_if_rpc_not_ok(resp)
    fetched = []
    for trow in resp.results.rows:
        values = []
        for i, col_val in enumerate(trow.colVals):
            col_type = schema[i][1]
            value = _TTypeId_to_TColumnValue_getters[col_type](col_val).value
            # Timestamps arrive as strings and are parsed into datetimes.
            if col_type == 'TIMESTAMP_TYPE':
                value = _parse_timestamp(value)
            values.append(value)
        fetched.append(tuple(values))
    return fetched
@retry
def get_current_database(service, session_handle):
    """Not supported by this client yet; always raises NotImplementedError."""
    raise NotImplementedError
@retry
def get_databases(service, session_handle):
    """Request the list of schemas/databases; returns the operation handle.

    Fetch rows with fetch_results() and release it with close_operation().
    """
    req = TGetSchemasReq(sessionHandle=session_handle, schemaName='.*')
    resp = service.GetSchemas(req)
    err_if_rpc_not_ok(resp)
    # BUG FIX: thrift response objects expose ``operationHandle``
    # (camelCase), as used by every other helper in this module;
    # ``operation_handle`` raises AttributeError.
    return resp.operationHandle
@retry
def database_exists(service, session_handle, db_name):
    '''Return True when a schema named *db_name* exists.'''
    req = TGetSchemasReq(sessionHandle=session_handle, schemaName=db_name)
    resp = service.GetSchemas(req)
    err_if_rpc_not_ok(resp)
    handle = resp.operationHandle
    # Only the default page of rows is fetched, but there should ideally
    # be a single matching row anyway.
    rows = fetch_results(service=service, operation_handle=handle)
    found = any(row[0] == db_name for row in rows)
    close_operation(service, handle)
    return found
@retry
def get_tables(service, session_handle, database_name='.*'):
    """Request the list of tables in *database_name*; returns the
    operation handle whose rows hold the table metadata."""
    req = TGetTablesReq(sessionHandle=session_handle,
                        schemaName=database_name,
                        tableName='.*')
    resp = service.GetTables(req)
    err_if_rpc_not_ok(resp)
    # BUG FIX: the thrift response attribute is ``operationHandle``
    # (camelCase), as used by every other helper in this module;
    # ``operation_handle`` raises AttributeError.
    return resp.operationHandle
@retry
def table_exists(service, session_handle, table_name, database_name='.*'):
    '''Return True when *table_name* exists in *database_name*.'''
    req = TGetTablesReq(sessionHandle=session_handle,
                        schemaName=database_name,
                        tableName=table_name)
    resp = service.GetTables(req)
    err_if_rpc_not_ok(resp)
    handle = resp.operationHandle
    # Only the default page of rows is fetched, but there should ideally
    # be a single matching row anyway.
    rows = fetch_results(service=service, operation_handle=handle)
    # Column index 2 of the GetTables result set carries the table name.
    found = any(row[2] == table_name for row in rows)
    close_operation(service, handle)
    return found
@retry
def get_table_schema(service, session_handle, table_name, database_name='.*'):
    """Request column metadata for *table_name*; returns the operation handle."""
    req = TGetColumnsReq(sessionHandle=session_handle,
                         schemaName=database_name,
                         tableName=table_name,
                         columnName='.*')
    resp = service.GetColumns(req)
    err_if_rpc_not_ok(resp)
    return resp.operationHandle
@retry
def get_functions(service, session_handle, database_name='.*'):
    """Request the list of functions; returns the operation handle."""
    # TODO: need to test this one especially
    req = TGetFunctionsReq(sessionHandle=session_handle,
                           schemaName=database_name,
                           functionName='.*')
    resp = service.GetFunctions(req)
    err_if_rpc_not_ok(resp)
    return resp.operationHandle
@retry
def get_operation_status(service, operation_handle):
    """Return the operation's symbolic state name (e.g. 'FINISHED_STATE')."""
    req = TGetOperationStatusReq(operationHandle=operation_handle)
    resp = service.GetOperationStatus(req)
    err_if_rpc_not_ok(resp)
    return TOperationState._VALUES_TO_NAMES[resp.operationState]
@retry
def cancel_operation(service, operation_handle):
    """Cancel a running operation."""
    req = TCancelOperationReq(operationHandle=operation_handle)
    resp = service.CancelOperation(req)
    err_if_rpc_not_ok(resp)
@retry
def close_operation(service, operation_handle):
    """Close an operation and release its server-side resources."""
    req = TCloseOperationReq(operationHandle=operation_handle)
    resp = service.CloseOperation(req)
    err_if_rpc_not_ok(resp)
@retry
def get_log(service, operation_handle):
    """Return the server-side log text for the operation."""
    req = TGetLogReq(operationHandle=operation_handle)
    resp = service.GetLog(req)
    err_if_rpc_not_ok(resp)
    return resp.log
def ping(service, session_handle):
    '''Return True iff the server answers a trivial GetInfo RPC.'''
    request = TGetInfoReq(sessionHandle=session_handle,
                          infoType=TGetInfoType.CLI_SERVER_NAME)
    # A transport failure means the server is unreachable.
    try:
        response = service.GetInfo(request)
    except TTransportException:
        return False
    # A non-OK RPC status also counts as "not alive".
    try:
        err_if_rpc_not_ok(response)
    except RPCError:
        return False
    return True
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
from neutronclient.common import exceptions as neutron_exceptions
from saharaclient.api import base as saharaclient_base
from rally.common import log as logging
from rally.common import utils
from rally.plugins.openstack.context.cleanup import base
from rally.plugins.openstack.scenarios.fuel import utils as futils
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.wrappers import keystone as keystone_wrapper
LOG = logging.getLogger(__name__)
def get_order(start):
    """Yield up to 99 consecutive cleanup-order values starting at *start*."""
    for order in range(start, start + 99):
        yield order
class SynchronizedDeletion(object):
    """Mixin for resources whose delete() call is synchronous."""
    def is_deleted(self):
        # delete() blocks until the resource is gone, so no polling needed.
        return True
class QuotaMixin(SynchronizedDeletion):
    """Mixin for per-tenant quota 'resources' keyed by the tenant uuid."""
    def id(self):
        # Quotas have no id of their own; the tenant uuid stands in.
        return self.raw_resource
    def delete(self):
        # Resets the tenant's quota back to defaults.
        self._manager().delete(self.raw_resource)
    def list(self):
        # One pseudo-resource per tenant (none in admin-only context).
        return [self.tenant_uuid] if self.tenant_uuid else []
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
    """Heat stacks: default ResourceManager behavior is sufficient."""
    pass
# NOVA
_nova_order = get_order(200)
@base.resource("nova", "servers", order=next(_nova_order))
class NovaServer(base.ResourceManager):
    def list(self):
        """List all servers."""
        if hasattr(self._manager().api, "api_version"):
            # NOTE(andreykurilin): novaclient v2.27.0 includes ability to
            # return all servers(see https://review.openstack.org/#/c/217101
            # for more details). This release can be identified by presence
            # of "api_version" property of ``novaclient.client.Client`` cls.
            return self._manager().list(limit=-1)
        else:
            # FIXME(andreykurilin): Remove code below, when minimum version of
            # novaclient in requirements will allow it.
            # NOTE(andreykurilin): Nova API returns only limited number(
            # 'osapi_max_limit' option in nova.conf) of servers, so we need
            # to use 'marker' option to list all pages of servers.
            result = []
            marker = None
            while True:
                servers = self._manager().list(marker=marker)
                if not servers:
                    break
                result.extend(servers)
                marker = servers[-1].id
            return result
    def delete(self):
        """Unlock a locked server first, then delete it."""
        if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
            self.raw_resource.unlock()
        super(NovaServer, self).delete()
@base.resource("nova", "floating_ips", order=next(_nova_order))
class NovaFloatingIPs(SynchronizedDeletion, base.ResourceManager):
    """Nova floating IPs; deletion is synchronous."""
    pass
@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
    """Nova keypairs; deletion is synchronous."""
    pass
@base.resource("nova", "security_groups", order=next(_nova_order))
class NovaSecurityGroup(SynchronizedDeletion, base.ResourceManager):
    """Nova security groups, except the undeletable 'default' group."""
    def list(self):
        # The 'default' group cannot be deleted, so never report it.
        return filter(lambda x: x.name != "default",
                      super(NovaSecurityGroup, self).list())
@base.resource("nova", "quotas", order=next(_nova_order),
               admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin, base.ResourceManager):
    """Per-tenant Nova quotas."""
    pass
@base.resource("nova", "floating_ips_bulk", order=next(_nova_order),
               admin_required=True)
class NovaFloatingIpsBulk(SynchronizedDeletion, base.ResourceManager):
    """Bulk-created floating IP ranges (admin only)."""
    def id(self):
        # Bulk floating IPs are identified by address, not by uuid.
        return self.raw_resource.address
    def list(self):
        # Only ranges whose pool name was generated by Rally scenarios.
        return [floating_ip for floating_ip in self._manager().list()
                if utils.name_matches_object(floating_ip.pool,
                                             nova_utils.NovaScenario)]
@base.resource("nova", "networks", order=next(_nova_order),
               admin_required=True, tenant_resource=True)
class NovaNetworks(SynchronizedDeletion, base.ResourceManager):
    """Nova-network networks created by Rally."""
    def list(self):
        # Match both scenario-named networks and the legacy
        # "rally_novanet*" naming scheme.
        return [net for net in self._manager().list()
                if (utils.name_matches_object(net.label,
                                              nova_utils.NovaScenario) or
                    net.label.startswith("rally_novanet"))]
# EC2
_ec2_order = get_order(250)
class EC2Mixin(object):
    """Common manager lookup for EC2 resources (boto client)."""
    def _manager(self):
        # The boto client attached to the user credential object.
        return getattr(self.user, self._service)()
@base.resource("ec2", "servers", order=next(_ec2_order))
class EC2Server(EC2Mixin, base.ResourceManager):
    """EC2 instances (served by the Nova EC2 API)."""
    def is_deleted(self):
        try:
            instances = self._manager().get_only_instances(
                instance_ids=[self.id()])
        except boto_exception.EC2ResponseError as e:
            # NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound'
            # if instance not found. In this case, we consider
            # instance has already been deleted.
            return getattr(e, "error_code") == "InvalidInstanceID.NotFound"
        # NOTE(wtakase): After instance deletion, instance can be 'terminated'
        # state. If all instance states are 'terminated', this
        # returns True. And if get_only_instaces() returns empty
        # list, this also returns True because we consider
        # instance has already been deleted.
        return all(map(lambda i: i.state == "terminated", instances))
    def delete(self):
        self._manager().terminate_instances(instance_ids=[self.id()])
    def list(self):
        return self._manager().get_only_instances()
# NEUTRON
_neutron_order = get_order(300)
@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
    # Neutron has the best client ever, so we need to override everything
    def supports_extension(self, extension):
        """Return True when the deployment advertises *extension*."""
        exts = self._manager().list_extensions().get("extensions", [])
        if any(ext.get("alias") == extension for ext in exts):
            return True
        return False
    def _manager(self):
        # Prefer the admin client when the resource requires admin rights.
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()
    def id(self):
        # Neutron resources are plain dicts, not objects.
        return self.raw_resource["id"]
    def delete(self):
        # Dispatch to the client's per-resource delete_<resource>() method.
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        delete_method(self.id())
    def list(self):
        # list_<resource>s(), filtered down to this tenant's resources.
        resources = self._resource + "s"
        list_method = getattr(self._manager(), "list_%s" % resources)
        return filter(lambda r: r["tenant_id"] == self.tenant_uuid,
                      list_method({"tenant_id": self.tenant_uuid})[resources])
class NeutronLbaasV1Mixin(NeutronMixin):
    """Listing helper for LBaaS v1 resources; empty without the extension."""
    def list(self):
        # Without the 'lbaas' extension these resource types do not exist.
        if not self.supports_extension("lbaas"):
            return []
        return super(NeutronLbaasV1Mixin, self).list()
@base.resource("neutron", "vip", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Vip(NeutronLbaasV1Mixin):
    """LBaaS v1 VIPs."""
    pass
@base.resource("neutron", "health_monitor", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Healthmonitor(NeutronLbaasV1Mixin):
    """LBaaS v1 health monitors."""
    pass
@base.resource("neutron", "pool", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Pool(NeutronLbaasV1Mixin):
    """LBaaS v1 pools."""
    pass
@base.resource("neutron", "port", order=next(_neutron_order),
               tenant_resource=True)
class NeutronPort(NeutronMixin):
    """Neutron ports; router-interface ports need special handling."""
    def delete(self):
        # Router interface ports cannot be deleted directly: detach them
        # from their router instead.
        if (self.raw_resource["device_owner"] == "network:router_interface" or
                self.raw_resource["device_owner"] ==
                "network:router_interface_distributed"):
            self._manager().remove_interface_router(
                self.raw_resource["device_id"],
                {"port_id": self.raw_resource["id"]})
        else:
            try:
                self._manager().delete_port(self.id())
            except neutron_exceptions.PortNotFoundClient:
                # Port can be already auto-deleted, skip silently
                LOG.debug("Port %s was not deleted. Skip silently because "
                          "port can be already auto-deleted."
                          % self.id())
@base.resource("neutron", "router", order=next(_neutron_order),
               tenant_resource=True)
class NeutronRouter(NeutronMixin):
    """Neutron routers."""
    pass
@base.resource("neutron", "subnet", order=next(_neutron_order),
               tenant_resource=True)
class NeutronSubnet(NeutronMixin):
    """Neutron subnets."""
    pass
@base.resource("neutron", "network", order=next(_neutron_order),
               tenant_resource=True)
class NeutronNetwork(NeutronMixin):
    """Neutron networks."""
    pass
@base.resource("neutron", "floatingip", order=next(_neutron_order),
               tenant_resource=True)
class NeutronFloatingIP(NeutronMixin):
    """Neutron floating IPs."""
    pass
@base.resource("neutron", "security_group", order=next(_neutron_order),
               tenant_resource=True)
class NeutronSecurityGroup(NeutronMixin):
    """Neutron security groups."""
    pass
@base.resource("neutron", "quota", order=next(_neutron_order),
               admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin, NeutronMixin):
    """Per-tenant Neutron quotas."""
    def delete(self):
        # Resets the tenant quota instead of deleting a real resource.
        self._manager().delete_quota(self.tenant_uuid)
# CINDER
_cinder_order = get_order(400)
@base.resource("cinder", "backups", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
    """Cinder volume backups."""
    pass
@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
    """Cinder volume snapshots."""
    pass
@base.resource("cinder", "transfers", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
    """Cinder volume transfers."""
    pass
@base.resource("cinder", "volumes", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolume(base.ResourceManager):
    """Cinder volumes."""
    pass
@base.resource("cinder", "quotas", order=next(_cinder_order),
               admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
    """Per-tenant Cinder quotas."""
    pass
# MANILA
_manila_order = get_order(450)
@base.resource("manila", "shares", order=next(_manila_order),
               tenant_resource=True)
class ManilaShare(base.ResourceManager):
    """Manila shares."""
    pass
@base.resource("manila", "share_networks", order=next(_manila_order),
               tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
    """Manila share networks."""
    pass
@base.resource("manila", "security_services", order=next(_manila_order),
               tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
    """Manila security services."""
    pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
    """Glance images owned by the current tenant."""
    def list(self):
        # Restrict listing to images owned by this tenant.
        return self._manager().list(owner=self.tenant_uuid)
# SAHARA
_sahara_order = get_order(600)
@base.resource("sahara", "job_executions", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
    """Sahara job executions."""
    pass
@base.resource("sahara", "jobs", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
    """Sahara jobs."""
    pass
@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
    """Sahara internal job binaries."""
    pass
@base.resource("sahara", "job_binaries", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
    """Sahara job binaries."""
    pass
@base.resource("sahara", "data_sources", order=next(_sahara_order),
               tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
    """Sahara data sources."""
    pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
               tenant_resource=True)
class SaharaCluster(base.ResourceManager):
    # Need special treatment for Sahara Cluster because of the way the
    # exceptions are described in:
    # https://github.com/openstack/python-saharaclient/blob/master/
    # saharaclient/api/base.py#L145
    def is_deleted(self):
        # A 404 from get() is the only reliable "gone" signal here.
        try:
            self._manager().get(self.id())
            return False
        except saharaclient_base.APIException as e:
            return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
    """Sahara cluster templates."""
    pass
@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
    """Sahara node group templates."""
    pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
    """Ceilometer alarms belonging to the current tenant."""
    def id(self):
        # Alarms carry ``alarm_id`` rather than the usual ``id`` attribute.
        return self.raw_resource.alarm_id
    def list(self):
        # Query alarms scoped to this tenant's project id.
        return self._manager().list(q=[{"field": "project_id",
                                        "op": "eq",
                                        "value": self.tenant_uuid}])
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
    """Zaqar queues of the user."""
    def list(self):
        # zaqarclient exposes queues() on the client object itself.
        return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)
@base.resource("designate", "domains", order=next(_designate_order))
class Designate(SynchronizedDeletion, base.ResourceManager):
    """Designate domains."""
    pass
@base.resource("designate", "servers", order=next(_designate_order),
               admin_required=True, perform_for_admin_only=True)
class DesignateServer(SynchronizedDeletion, base.ResourceManager):
    """Designate servers (admin-only)."""
    pass
# SWIFT
_swift_order = get_order(1000)
class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
    """Common id/delete plumbing for Swift containers and objects."""
    def _manager(self):
        # Prefer the admin client when the resource requires admin rights.
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()
    def id(self):
        # raw_resource is a [container] or [container, object] name list.
        return self.raw_resource
    def delete(self):
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        # NOTE(weiwu): *self.raw_resource is required because for deleting
        # container we are passing only container name, to delete object we
        # should pass as first argument container and second is object name.
        delete_method(*self.raw_resource)
@base.resource("swift", "object", order=next(_swift_order),
               tenant_resource=True)
class SwiftObject(SwiftMixin):
    """Every object in every container of the account."""
    def list(self):
        object_list = []
        containers = self._manager().get_account(full_listing=True)[1]
        for con in containers:
            objects = self._manager().get_container(con["name"],
                                                    full_listing=True)[1]
            for obj in objects:
                raw_resource = [con["name"], obj["name"]]
                object_list.append(raw_resource)
        return object_list
@base.resource("swift", "container", order=next(_swift_order),
               tenant_resource=True)
class SwiftContainer(SwiftMixin):
    """Every container of the account."""
    def list(self):
        containers = self._manager().get_account(full_listing=True)[1]
        return [[con["name"]] for con in containers]
# MISTRAL
@base.resource("mistral", "workbooks", order=1100, tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
    """Mistral workbooks; deleted by name rather than by id."""
    def delete(self):
        self._manager().delete(self.raw_resource.name)
# MURANO
_murano_order = get_order(1200)
@base.resource("murano", "environments", tenant_resource=True,
               order=next(_murano_order))
class MuranoEnvironments(SynchronizedDeletion, base.ResourceManager):
    """Murano environments."""
    pass
@base.resource("murano", "packages", tenant_resource=True,
               order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
    """Murano packages, except the built-in 'Core library'."""
    def list(self):
        # The 'Core library' package must never be removed.
        return filter(lambda x: x.name != "Core library",
                      super(MuranoPackages, self).list())
# IRONIC
_ironic_order = get_order(1300)
@base.resource("ironic", "node", admin_required=True,
               order=next(_ironic_order), perform_for_admin_only=True)
class IronicNodes(base.ResourceManager):
    """Ironic bare-metal nodes (admin-only)."""
    def id(self):
        return self.raw_resource.uuid
# FUEL
@base.resource("fuel", "environment", order=1400,
               admin_required=True, perform_for_admin_only=True)
class FuelEnvironment(base.ResourceManager):
    """Fuel environment.
    That is the only resource that can be deleted by fuelclient explicitly.
    """
    def id(self):
        return self.raw_resource["id"]
    def is_deleted(self):
        # get() returns a falsy value once the environment is gone.
        return not self._manager().get(self.id())
    def list(self):
        # Only environments named by Rally's Fuel scenarios.
        return [env for env in self._manager().list()
                if utils.name_matches_object(env["name"], futils.FuelScenario)]
# KEYSTONE
_keystone_order = get_order(9000)
class KeystoneMixin(SynchronizedDeletion):
    """Keystone access via the version-agnostic keystone wrapper."""
    def _manager(self):
        return keystone_wrapper.wrap(getattr(self.admin, self._service)())
    def delete(self):
        # Dispatch to the wrapper's per-resource delete_<resource>() method.
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        delete_method(self.id())
    def list(self):
        # TODO(boris-42): We should use such stuff in all list commands.
        resources = self._resource + "s"
        list_method = getattr(self._manager(), "list_%s" % resources)
        # Only resources created (named) by Rally are eligible for cleanup.
        return filter(kutils.is_temporary, list_method())
@base.resource("keystone", "user", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
    """Keystone users."""
    pass
@base.resource("keystone", "project", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
    """Keystone projects/tenants."""
    pass
@base.resource("keystone", "service", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
    """Keystone service catalog entries."""
    pass
@base.resource("keystone", "role", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
    """Keystone roles."""
    pass
@base.resource("keystone", "ec2", tenant_resource=True,
               order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
    """EC2 credentials of the user."""
    def list(self):
        return self._manager().list(self.raw_resource)
| |
# ----------------------------------------------------------------------
# Copyright (c) 2015 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
# ========================== DESIGN NOTES ==============================
# The DB Writter that performs the ETL process to a SQLite Database
#
# ======================================================================
import logging
import sqlite3
import schema
import re
import os
import datetime
import operator
import math
from server import Lazy, Server
# Relays
from emaproto import SRRB, SARB
# Anemometer
from emaproto import SAAB, SAAE, SACB, SACE, SWDB, SWDE
# Barometer
from emaproto import SABB, SABE, SCBB, SCBE
# Cloud Sensor
from emaproto import SCLB, SCLE
# Pluviometer
from emaproto import SPCB, SPCE, SPAB, SPAE
# Pyranometer
from emaproto import SPYB, SPYE
# Photometer
from emaproto import SPHB, SPHE
# Rain detector
from emaproto import SRAB, SRAE
# Thermometer
from emaproto import SATB, SATE, SRHB, SRHE, SDPB, SDPE
# Voltmeter
from emaproto import SPSB, SPSE
# Message Types
from emaproto import SMTB, SMTE, MTCUR, MTHIS, MTISO, MTMIN, MTMAX
from emaproto import STRFTIME, magnitude, decodeFreq
log = logging.getLogger('dbwritter')
# Error string sqlite3 raises while another connection holds the lock.
DATABASE_LOCKED = "database is locked"
# Fallback ids used when a dimension-table lookup fails.
UNKNOWN_STATION_ID = -1
UNKNOWN_MEAS_ID = -1
UNKNOWN_UNITS_ID = -1
UNKNOWN_DATE_ID = -1
UNKNOWN_TIME_ID = -1
# Measurement type labels stored in the Type dimension.
TYP_SAMPLES = 'Samples'
TYP_MIN = 'Minima'
TYP_MAX = 'Maxima'
TYP_UNK = 'Unknown'
TYP_MINMAX = "MinMax"
TYP_AVER = "Averages"
# Relay state labels stored in the Units dimension.
RLY_OPEN = 'Open'
RLY_CLOSED = 'Closed'
# ===============================
# Extract and Transform Functions
# ===============================
def roundDateTime(ts):
    '''Round a timestamp to the nearest minute.

    Returns (date_id, time_id, ts): YYYYMMDD and HHMM dimension keys of
    the rounded instant, plus the original (un-rounded) timestamp.
    '''
    # Adding half a minute and truncating the seconds rounds to nearest.
    nearest = ts + datetime.timedelta(seconds=30)
    date_id = nearest.year * 10000 + nearest.month * 100 + nearest.day
    time_id = nearest.hour * 100 + nearest.minute
    return date_id, time_id, ts
def xtDateTime(tstamp):
    '''Extract and transform Date & Time from (HH:MM:SS DD/MM/YYYY)'''
    # STRFTIME comes from emaproto and must match the EMA timestamp format.
    ts = datetime.datetime.strptime(tstamp, STRFTIME)
    return roundDateTime(ts)
def xtMeasType(message):
    '''Extract and transform Measurement Type.

    Maps the wire-level type byte(s) to one of the TYP_* labels used as
    keys into the Type dimension; unknown codes map to TYP_UNK.
    '''
    t = message[SMTB:SMTE]
    if t == MTCUR:
        # BUG FIX: this branch referenced the undefined name TYP_SAMPLE,
        # raising NameError for every current-sample message; the module
        # constant is TYP_SAMPLES.
        msgtype = TYP_SAMPLES
    elif t == MTMIN:
        msgtype = TYP_MIN
    elif t == MTMAX:
        msgtype = TYP_MAX
    else:
        msgtype = TYP_UNK
    return msgtype
def xtRoofRelay(message):
    '''Extract and transform Roof Relay Status'''
    # 'C' marks the roof relay closed; anything else is open.
    c = message[SRRB]
    return RLY_CLOSED if c == 'C' else RLY_OPEN
def xtAuxRelay(message):
    '''Extract and transform Aux Relay Status'''
    # 'E'/'e' mark the aux relay open; anything else is closed.
    c = message[SARB]
    return RLY_OPEN if c == 'E' or c == 'e' else RLY_CLOSED
def xtVoltage(message):
    '''Extract and transform Voltage'''
    # Values are transmitted as tenths of the physical unit.
    return float(message[SPSB:SPSE]) / 10
def xtWetLevel(message):
    '''Extract and transform Rain Probability'''
    return float(message[SRAB:SRAE]) / 10
def xtCloudLevel(message):
    '''Extract and transform Cloud Level'''
    return float(message[SCLB:SCLE]) / 10
def xtCalPressure(message):
    '''Extract and transform Calibrated Pressure at sea level'''
    return float(message[SCBB:SCBE]) / 10
def xtAbsPressure(message):
    '''Extract and transform Absolute Pressure'''
    return float(message[SABB:SABE]) / 10
def xtRain(message):
    '''Extract and transform Rain Level'''
    return float(message[SPCB:SPCE]) / 10
def xtIrradiation(message):
    '''Extract and transform Solar Irradiation Level'''
    return float(message[SPYB:SPYE]) / 10
def xtFrequency(message):
    '''Extracts Frequency information'''
    return decodeFreq(message[SPHB:SPHE])
def xtMagVisual(message):
    '''Extract and transform into visual magnitude per arcsec^2'''
    return magnitude(xtFrequency(message))
def xtTemperature(message):
    '''Extract and transform Temperature'''
    return float(message[SATB:SATE]) / 10
def xtHumidity(message):
    '''Extract and transform Relative Humidity'''
    return float(message[SRHB:SRHE]) / 10
def xtDewPoint(message):
    '''Extract and transform Dew Point'''
    return float(message[SDPB:SDPE]) / 10
def xtWindSpeed10m(message):
    '''Extract and transform Wind Speed average during 10 min.'''
    # NOTE(review): unlike the other magnitudes this one is NOT divided
    # by 10 -- presumably already transmitted in final units; confirm
    # against the EMA protocol spec.
    return float(message[SAAB:SAAE])
def xtWindSpeed(message):
    '''Extract and transform Wind Speed'''
    return float(message[SACB:SACE]) / 10
def xtWindDirection(message):
    '''Extract and transform Wind Direction'''
    return int(message[SWDB:SWDE])
# ===================
# MinMaxHistory Class
# ===================
class MinMaxHistory(object):
    """ETL helper for the MinMaxHistory fact table.

    Caches dimension-table ids (units, measurement type) on reload() and
    bulk-inserts min/max rows, tolerating rows that already exist.
    """
    def __init__(self, paren):
        # Parent DB writer, used for dimension lookups (lkUnits/lkType).
        self.__paren = paren
    def reload(self, conn):
        '''Reconfigures itself after a reload'''
        self.__conn = conn
        self.__cursor = self.__conn.cursor()
        self.__rowcount = self.rowcount()
        paren = self.__paren # shortcut
        # Build units cache: (roof relay, aux relay) -> units_id
        self.__relay = {
            (RLY_CLOSED,RLY_CLOSED): paren.lkUnits(roof=RLY_CLOSED,aux=RLY_CLOSED),
            (RLY_CLOSED,RLY_OPEN): paren.lkUnits(roof=RLY_CLOSED, aux=RLY_OPEN),
            (RLY_OPEN,RLY_CLOSED): paren.lkUnits(roof=RLY_OPEN, aux=RLY_CLOSED),
            (RLY_OPEN,RLY_OPEN): paren.lkUnits(roof=RLY_OPEN, aux=RLY_OPEN),
        }
        # Build measurement type cache: label -> type_id
        self.__type = {
            TYP_MIN: paren.lkType(TYP_MIN),
            TYP_MAX: paren.lkType(TYP_MAX),
        }
    def rowcount(self):
        '''Find out the current row count'''
        self.__cursor.execute("SELECT count(*) FROM MinMaxHistory")
        return self.__cursor.fetchone()[0]
    def insert(self, rows):
        '''Update the MinMaxHistory Fact Table.

        Returns the number of rows actually committed.  Overlapping rows
        are skipped, a locked database is logged and skipped, and any
        other sqlite error is re-raised after a rollback.
        '''
        log.debug("MinMaxHistory: updating table")
        # NOTE(review): the Python-2-only "except Exc, e" syntax was
        # replaced with the "as" form, valid on Python 2.6+ and Python 3.
        try:
            self.__cursor.executemany(
                "INSERT OR FAIL INTO MinMaxHistory VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
                rows)
        except sqlite3.IntegrityError:
            log.debug("MinMaxHistory: overlapping rows")
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            # BUG FIX: typo "cound" -> "could" in the log message.
            log.critical("MinMaxHistory: %d rows starting from %s could not be written: %s",
                         len(rows),
                         rows[0][0:4],
                         DATABASE_LOCKED
                         )
        except sqlite3.Error as e:
            log.error(e)
            self.__conn.rollback()
            raise
        self.__conn.commit() # commit anyway what was really updated
        rowcount = self.rowcount()
        commited = rowcount - self.__rowcount
        self.__rowcount = rowcount
        log.info("MinMaxHistory: commited rows (%d/%d)", commited, len(rows))
        return commited
    def row(self, date_id, time_id, station_id, tstamp, message):
        '''Produces one minmax row to be inserted into the database'''
        # BUG FIX: the fallback units id was -11, a typo for the
        # module-level UNKNOWN_UNITS_ID (-1) used by RealTimeSamples.row().
        units_id = self.__relay.get((xtRoofRelay(message),xtAuxRelay(message)),
                                    UNKNOWN_UNITS_ID)
        type_id = self.__type.get(xtMeasType(message), -1)
        return (
            date_id,                  # date_id
            time_id,                  # time_id
            station_id,               # station_id
            type_id,                  # type_id
            units_id,                 # units_id
            xtVoltage(message),       # voltage
            xtWetLevel(message),      # wet
            xtCloudLevel(message),    # cloudy
            xtCalPressure(message),   # cal_pressure
            xtAbsPressure(message),   # abs_pressure
            xtRain(message),          # rain
            xtIrradiation(message),   # irradiation
            xtMagVisual(message),     # vis_magnitude
            xtFrequency(message),     # frequency
            xtTemperature(message),   # temperature
            xtHumidity(message),      # rel_humidity
            xtDewPoint(message),      # dew_point
            xtWindSpeed(message),     # wind_speed
            xtWindSpeed10m(message),  # wind_speed10m
            xtWindDirection(message), # wind_direction
            tstamp,                   # timestamp
        )
# =====================
# AveragesHistory Class
# =====================
class AveragesHistory(object):
    """ETL helper for the AveragesHistory fact table.

    Caches units ids on reload() and bulk-inserts averaged rows,
    tolerating rows that already exist.
    """
    def __init__(self, paren):
        # Parent DB writer, used for dimension lookups (lkUnits).
        self.__paren = paren
    def reload(self, conn):
        '''Reconfigures itself after a reload'''
        self.__conn = conn
        self.__cursor = self.__conn.cursor()
        self.__rowcount = self.rowcount()
        paren = self.__paren # shortcut
        # Build units cache: (roof relay, aux relay) -> units_id
        self.__relay = {
            (RLY_CLOSED,RLY_CLOSED): paren.lkUnits(roof=RLY_CLOSED,aux=RLY_CLOSED),
            (RLY_CLOSED,RLY_OPEN): paren.lkUnits(roof=RLY_CLOSED, aux=RLY_OPEN),
            (RLY_OPEN,RLY_CLOSED): paren.lkUnits(roof=RLY_OPEN, aux=RLY_CLOSED),
            (RLY_OPEN,RLY_OPEN): paren.lkUnits(roof=RLY_OPEN, aux=RLY_OPEN),
        }
    def rowcount(self):
        '''Find out the current row count'''
        self.__cursor.execute("SELECT count(*) FROM AveragesHistory")
        return self.__cursor.fetchone()[0]
    def insert(self, rows):
        '''Update the AveragesHistory Fact Table.

        Returns the number of rows actually committed.  Overlapping rows
        are skipped, a locked database is logged and skipped, and any
        other sqlite error is re-raised after a rollback.
        '''
        log.debug("AveragesHistory: updating table")
        # NOTE(review): the Python-2-only "except Exc, e" syntax was
        # replaced with the "as" form, valid on Python 2.6+ and Python 3.
        try:
            self.__cursor.executemany(
                "INSERT OR FAIL INTO AveragesHistory VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
                rows)
        except sqlite3.IntegrityError:
            log.debug("AveragesHistory: overlapping rows")
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            # BUG FIX: typo "cound" -> "could" in the log message.
            log.critical("AveragesHistory: %d rows starting from %s could not be written: %s",
                         len(rows),
                         rows[0][0:4],
                         DATABASE_LOCKED
                         )
        except sqlite3.Error as e:
            log.error(e)
            self.__conn.rollback()
            raise
        self.__conn.commit() # commit anyway what was really updated
        rowcount = self.rowcount()
        commited = rowcount - self.__rowcount
        self.__rowcount = rowcount
        log.info("AveragesHistory: commited rows (%d/%d)", commited, len(rows))
        return commited
    def row(self, date_id, time_id, station_id, tstamp, message):
        '''Produces one averages history row to be inserted into the database'''
        # BUG FIX: the fallback units id was -11, a typo for the
        # module-level UNKNOWN_UNITS_ID (-1) used by RealTimeSamples.row().
        units_id = self.__relay.get((xtRoofRelay(message),xtAuxRelay(message)),
                                    UNKNOWN_UNITS_ID)
        return (
            date_id,                  # date_id
            time_id,                  # time_id
            station_id,               # station_id
            units_id,                 # units_id
            xtVoltage(message),       # voltage
            xtWetLevel(message),      # wet
            xtCloudLevel(message),    # cloudy
            xtCalPressure(message),   # cal_pressure
            xtAbsPressure(message),   # abs_pressure
            xtRain(message),          # rain
            xtIrradiation(message),   # irradiation
            xtMagVisual(message),     # vis_magnitude
            xtFrequency(message),     # frequency
            xtTemperature(message),   # temperature
            xtHumidity(message),      # rel_humidity
            xtDewPoint(message),      # dew_point
            xtWindSpeed(message),     # wind_speed
            xtWindSpeed10m(message),  # wind_speed10m
            xtWindDirection(message), # wind_direction
            tstamp,                   # timestamp
        )
# =====================
# RealTimeSamples Class
# =====================
class RealTimeSamples(object):
    """ETL helper for the RealTimeSamples fact table."""
    def __init__(self, paren):
        # Parent DB writer, used for dimension lookups (lkUnits/lkType).
        self.__paren = paren
    def reload(self, conn):
        '''Reconfigures itself after a reload'''
        self.__conn = conn
        self.__cursor = self.__conn.cursor()
        self.__rowcount = self.rowcount()
        paren = self.__paren # shortcut
        # Build units cache: (roof relay, aux relay) -> units_id
        self.__relay = {
            (RLY_CLOSED,RLY_CLOSED): paren.lkUnits(roof=RLY_CLOSED,aux=RLY_CLOSED),
            (RLY_CLOSED,RLY_OPEN): paren.lkUnits(roof=RLY_CLOSED, aux=RLY_OPEN),
            (RLY_OPEN,RLY_CLOSED): paren.lkUnits(roof=RLY_OPEN, aux=RLY_CLOSED),
            (RLY_OPEN,RLY_OPEN): paren.lkUnits(roof=RLY_OPEN, aux=RLY_OPEN),
        }
        # Build measurement type cache: label -> type_id
        self.__type = {
            TYP_SAMPLES: paren.lkType(TYP_SAMPLES),
            TYP_AVER: paren.lkType(TYP_AVER),
        }
    def rowcount(self):
        '''Find out the current row count'''
        self.__cursor.execute("SELECT count(*) FROM RealTimeSamples")
        return self.__cursor.fetchone()[0]
def insert(self, rows):
'''Update the RealTimeSamples Fact Table'''
log.debug("RealTimeSamples: updating table")
try:
self.__cursor.executemany(
"INSERT OR FAIL INTO RealTimeSamples VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
rows)
except sqlite3.IntegrityError, e:
log.warn("RealTimeSamples: overlapping rows")
except sqlite3.OperationalError, e:
self.__conn.rollback()
if e.args[0] != DATABASE_LOCKED:
raise
log.critical("RealTimeSamples: %d rows starting from %s cound not be written: %s",
len(rows),
rows[0][0:3],
DATABASE_LOCKED
)
except sqlite3.Error, e:
log.error(e)
self.__conn.rollback()
raise
self.__conn.commit() # commit anyway what was really updated
rowcount = self.rowcount()
commited = rowcount - self.__rowcount
self.__rowcount = rowcount
log.debug("RealTimeSamples: commited rows (%d/%d)", commited, len(rows))
return commited
def row(self, date_id, time_id, station_id, meas_type, tstamp, message):
'''Produces one real time row to be inserted into the database'''
# get units from cache
units_id = self.__relay.get((xtRoofRelay(message),xtAuxRelay(message)),-1)
type_id = self.__type.get(meas_type, -1)
return (
date_id, # date_id
time_id, # time_id
station_id, # station_id
type_id, # type_id
units_id, # units_id
xtVoltage(message), # voltage
xtWetLevel(message), # wet
xtCloudLevel(message), # cloudy
xtCalPressure(message), # cal_pressure
xtAbsPressure(message), # abs_pressure
xtRain(message), # rain
xtIrradiation(message), # irradiation
xtMagVisual(message), # vis_magnitude
xtFrequency(message), # frequency
xtTemperature(message), # temperature
xtHumidity(message), # rel_humidity
xtDewPoint(message), # dew_point
xtWindSpeed(message), # wind_speed
xtWindSpeed10m(message), # wind_speed10m
xtWindDirection(message), # wind_direction
tstamp, # timestamp
)
def delete(self, date_id):
'''Delete samples older than a given date_id'''
log.debug("Delete RealTimeSamples Table data older than %d", date_id)
try:
self.__cursor.execute(
"DELETE FROM RealTimeSamples WHERE date_id < ?", (date_id,))
except sqlite3.OperationalError, e:
self.__conn.rollback()
if e.args[0] != DATABASE_LOCKED:
raise
log.error("Table could not be purged: %s",DATABASE_LOCKED)
except sqlite3.Error, e:
log.error(e)
self.__conn.rollback()
raise
self.__conn.commit() # commit anyway what was really updated
rowcount = self.rowcount()
commited = rowcount - self.__rowcount
self.__rowcount = rowcount
log.debug("RealTimeSamples: deleted %d rows", commited)
return commited
# ===================
# HistoryStats Class
# ===================
class HistoryStats(object):
    '''Statistics fact-table writer for history bulk-dump insertions.
    <paren> is the owning DBWritter, used for dimension lookups.'''

    def __init__(self, paren):
        self.__paren = paren

    def reload(self, conn):
        '''Reconfigures itself after a reload'''
        self.__conn = conn
        self.__cursor = self.__conn.cursor()
        paren = self.__paren    # shortcut
        # Build type cache: measurement type -> type_id
        self.__type = {
            TYP_MINMAX: paren.lkType(TYP_MINMAX),
            TYP_AVER: paren.lkType(TYP_AVER),
        }

    def insert(self, rows):
        '''Update the HistoryStats Fact Table'''
        log.debug("HistoryStats: updating table")
        try:
            self.__cursor.executemany(
                "INSERT OR FAIL INTO HistoryStats VALUES(?,?,?,?,?,?,?)",
                rows)
        except sqlite3.IntegrityError:
            log.debug("HistoryStats: duplicate detected, probably a retained message")
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            log.critical("HistoryStats: %d rows could not be written: %s",
                len(rows),DATABASE_LOCKED)
        except sqlite3.Error as e:
            log.error(e)
            self.__conn.rollback()
            raise
        self.__conn.commit()    # commit anyway what was really updated

    def rows(self, station_id, meastype, submitted, commited):
        '''Produces one history stats record to be inserted into the database'''
        # Get values from cache; timestamp the record with (rounded) "now".
        type_id = self.__type.get(meastype, -1)
        date_id, time_id, ts = roundDateTime(datetime.datetime.utcnow())
        timestamp = ts.strftime("%Y-%m-%d %H:%M:%S")
        return (
            (
                date_id,        # date_id
                time_id,        # time_id
                station_id,     # station_id
                type_id,        # type_id
                submitted,      # submitted rows
                commited,       # commited rows
                timestamp,      # timestamp
            ),
        )
# ===================
# RealTimeStats Class
# ===================
class RealTimeStats(object):
    '''Statistics fact-table writer for real-time insertions.
    <paren> is the owning DBWritter, used for dimension lookups.'''

    def __init__(self, paren):
        self.__paren = paren

    def reload(self, conn):
        '''Reconfigures itself after a reload'''
        self.__conn = conn
        self.__cursor = self.__conn.cursor()
        # BUG FIX: delete() reads self.__rowcount, but it was never
        # initialized here (unlike RealTimeSamples.reload), so the first
        # delete() raised AttributeError.
        self.__rowcount = self.rowcount()
        paren = self.__paren    # shortcut
        # Build type cache: measurement type -> type_id
        self.__type = {
            TYP_SAMPLES: paren.lkType(TYP_SAMPLES),
            TYP_AVER: paren.lkType(TYP_AVER),
        }

    def rowcount(self):
        '''Find out the current row count'''
        self.__cursor.execute("SELECT count(*) FROM RealTimeStats")
        return self.__cursor.fetchone()[0]

    def insert(self, rows):
        '''Update the RealTimeStats Fact Table'''
        log.debug("RealTimeStats: updating table")
        try:
            self.__cursor.executemany(
                "INSERT OR FAIL INTO RealTimeStats VALUES(?,?,?,?,?,?,?,?,?)",
                rows)
        except sqlite3.IntegrityError:
            log.debug("RealTimeStats: duplicate detected")
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            log.critical("RealTimeStats: %d rows could not be written: %s",
                len(rows), DATABASE_LOCKED)
        except sqlite3.Error as e:
            log.error(e)
            self.__conn.rollback()
            raise
        self.__conn.commit()    # commit anyway what was really updated

    def rows(self, date_id, time_id, station_id, meas_type, tstamp,
             window_size, nsamples, num_bytes, lag):
        '''Produces one real time stats record to be inserted into the database'''
        # Get values from cache
        type_id = self.__type.get(meas_type, -1)
        return (
            (
                date_id,        # date_id
                time_id,        # time_id
                station_id,     # station_id
                type_id,        # type_id
                tstamp,         # timestamp
                window_size,    # window_size
                nsamples,       # num_samples
                num_bytes,      # num_bytes
                lag,            # lag
            ),
        )

    def delete(self, date_id):
        '''Delete samples older than a given date_id'''
        log.debug("Delete RealTimeStats Table data older than %d", date_id)
        try:
            self.__cursor.execute(
                "DELETE FROM RealTimeStats WHERE date_id < ?", (date_id,))
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            log.error("Table could not be purged: %s",DATABASE_LOCKED)
        except sqlite3.Error as e:
            log.error(e)
            self.__conn.rollback()
            raise
        self.__conn.commit()    # commit anyway what was really updated
        rowcount = self.rowcount()
        commited = rowcount - self.__rowcount
        self.__rowcount = rowcount
        # BUG FIX: this log line previously said "RealTimeSamples".
        log.debug("RealTimeStats: deleted %d rows", commited)
        return commited
# ==========
# Main Class
# ==========
class DBWritter(Lazy):
    '''Periodic (Lazy) task that loads EMA MQTT messages into the SQLite
    star-schema database: dimension lookups plus several fact tables.'''

    # Emit an info line once every N real-time row writes.
    N_RT_WRITES = 60

    def __init__(self, srv, parser):
        Lazy.__init__(self, 60)
        self.srv = srv
        self.period = 1
        self.__rtwrites = 0
        self.__parser = parser
        self.__file = None
        self.__conn = None
        self.minmax = MinMaxHistory(self)
        self.realtime = RealTimeSamples(self)
        self.aver5min = AveragesHistory(self)
        self.histats = HistoryStats(self)
        self.rtstats = RealTimeStats(self)
        srv.addLazy(self)
        self.reload()
        log.info("DBWritter object created")

    def reload(self):
        '''Reload config data and reconfigure itself'''
        parser = self.__parser
        lvl = parser.get("DBASE", "dbase_log")
        dbfile = parser.get("DBASE", "dbase_file")
        json_dir = parser.get("DBASE", "dbase_json_dir")
        period = parser.getint("DBASE", "dbase_period")
        date_fmt = parser.get("DBASE", "dbase_date_fmt")
        year_start = parser.getint("DBASE", "dbase_year_start")
        year_end = parser.getint("DBASE", "dbase_year_end")
        purge_flag = parser.getboolean("DBASE", "dbase_purge")
        stats_flag = parser.getboolean("DBASE", "dbase_stats")
        self.__purge = purge_flag
        self.__stats = stats_flag
        log.setLevel(lvl)
        self.period = period
        self.setPeriod(60*period)
        try:
            if self.__conn is not None and self.__file != dbfile:
                # Database file changed: drop the old connection first.
                self.__conn.close()
                self.__conn = None
            if self.__conn is None:
                log.debug("opening database %s", dbfile)
                self.__conn = sqlite3.connect(dbfile)
            else:
                log.debug("reusing database connection to %s", dbfile)
            self.__cursor = self.__conn.cursor()
            self.__file = dbfile
            schema.generate(self.__conn,
                            json_dir,
                            date_fmt,
                            year_start,
                            year_end,
                            replace=False)
        except sqlite3.OperationalError as e:
            self.__conn.rollback()
            if e.args[0] != DATABASE_LOCKED:
                raise
            log.critical("Dimension Table could not be populated: %s",
                         DATABASE_LOCKED
                         )
        except sqlite3.Error as e:
            log.error("Error %s:", e.args[0])
            if self.__conn:
                self.__conn.rollback()
            raise
        self.minmax.reload(self.__conn)
        self.aver5min.reload(self.__conn)
        self.realtime.reload(self.__conn)
        self.histats.reload(self.__conn)
        self.rtstats.reload(self.__conn)
        log.debug("Reload complete")

    # =======
    # ETL API
    # =======

    # -------------------------------
    # Process Hourly MinMax bulk dump
    # -------------------------------

    def processMinMax(self, mqtt_id, payload):
        '''extract MinMax History data and load into its table'''
        log.debug("Received minmax history message from station %s", mqtt_id)
        station_id = self.lkStation(mqtt_id)
        if station_id == UNKNOWN_STATION_ID:
            log.warn("Ignoring minmax message from unregistered station %s",
                     mqtt_id)
            return
        rows = []
        message = payload.split('\n')
        msglen = len(message)
        # Messages come in triplets: (min sample, max sample, timestamp).
        # Floor division keeps this correct under Python 3 as well.
        for i in range(0, msglen//3):
            date_id, time_id, t0 = xtDateTime(message[3*i+2])
            tsmp = t0.strftime("%Y-%m-%d %H:%M:%S")
            r = self.minmax.row(date_id, time_id, station_id, tsmp, message[3*i])
            rows.append(r)
            r = self.minmax.row(date_id, time_id, station_id, tsmp, message[3*i+1])
            rows.append(r)
        # It seemed there is no need to sort the dates
        # non-overlapping data do get written anyway
        #rows = sorted(rows, key=operator.itemgetter(0,1), reverse=True)
        commited = self.minmax.insert(rows)
        if self.__stats:
            # Insert record into the statistics table
            self.histats.insert(
                self.histats.rows(station_id, TYP_MINMAX, len(rows), commited)
            )

    # -------------------------------
    # Process Current Status Messages
    # -------------------------------

    def processCurrentStatus(self, mqtt_id, payload, t1):
        '''Extract real time EMA status message and store it into its table
        t1 is the timestamp at the mqtt on_message callback.
        '''
        station_id = self.lkStation(mqtt_id)
        if station_id == UNKNOWN_STATION_ID:
            log.warn("Ignoring status message from unregistered station %s",
                     mqtt_id)
            return
        message = payload.split('\n')
        if len(message) != 2:
            log.error("Wrong current status message from station %s", mqtt_id)
            return
        date_id, time_id, t0 = xtDateTime(message[1])
        tstamp = t0.strftime("%Y-%m-%d %H:%M:%S")
        log.debug("Received current status message from station %s", mqtt_id)
        type_m = TYP_SAMPLES
        row = self.realtime.row(date_id, time_id, station_id, type_m, tstamp,
                                message[0])
        self.__rtwrites += self.realtime.insert((row,))
        if (self.__rtwrites % DBWritter.N_RT_WRITES) == 1:
            # Lazy %-args so the string is only built when the line is emitted.
            log.info("RealTimeSamples rows written so far: %d", self.__rtwrites)
        # Compute and store statistics
        # lag = measured lag MQTT[local] - RPi[remote]
        # the timestamp reference is RPi[remote]
        if self.__stats:
            lag = int(round((t1 - t0).total_seconds()))
            nbytes = len(payload)
            num_samples = 1
            window_size = 0    # by definition (1 sample)
            self.rtstats.insert(
                self.rtstats.rows(date_id, time_id, station_id, type_m, tstamp,
                                  window_size, num_samples, nbytes, lag)
            )

    # -------------------------------
    # Process Average Status Messages
    # -------------------------------

    def processAverageStatus(self, mqtt_id, payload, t1):
        '''Extract real time EMA status message and store it into its table
        t1 is the timestamp at the mqtt on_message callback.
        '''
        station_id = self.lkStation(mqtt_id)
        if station_id == UNKNOWN_STATION_ID:
            log.warn("Ignoring status message from unregistered station %s",
                     mqtt_id)
            return
        message = payload.split('\n')
        if len(message) != 4:
            log.error("Wrong average status message from station %s", mqtt_id)
            return
        date_id, time_id, t0 = xtDateTime(message[1])
        tstamp = t0.strftime("%Y-%m-%d %H:%M:%S")
        log.debug("Received average status message from station %s", mqtt_id)
        type_m = TYP_AVER
        row = self.realtime.row(date_id, time_id, station_id, type_m, tstamp,
                                message[0])
        self.__rtwrites += self.realtime.insert((row,))
        if (self.__rtwrites % DBWritter.N_RT_WRITES) == 1:
            log.info("RealTimeSamples rows written so far: %d", self.__rtwrites)
        # Compute and store statistics
        # lag = measured lag MQTT[local] - RPi[remote]
        # the timestamp reference is RPi[remote]
        if self.__stats:
            _, _, tOldest = xtDateTime(message[2])
            num_samples = int(message[3][1:-1])
            lag = int(round((t1 - t0).total_seconds()))
            nbytes = len(payload)
            window_size = (t0 - tOldest).total_seconds()
            self.rtstats.insert(
                self.rtstats.rows(date_id, time_id, station_id, type_m, tstamp,
                                  window_size, num_samples, nbytes, lag)
            )

    # ---------------------------------
    # Process 5 min. averaged Bulk Dump
    # ----------------------------------

    def processAveragesHistory(self, mqtt_id, payload):
        '''extract 5 min. averages history data and load into its table'''
        log.debug("Received averages history message from station %s", mqtt_id)
        station_id = self.lkStation(mqtt_id)
        if station_id == UNKNOWN_STATION_ID:
            log.warn("Ignoring averages history message from unregistered station %s",
                     mqtt_id)
            return
        rows = []
        message = payload.split('\n')
        msglen = len(message)
        # Messages come in pairs: (sample, timestamp).
        for i in range(0, msglen//2):
            date_id, time_id, t0 = xtDateTime(message[2*i+1])
            tsmp = t0.strftime("%Y-%m-%d %H:%M:%S")
            r = self.aver5min.row(date_id, time_id, station_id, tsmp, message[2*i])
            rows.append(r)
        commited = self.aver5min.insert(rows)
        if self.__stats:
            # Insert record into the statistics table
            self.histats.insert(
                self.histats.rows(station_id, TYP_AVER, len(rows), commited)
            )

    # ----------------------------
    # Implement The Lazy interface
    # ----------------------------

    def work(self):
        '''
        Called periodically from a Server object.
        Write blocking behaviour.
        '''
        log.debug("work()")
        if self.__purge:
            date_id = self.datePurgeFrom()
            if date_id:
                self.realtime.delete(date_id)
                self.rtstats.delete(date_id)

    # ------------------------------------
    # Dimensions SQL Lookup helper methods
    # ------------------------------------

    def lkType(self, meas_type):
        '''return meas_id key from meas_type'''
        self.__cursor.execute("SELECT type_id FROM Type WHERE type=?",
                              (meas_type,))
        meas_id = self.__cursor.fetchone() or (UNKNOWN_MEAS_ID,)
        log.verbose("lkType(%s) => %s", meas_type, meas_id)
        return meas_id[0]

    def lkStation(self, mqtt_id):
        '''return station_id key from mqtt_id'''
        self.__cursor.execute("SELECT station_id FROM Station WHERE mqtt_id=?",
                              (mqtt_id,))
        station_id = self.__cursor.fetchone() or (UNKNOWN_STATION_ID,)
        log.verbose("lkStation(%s) => %s", mqtt_id, station_id)
        return station_id[0]

    def lkUnits(self, roof, aux):
        '''return units_id key from units'''
        self.__cursor.execute("SELECT units_id FROM Units WHERE roof_relay=? AND aux_relay=?",
                              (roof, aux))
        units_id = self.__cursor.fetchone() or (UNKNOWN_UNITS_ID,)
        log.verbose("lkUnits(roof=%s,aux=%s) => %s", roof, aux, units_id)
        return units_id[0]

    # -------------------------------
    # Facts SQL Loader helper methods
    # -------------------------------

    def datePurgeFrom(self):
        '''Return the date_id (YYYYMMDD) to purge from, or None.
        Only yields a value during the first 2*period minutes after UTC
        midnight, so the purge runs at most once per day.'''
        now = datetime.datetime.utcnow()
        midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
        delta = datetime.timedelta(minutes=2*self.period)
        if now - midnight < delta:
            return now.year*10000 + now.month*100 +now.day
        else:
            return None
| |
# Modifying graphs: We will now extend basicgraphs.py. Copy the code
# in this module and call it e.g. mygraphs.py.
"""
This is a module for working with *undirected* graphs (simple graphs or multigraphs).
It contains three classes: Vertex, Edge and Graph.
The interface of these classes is extensive and allows programming all kinds of Graph algorithms.
However, the data structure used is quite basic and inefficient: a Graph object stores only a Vertex list and an Edge list, and methods such as adjacency testing / finding neighbors of a Vertex require going through the entire Edge list!
"""
# version: 29-01-2015, Paul Bonsma
# Module-level flag: when True, Graph.V() and Graph.E() hand out the
# internal lists directly (no defensive copy) -- faster, but callers
# must then not mutate the returned lists.
unsafe = False
# Set to True for faster, but unsafe listing of all vertices and edges.
class GraphError(Exception):
    """Raised when a Graph/Vertex/Edge operation is used incorrectly."""

    def __init__(self, message):
        super().__init__(message)
        self.mess = message

    def __str__(self):
        return self.mess
class Vertex:
    """
    Vertex objects have an attribute <_graph> pointing to the Graph they are part of,
    and an attribute <_label> which can be anything: it is not used for any methods,
    except for __repr__.
    """

    def __init__(self, graph: "Graph", label: int = 0):
        """
        Creates a Vertex, part of <Graph>, with optional label <label>.
        (Labels of different vertices may be chosen the same; this does
        not influence correctness of the methods, but will make the string
        representation of the Graph ambiguous.)
        """
        self._graph = graph
        self._label = label
        self._links = []    # incident Edge objects
        self._nbs = {}      # neighbor Vertex -> multiplicity (parallel edges)

    def __repr__(self):
        return str(self._label)

    def adj(self, other: "Vertex") -> bool:
        """
        Returns True iff Vertex <self> is adjacent to <other> Vertex.
        """
        return self._nbs.get(other, 0) > 0

    def inclist(self) -> list:
        """
        Returns the list of edges incident with Vertex <self>.
        """
        return self._links.copy()

    def nbs(self) -> list:
        """
        Returns the list of neighbors of Vertex <self>.
        In case of parallel edges: duplicates are not removed from this list!
        """
        nbl = []
        for e in self.inclist():
            nbl.append(e.otherend(self))
        return nbl

    def deg(self) -> int:
        """
        Returns the degree of Vertex <self>.
        """
        # len() on the internal list avoids the copy made by inclist().
        return len(self._links)

    def internalAddEdge(self, edge: "Edge"):
        # Bookkeeping called by Graph.addedge: record the edge and bump the
        # multiplicity counter for the far endpoint.
        self._links.append(edge)
        other = edge.otherend(self)
        self._nbs[other] = self._nbs.get(other, 0) + 1

    def internalDelEdge(self, edge: "Edge"):
        # Bookkeeping called by Graph.deledge.
        # BUG FIX: the counter was previously assigned to itself and never
        # decremented, so adj() kept reporting edges that had been deleted.
        self._links.remove(edge)
        other = edge.otherend(self)
        self._nbs[other] = self._nbs[other] - 1
        if (self._nbs[other] == 0):
            self._nbs.pop(other, None)

    def label(self):
        return self._label
class Edge:
    """
    Edges have attributes <_tail> and <_head> which point to the end vertices
    (Vertex objects). The order of these is arbitrary (undirected edges).
    """

    def __init__(self, tail: "Vertex", head: "Vertex"):
        """
        Creates an Edge between vertices <tail> and <head>, which must
        belong to the same Graph.
        """
        # tail and head must be Vertex objects.
        if not tail._graph == head._graph:
            raise GraphError(
                'Can only add edges between vertices of the same Graph')
        self._tail = tail
        self._head = head

    def __repr__(self):
        return f'({self._tail},{self._head})'

    def tail(self) -> "Vertex":
        return self._tail

    def head(self) -> "Vertex":
        return self._head

    def otherend(self, oneend: "Vertex") -> "Vertex":
        """
        Given one end Vertex <oneend> of the Edge <self>, this returns
        the other end Vertex of <self>.
        """
        # <oneend> must be either the head or the tail of this Edge.
        if self._tail == oneend:
            return self._head
        if self._head == oneend:
            return self._tail
        raise GraphError(
            'Edge.otherend(oneend): oneend must be head or tail of Edge')

    def incident(self, vertex: "Vertex") -> bool:
        """
        Returns True iff the Edge <self> is incident with <vertex>.
        """
        return self._tail == vertex or self._head == vertex
#TODO: Create unit tests to confirm correct behaviour
class Graph():
    """
    A Graph object has as main attributes:
     <_V>: the list of its vertices
     <_E>: the list of its edges
    In addition:
     <_simple> is True iff the Graph must stay simple (used when trying to add edges)
     <_directed> is False for now (feel free to write a directed variant of this
      module)
     <_nextlabel> is used to assign default labels to vertices.
    """

    def __init__(self, n: int = 0, simple: bool = False):
        """
        Creates a Graph.
        Optional argument <n>: number of vertices.
        Optional argument <simple>: indicates whether the Graph should stay simple.
        """
        self._V = []
        self._E = []
        self._directed = False
        # may be changed later for a more general version that can also
        # handle directed graphs.
        self._simple = simple
        self._nextlabel = 0
        for i in range(n):
            self.addvertex()

    def __repr__(self):
        return 'V=' + str(self._V) + '\nE=' + str(self._E)

    def V(self) -> list:
        """
        Returns the list of vertices of the Graph.
        """
        if unsafe:    # but fast
            return self._V
        else:
            return self._V[:]    # return a *copy* of this list

    def E(self) -> list:
        """
        Returns the list of edges of the Graph.
        """
        if unsafe:    # but fast
            return self._E
        else:
            return self._E[:]    # return a *copy* of this list

    def __getitem__(self, i) -> "Vertex":
        """
        Returns the <i>th Vertex of the Graph -- as given in the Vertex list;
        this is not related to the Vertex labels.
        """
        return self._V[i]

    def addvertex(self, label: int = -1) -> "Vertex":
        """
        Add a Vertex to the Graph.
        Optional argument: a Vertex label (arbitrary)
        """
        if label == -1:
            label = self._nextlabel
            self._nextlabel += 1
        u = Vertex(self, label)
        self._V.append(u)
        return u

    def addedge(self, tail: "Vertex", head: "Vertex") -> "Edge":
        """
        Add an Edge to the Graph between <tail> and <head>.
        Includes some checks in case the Graph should stay simple.
        """
        if self._simple:
            if tail == head:
                raise GraphError('No loops allowed in simple graphs')
            for e in self._E:
                if (e._tail == tail and e._head == head):
                    raise GraphError(
                        'No multiedges allowed in simple graphs')
                if not self._directed:
                    if (e._tail == head and e._head == tail):
                        raise GraphError(
                            'No multiedges allowed in simple graphs')
        if not (tail._graph == self and head._graph == self):
            raise GraphError(
                'Edges of a Graph G must be between vertices of G')
        e = Edge(tail, head)
        tail.internalAddEdge(e)
        head.internalAddEdge(e)
        self._E.append(e)
        return e

    def addedge_simple(self, tail: int, head: int) -> "Edge":
        """Add an Edge between the vertices labeled <tail> and <head>."""
        return self.addedge(self.findvertex(tail), self.findvertex(head))

    def findvertex(self, label: int) -> "Vertex":
        """Return the first Vertex with label <label>, or None if absent."""
        for v in self.V():
            if v.label() == label:
                return v

    def findedge(self, u: "Vertex", v: "Vertex") -> "Edge":
        """
        If <u> and <v> are adjacent, this returns an Edge between them.
        (Arbitrary in the case of multigraphs.)
        Otherwise this returns <None>.
        """
        for e in self._E:
            if (e._tail == u and e._head == v) or (e._tail == v and e._head == u):
                return e
        return None

    def adj(self, u: "Vertex", v: "Vertex") -> bool:
        """
        Returns True iff vertices <u> and <v> are adjacent.
        """
        # "is not None": identity test is the correct check for a sentinel.
        return self.findedge(u, v) is not None

    def isdirected(self) -> bool:
        """
        Returns False, because for now these graphs are always undirected.
        """
        return self._directed

    def deledge(self, e: "Edge"):
        """Remove Edge <e> from the Graph (and from its endpoints)."""
        self._E.remove(e)
        e._head.internalDelEdge(e)
        e._tail.internalDelEdge(e)

    def delvertex(self, v: "Vertex"):
        """Remove Vertex <v> and all its incident edges from the Graph."""
        for e in v.inclist():
            self.deledge(e)
        self._V.remove(v)

    def complement(self) -> "Graph":
        """
        Returns a new Graph on the same vertex labels containing exactly
        the edges *not* present in <self> (no loops).
        """
        c = Graph()
        newVerts = {}
        for v in self.V():
            newVerts[v] = c.addvertex(v.label())
        doneVerts = []
        for v, newV in newVerts.items():
            for w, newW in newVerts.items():
                if v == w: continue
                if not w.adj(v):
                    # BUG FIX: only add each complement edge once for
                    # undirected graphs (the old ordered double loop added
                    # every edge twice), mirroring the guard used in clone().
                    if c.isdirected() or (newW not in doneVerts):
                        c.addedge(newV, newW)
            doneVerts.append(newV)
        return c

    def clone(self) -> "Graph":
        """
        Returns a copy of this Graph: new Vertex/Edge objects, same labels
        and adjacency (one edge per adjacent pair).
        """
        c = Graph()
        newVerts = {}
        for v in self.V():
            newVerts[v] = c.addvertex(v.label())
        linkedVerts = []
        for v, newV in newVerts.items():
            for w, newW in newVerts.items():
                if (v != w) and w.adj(v):
                    # Add each undirected edge only once.
                    if c.isdirected() or (newW not in linkedVerts):
                        c.addedge(newV, newW)
            linkedVerts.append(newV)
        return c
| |
""" Test how signals behave on classes.
"""
from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.react import HasSignals, input, connect, lazy, Signal, InputSignal
def test_signals_on_classes_are_descriptors():
    """Signals declared on a HasSignals class are unconnected descriptors;
    they only connect (and fire) when an instance is created."""
    shown = []
    class Test(HasSignals):
        @input
        def title(v=''):
            return str(v)
        @connect('title')
        def show_title1(v=''):
            shown.append(v)
        @connect('title')
        def show_title2(self, v=''):
            shown.append(v)
    assert len(shown) == 0
    # On the class itself the signals report as not connected.
    assert Test.title.not_connected
    assert Test.show_title1.not_connected
    assert Test.show_title2.not_connected
    # Connecting through the class (no instance) is an error.
    raises(RuntimeError, Test.show_title1.connect)
    t = Test()
    # Instantiation fires both connected handlers once each.
    assert len(shown) == 2
def test_hassignals_without_self():
    """Signal functions on a HasSignals subclass may omit ``self``;
    the resulting bound signal still knows its instance."""
    title_lengths = []
    class Test(HasSignals):
        @input
        def title(v=''):
            return str(v)
        @connect('title')
        def title_len(v):
            return len(v)
        @connect('title_len')
        def show_title(v):
            title_lengths.append(v)
    t = Test()
    assert t.title.__self__ is t
    assert set(t.__signals__) == set(['title', 'title_len', 'show_title'])
    # The chain fired once on instantiation (empty title -> length 0).
    assert len(title_lengths) == 1
    assert title_lengths[-1] == 0
    t.title('foo')
    assert len(title_lengths) == 2
    assert title_lengths[-1] == 3
def test_hassignals_with_self():
    """Same chain as test_hassignals_without_self, but with explicit
    ``self`` parameters -- behavior must be identical."""
    title_lengths = []
    class Test(HasSignals):
        @input
        def title(self, v=''):
            return str(v)
        @connect('title')
        def title_len(self, v):
            return len(v)
        @connect('title_len')
        def show_title(self, v):
            title_lengths.append(v)
    t = Test()
    assert set(t.__signals__) == set(['title', 'title_len', 'show_title'])
    # The chain fired once on instantiation (empty title -> length 0).
    assert len(title_lengths) == 1
    assert title_lengths[-1] == 0
    t.title('foo')
    assert len(title_lengths) == 2
    assert title_lengths[-1] == 3
def test_hassignals_init():
    """Initial signal values can be passed to HasSignals(); unknown
    signal names are rejected with ValueError."""
    class Str(InputSignal):
        def __init__(self, func=None, upstream=[], *args):
            InputSignal.__init__(self, str, [], *args)
    class Test(HasSignals):
        name = Str
        @input
        def title(self, v='a'):
            return str(v)
    # 'foo' is not a signal on Test.
    raises(ValueError, Test, foo=3)
    t = Test()
    assert t.title() == 'a'
    assert t.name() == ''
    t = Test(title='b', name='c')
    assert t.title() == 'b'
    assert t.name() == 'c'
def test_anyclass():
    """Signals also work on plain (non-HasSignals) classes, but then the
    signals must be initialized and connected manually."""
    title_lengths = []
    class Test(object):
        @input
        def title(self, v=''):
            return str(v)
        @connect('title')
        def title_len(self, v):
            return len(v)
        @connect('title_len')
        def show_title(self, v):
            title_lengths.append(v)
    t = Test()
    assert not hasattr(t, '__signals__')
    # No signals instances have been created yet
    assert len(title_lengths) == 0
    t.show_title.connect(False)
    # Upstream signals do not yet exist
    assert len(title_lengths) == 0
    # Initialize the signals for real
    t.title
    t.title_len.connect()
    t.show_title.connect()
    assert len(title_lengths) == 1  # class signal does not fire, because it sees self arg
    assert title_lengths[-1] == 0
    t.title('foo')
    assert len(title_lengths) == 2
    assert title_lengths[-1] == 3
def test_connection_locals1():
    """An unresolved upstream signal name ('title') can later be found in
    the caller's local scope when connect_signals() is invoked."""
    class Test(HasSignals):
        @connect('title')
        def title_len(self, v):
            return len(v)
    t = Test()
    assert t.title_len.not_connected
    @input
    def title(v=''):
        return str(v)
    t.connect_signals()
    assert not t.title_len.not_connected
    assert t.title_len() == 0
    title('foo')
    assert t.title_len() == 3
def test_connection_locals2():
    """Upstream-name lookup is relative to the frame of the class
    definition, not the frame where the instance gets created."""
    class Test(HasSignals):
        @connect('title')
        def title_len(self, v):
            return len(v)
    def create_instance():
        @input
        def title(v=''):
            return str(v)
        return Test()
    # The frame to look for "title" is relative to the class def, not the instance
    t = create_instance()
    assert t.title_len.not_connected
def test_func_name():
    """Signals built from oddly-named callables (lambda, builtin) are
    rejected when used as class attributes."""
    # Do not allow weird names, though not recommended
    # todo: now that we have a metaclass, we can allow it!
    with raises(RuntimeError):
        class Test(HasSignals):
            s1 = Signal(lambda x: x, [])
    with raises(RuntimeError):
        class Test(HasSignals):
            s2 = Signal(float, [])
def test_props():
    """The @prop decorator exposes a signal as a plain attribute: setting
    the attribute feeds the signal chain."""
    from flexx.react.decorators import prop
    title_lengths = []
    class Test(HasSignals):
        @prop
        def title(self, v=''):
            return str(v)
        @connect('title')
        def title_len(self, v):
            return len(v)
        @connect('title_len')
        def show_title(self, v):
            title_lengths.append(v)
    t = Test()
    assert set(t.__signals__) == set(['title', 'title_len', 'show_title'])
    # The chain fired once on instantiation (empty title -> length 0).
    assert len(title_lengths) == 1
    assert title_lengths[-1] == 0
    # Attribute assignment (not a call) updates the prop signal.
    t.title = 'foo'
    assert len(title_lengths) == 2
    assert title_lengths[-1] == 3
run_tests_if_main()  # run the tests above when this module is executed directly
| |
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
import mox
from oslo import messaging
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova import rpc
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import cast_as_call
from nova.tests.compute import test_compute
from nova.tests import fake_instance
from nova.tests import fake_notifier
from nova.tests import fake_server_actions
from nova.tests.objects import test_migration
from nova import utils
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        try:
            # Reuse the cached elevated context if we already built one.
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    def setUp(self):
        """Common fixture: fake request context, stubbed notifier, and a
        context deserializer that verifies round-tripped ids."""
        super(_BaseTestCase, self).setUp()
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Check the serialized dict carries the original ids, then hand
            # back the very same context object so tests can detect it.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
    def test_instance_update(self):
        """Updating vm_state via the conductor persists to the DB, and the
        returned instance reflects the new value."""
        instance = self._create_fake_instance()
        new_inst = self._do_update(instance['uuid'],
                                   vm_state=vm_states.STOPPED)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)
        self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db == None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
    def test_migration_get_in_progress_by_host_and_node(self):
        """The conductor call proxies straight through to the DB API."""
        self.mox.StubOutWithMock(db,
                                 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node')
        self.assertEqual(result, 'fake-result')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_instance_info_cache_delete(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
db.instance_info_cache_delete(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
def test_vol_get_usage_by_time(self):
self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
'fake-usage')
self.mox.ReplayAll()
result = self.conductor.vol_get_usage_by_time(self.context,
'fake-time')
self.assertEqual(result, 'fake-usage')
def test_vol_usage_update(self):
    """vol_usage_update stores usage and emits a volume.usage notification."""
    self.mox.StubOutWithMock(db, 'vol_usage_update')
    self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
    fake_inst = {'uuid': 'fake-uuid',
                 'project_id': 'fake-project',
                 'user_id': 'fake-user',
                 'availability_zone': 'fake-az',
                 }
    # Instance identity fields are unpacked from the dict for the DB call.
    db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
                        fake_inst['uuid'],
                        fake_inst['project_id'],
                        fake_inst['user_id'],
                        fake_inst['availability_zone'],
                        False).AndReturn('fake-usage')
    compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
    self.mox.ReplayAll()
    self.conductor.vol_usage_update(self.context, 'fake-vol',
                                    22, 33, 44, 55, fake_inst,
                                    'fake-update-time', False)
    # Exactly one notification, published by this conductor host.
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    msg = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual('conductor.%s' % self.conductor_manager.host,
                     msg.publisher_id)
    self.assertEqual('volume.usage', msg.event_type)
    self.assertEqual('INFO', msg.priority)
    self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
    """compute_node_create delegates to the DB API."""
    self.mox.StubOutWithMock(db, 'compute_node_create')
    db.compute_node_create(self.context, 'fake-values').AndReturn(
        'fake-result')
    self.mox.ReplayAll()
    result = self.conductor.compute_node_create(self.context,
                                                'fake-values')
    self.assertEqual(result, 'fake-result')

def test_compute_node_update(self):
    """compute_node_update passes the node id and values dict through."""
    node = {'id': 'fake-id'}
    self.mox.StubOutWithMock(db, 'compute_node_update')
    db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
        AndReturn('fake-result')
    self.mox.ReplayAll()
    result = self.conductor.compute_node_update(self.context, node,
                                                {'fake': 'values'})
    self.assertEqual(result, 'fake-result')

def test_compute_node_update_with_non_json_stats(self):
    """A dict-valued 'stats' field is JSON-serialized before the DB call."""
    node = {'id': 'fake-id'}
    fake_input = {'stats': {'a': 'b'}}
    fake_vals = {'stats': jsonutils.dumps(fake_input['stats'])}
    self.mox.StubOutWithMock(db, 'compute_node_update')
    db.compute_node_update(self.context, node['id'], fake_vals
                           ).AndReturn('fake-result')
    self.mox.ReplayAll()
    self.conductor.compute_node_update(self.context, node,
                                       fake_input)

def test_compute_node_delete(self):
    """compute_node_delete returns None on success."""
    node = {'id': 'fake-id'}
    self.mox.StubOutWithMock(db, 'compute_node_delete')
    db.compute_node_delete(self.context, node['id']).AndReturn(None)
    self.mox.ReplayAll()
    result = self.conductor.compute_node_delete(self.context, node)
    self.assertIsNone(result)

def test_task_log_get(self):
    """task_log_get forwards all positional arguments including state."""
    self.mox.StubOutWithMock(db, 'task_log_get')
    db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
                    'state').AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                         'end', 'host', 'state')
    self.assertEqual(result, 'result')

def test_task_log_get_with_no_state(self):
    """Omitting state results in None being passed to the DB layer."""
    self.mox.StubOutWithMock(db, 'task_log_get')
    db.task_log_get(self.context, 'task', 'begin', 'end',
                    'host', None).AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                         'end', 'host')
    self.assertEqual(result, 'result')

def test_task_log_begin_task(self):
    """task_log_begin_task runs with an elevated (admin) context."""
    self.mox.StubOutWithMock(db, 'task_log_begin_task')
    db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
                           'end', 'host', 'items',
                           'message').AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.task_log_begin_task(
        self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
    self.assertEqual(result, 'result')

def test_task_log_end_task(self):
    """task_log_end_task runs with an elevated (admin) context."""
    self.mox.StubOutWithMock(db, 'task_log_end_task')
    db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
                         'host', 'errors', 'message').AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.task_log_end_task(
        self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
    self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
    """notify_usage_exists assembles audit info and emits 'exists' usage."""
    # Expected extra_usage_info built from the stubbed helpers below.
    info = {
        'audit_period_beginning': 'start',
        'audit_period_ending': 'end',
        'bandwidth': 'bw_usage',
        'image_meta': {},
        'extra': 'info',
    }
    instance = {
        'system_metadata': [],
    }
    self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
    self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
    self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
    notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
    notifications.bandwidth_usage(instance, 'start', True).AndReturn(
        'bw_usage')
    notifier = self.conductor_manager.notifier
    compute_utils.notify_about_instance_usage(notifier,
                                              self.context, instance,
                                              'exists',
                                              system_metadata={},
                                              extra_usage_info=info)
    self.mox.ReplayAll()
    self.conductor.notify_usage_exists(self.context, instance,
                                       system_metadata={},
                                       extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
    """The call is forwarded to the security group API unchanged."""
    self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                             'trigger_members_refresh')
    self.conductor_manager.security_group_api.trigger_members_refresh(
        self.context, [1, 2, 3])
    self.mox.ReplayAll()
    self.conductor.security_groups_trigger_members_refresh(self.context,
                                                           [1, 2, 3])
def test_get_ec2_ids(self):
    """get_ec2_ids maps instance, image, kernel and ramdisk ids to EC2 ids."""
    expected = {
        'instance-id': 'ec2-inst-id',
        'ami-id': 'ec2-ami-id',
        'kernel-id': 'ami-kernel-ec2-kernelid',
        'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
    }
    inst = {
        'uuid': 'fake-uuid',
        'kernel_id': 'ec2-kernelid',
        'ramdisk_id': 'ec2-ramdiskid',
        'image_ref': 'fake-image',
    }
    self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
    self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
    self.mox.StubOutWithMock(ec2utils, 'image_type')
    ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
        expected['instance-id'])
    ec2utils.glance_id_to_ec2_id(self.context,
                                 inst['image_ref']).AndReturn(
                                     expected['ami-id'])
    # Kernel and ramdisk follow the same glance-id-to-ec2-id mapping,
    # prefixed with their ec2 image type.
    for image_type in ['kernel', 'ramdisk']:
        image_id = inst['%s_id' % image_type]
        ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
        ec2utils.glance_id_to_ec2_id(self.context, image_id,
                                     'ami-' + image_type).AndReturn(
                                         'ami-%s-ec2-%sid' % (image_type,
                                                              image_type))
    self.mox.ReplayAll()
    result = self.conductor.get_ec2_ids(self.context, inst)
    self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor Manager Tests.

    Exercises the local ConductorManager directly (no RPC layer), so
    self.conductor and self.conductor_manager are the same object.
    """

    def setUp(self):
        super(ConductorTestCase, self).setUp()
        self.conductor = conductor_manager.ConductorManager()
        self.conductor_manager = self.conductor
def test_instance_info_cache_update(self):
    """info-cache update is keyed by the instance's uuid."""
    fake_values = {'key1': 'val1', 'key2': 'val2'}
    fake_inst = {'uuid': 'fake-uuid'}
    self.mox.StubOutWithMock(db, 'instance_info_cache_update')
    db.instance_info_cache_update(self.context, 'fake-uuid',
                                  fake_values)
    self.mox.ReplayAll()
    self.conductor.instance_info_cache_update(self.context,
                                              fake_inst,
                                              fake_values)

def test_migration_get(self):
    """migration_get returns the primitive form of the DB migration row."""
    migration = db.migration_create(self.context.elevated(),
                                    {'instance_uuid': 'fake-uuid',
                                     'status': 'migrating'})
    self.assertEqual(jsonutils.to_primitive(migration),
                     self.conductor.migration_get(self.context,
                                                  migration['id']))

def test_migration_get_unconfirmed_by_dest_compute(self):
    """The window/host arguments are forwarded to the DB API."""
    self.mox.StubOutWithMock(db,
                             'migration_get_unconfirmed_by_dest_compute')
    db.migration_get_unconfirmed_by_dest_compute(self.context,
                                                 'fake-window',
                                                 'fake-host')
    self.mox.ReplayAll()
    self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
                                                             'fake-window',
                                                             'fake-host')

def test_compute_confirm_resize(self):
    """compute_confirm_resize delegates to compute_api.confirm_resize."""
    self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                             'confirm_resize')
    self.conductor_manager.compute_api.confirm_resize(
        self.context, 'instance', migration='migration')
    self.mox.ReplayAll()
    self.conductor.compute_confirm_resize(self.context, 'instance',
                                          'migration')

def test_migration_create(self):
    """migration_create merges instance identity into the values dict."""
    inst = {'uuid': 'fake-uuid',
            'host': 'fake-host',
            'node': 'fake-node'}
    self.mox.StubOutWithMock(db, 'migration_create')
    db.migration_create(self.context.elevated(),
                        {'instance_uuid': inst['uuid'],
                         'source_compute': inst['host'],
                         'source_node': inst['node'],
                         'fake-key': 'fake-value'}).AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.migration_create(self.context, inst,
                                             {'fake-key': 'fake-value'})
    self.assertEqual(result, 'result')
def test_block_device_mapping_update_or_create(self):
    """create=True/False/None dispatch to create/update/update_or_create,
    and each result is propagated to the cells API with the same flag."""
    fake_bdm = {'id': 'fake-id', 'device_name': 'foo'}
    fake_bdm2 = {'id': 'fake-id', 'device_name': 'foo2'}
    cells_rpcapi = self.conductor.cells_rpcapi
    self.mox.StubOutWithMock(db, 'block_device_mapping_create')
    self.mox.StubOutWithMock(db, 'block_device_mapping_update')
    self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
    self.mox.StubOutWithMock(cells_rpcapi,
                             'bdm_update_or_create_at_top')
    # create=True path
    db.block_device_mapping_create(self.context,
                                   fake_bdm).AndReturn(fake_bdm2)
    cells_rpcapi.bdm_update_or_create_at_top(self.context, fake_bdm2,
                                             create=True)
    # create=False path
    db.block_device_mapping_update(self.context, fake_bdm['id'],
                                   fake_bdm).AndReturn(fake_bdm2)
    cells_rpcapi.bdm_update_or_create_at_top(self.context,
                                             fake_bdm2,
                                             create=False)
    # create unspecified (None) path
    db.block_device_mapping_update_or_create(
        self.context, fake_bdm).AndReturn(fake_bdm2)
    cells_rpcapi.bdm_update_or_create_at_top(self.context,
                                             fake_bdm2,
                                             create=None)
    self.mox.ReplayAll()
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm,
                                                         create=True)
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm,
                                                         create=False)
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm)
def test_instance_get_all_by_host(self):
    """Without a node the host-only DB call is used; with a node the
    host-and-node variant is used. Both run with an elevated context."""
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
    db.instance_get_all_by_host(self.context.elevated(),
                                'host', None).AndReturn('result')
    db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                         'node').AndReturn('result')
    self.mox.ReplayAll()
    result = self.conductor.instance_get_all_by_host(self.context, 'host')
    self.assertEqual(result, 'result')
    result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                     'node')
    self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
                  db_result_listified=False, db_exception=None):
    """Stub DB method *name* and drive it via service_get_all_by.

    :param dbargs: positional args expected by the DB stub
    :param condargs: kwargs passed to conductor.service_get_all_by
    :param db_result_listified: expect the result wrapped in a list
    :param db_exception: if set, the stub raises instead of returning
    """
    self.mox.StubOutWithMock(db, name)
    if db_exception:
        # Recorded twice on purpose: the conductor method is invoked
        # twice below (once expecting the wrapped ExpectedException,
        # once through ExceptionHelper for the original exception).
        getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
    else:
        getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
    self.mox.ReplayAll()
    if db_exception:
        # The local conductor wraps known errors for RPC transport.
        self.assertRaises(messaging.ExpectedException,
                          self.conductor.service_get_all_by,
                          self.context, **condargs)
        # ExceptionHelper unwraps, exposing the original exception class.
        self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(db_exception.__class__,
                          self.conductor.service_get_all_by,
                          self.context, **condargs)
    else:
        result = self.conductor.service_get_all_by(self.context,
                                                   **condargs)
        if db_result_listified:
            self.assertEqual(['fake-result'], result)
        else:
            self.assertEqual('fake-result', result)
# Each test below maps one service_get_all_by kwarg combination to the
# DB API method it should dispatch to (see _test_stubbed above).

def test_service_get_all(self):
    self._test_stubbed('service_get_all', (), {})

def test_service_get_by_host_and_topic(self):
    self._test_stubbed('service_get_by_host_and_topic',
                       ('host', 'topic'),
                       dict(topic='topic', host='host'))

def test_service_get_all_by_topic(self):
    self._test_stubbed('service_get_all_by_topic',
                       ('topic',),
                       dict(topic='topic'))

def test_service_get_all_by_host(self):
    self._test_stubbed('service_get_all_by_host',
                       ('host',),
                       dict(host='host'))

def test_service_get_by_compute_host(self):
    # topic='compute' + host returns a single row, listified by conductor.
    self._test_stubbed('service_get_by_compute_host',
                       ('host',),
                       dict(topic='compute', host='host'),
                       db_result_listified=True)

def test_service_get_by_args(self):
    self._test_stubbed('service_get_by_args',
                       ('host', 'binary'),
                       dict(host='host', binary='binary'))

def test_service_get_by_compute_host_not_found(self):
    self._test_stubbed('service_get_by_compute_host',
                       ('host',),
                       dict(topic='compute', host='host'),
                       db_exception=exc.ComputeHostNotFound(host='host'))

def test_service_get_by_args_not_found(self):
    self._test_stubbed('service_get_by_args',
                       ('host', 'binary'),
                       dict(host='host', binary='binary'),
                       db_exception=exc.HostBinaryNotFound(binary='binary',
                                                           host='host'))

def test_security_groups_trigger_handler(self):
    """Handler args list is unpacked before reaching the secgroup API."""
    self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                             'trigger_handler')
    self.conductor_manager.security_group_api.trigger_handler('event',
                                                              self.context,
                                                              'args')
    self.mox.ReplayAll()
    self.conductor.security_groups_trigger_handler(self.context,
                                                   'event', ['args'])
def test_compute_confirm_resize_with_objects(self):
    """confirm_resize also accepts Instance/Migration objects (not dicts)."""
    # use an instance object rather than a dict
    instance = self._create_fake_instance()
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), instance)
    migration = test_migration.fake_db_migration()
    mig_obj = migration_obj.Migration._from_db_object(
        self.context.elevated(), migration_obj.Migration(),
        migration)
    self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                             'confirm_resize')
    self.conductor_manager.compute_api.confirm_resize(
        self.context, inst_obj, migration=mig_obj)
    self.mox.ReplayAll()
    self.conductor.compute_confirm_resize(self.context, inst_obj,
                                          mig_obj)
def _test_object_action(self, is_classmethod, raise_exception):
    """Invoke an object (or object-class) action through conductor.

    :param is_classmethod: route through object_class_action vs
                           object_action
    :param raise_exception: make the target method raise
    """
    class TestObject(obj_base.NovaObject):
        def foo(self, context, raise_exception=False):
            if raise_exception:
                raise Exception('test')
            else:
                return 'test'

        @classmethod
        def bar(cls, context, raise_exception=False):
            if raise_exception:
                raise Exception('test')
            else:
                return 'test'

    obj = TestObject()
    if is_classmethod:
        result = self.conductor.object_class_action(
            self.context, TestObject.obj_name(), 'bar', '1.0',
            tuple(), {'raise_exception': raise_exception})
    else:
        updates, result = self.conductor.object_action(
            self.context, obj, 'foo', tuple(),
            {'raise_exception': raise_exception})
    self.assertEqual('test', result)

def test_object_action(self):
    self._test_object_action(False, False)

def test_object_action_on_raise(self):
    self.assertRaises(messaging.ExpectedException,
                      self._test_object_action, False, True)

def test_object_class_action(self):
    self._test_object_action(True, False)

def test_object_class_action_on_raise(self):
    self.assertRaises(messaging.ExpectedException,
                      self._test_object_action, True, True)

def test_object_action_copies_object(self):
    """object_action must operate on a copy so mutations show as updates."""
    class TestObject(obj_base.NovaObject):
        fields = {'dict': fields.DictOfStringsField()}

        def touch_dict(self, context):
            self.dict['foo'] = 'bar'
            self.obj_reset_changes()

    obj = TestObject()
    obj.dict = {}
    obj.obj_reset_changes()
    updates, result = self.conductor.object_action(
        self.context, obj, 'touch_dict', tuple(), {})
    # NOTE(danms): If conductor did not properly copy the object, then
    # the new and reference copies of the nested dict object will be
    # the same, and thus 'dict' will not be reported as changed
    self.assertIn('dict', updates)
    self.assertEqual({'foo': 'bar'}, updates['dict'])
def test_aggregate_metadata_add(self):
    """aggregate_metadata_add extracts the aggregate id for the DB call."""
    aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
    metadata = {'foo': 'bar'}
    self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
    db.aggregate_metadata_add(
        mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
            metadata)
    self.mox.ReplayAll()
    result = self.conductor.aggregate_metadata_add(self.context,
                                                   aggregate,
                                                   metadata)
    self.assertEqual(result, metadata)

def test_aggregate_metadata_delete(self):
    """aggregate_metadata_delete passes the id and key to the DB API."""
    aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
    self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
    db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
    self.mox.ReplayAll()
    self.conductor.aggregate_metadata_delete(self.context, aggregate,
                                             'fake')

def test_security_group_get_by_instance(self):
    """Security group lookup is keyed by the instance's uuid."""
    fake_inst = {'uuid': 'fake-instance'}
    self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
    db.security_group_get_by_instance(
        self.context, fake_inst['uuid']).AndReturn('it worked')
    self.mox.ReplayAll()
    result = self.conductor.security_group_get_by_instance(self.context,
                                                           fake_inst)
    self.assertEqual(result, 'it worked')

def test_security_group_rule_get_by_security_group(self):
    """Rule lookup is keyed by the security group's id."""
    fake_secgroup = {'id': 'fake-secgroup'}
    self.mox.StubOutWithMock(db,
                             'security_group_rule_get_by_security_group')
    db.security_group_rule_get_by_security_group(
        self.context, fake_secgroup['id']).AndReturn('it worked')
    self.mox.ReplayAll()
    result = self.conductor.security_group_rule_get_by_security_group(
        self.context, fake_secgroup)
    self.assertEqual(result, 'it worked')
def _test_expected_exceptions(self, db_method, conductor_method, errors,
                              *args, **kwargs):
    # Tests that expected exceptions are handled properly.
    # Each DB-raised error must surface as messaging.ExpectedException,
    # proving the conductor method declares it in expected_exceptions.
    for error in errors:
        with mock.patch.object(db, db_method, side_effect=error):
            self.assertRaises(messaging.ExpectedException,
                              conductor_method,
                              self.context, *args, **kwargs)
# Each test below verifies the set of DB exceptions that the matching
# conductor method is expected to wrap (see _test_expected_exceptions).

def test_action_event_start_expected_exceptions(self):
    error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
    self._test_expected_exceptions(
        'action_event_start', self.conductor.action_event_start, [error],
        {'foo': 'bar'})

def test_action_event_finish_expected_exceptions(self):
    errors = (exc.InstanceActionNotFound(request_id='1',
                                         instance_uuid='2'),
              exc.InstanceActionEventNotFound(event='1', action_id='2'))
    self._test_expected_exceptions(
        'action_event_finish', self.conductor.action_event_finish,
        errors, {'foo': 'bar'})

def test_instance_update_expected_exceptions(self):
    errors = (exc.InvalidUUID(uuid='foo'),
              exc.InstanceNotFound(instance_id=1),
              exc.UnexpectedTaskStateError(expected='foo',
                                           actual='bar'))
    self._test_expected_exceptions(
        'instance_update', self.conductor.instance_update,
        errors, None, {'foo': 'bar'})

def test_instance_get_expected_exceptions(self):
    error = exc.InstanceNotFound(instance_id=1)
    self._test_expected_exceptions(
        'instance_get', self.conductor.instance_get,
        [error], None)

def test_instance_get_by_uuid_expected_exceptions(self):
    error = exc.InstanceNotFound(instance_id=1)
    self._test_expected_exceptions(
        'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
        [error], None)

def test_migration_get_expected_exceptions(self):
    error = exc.MigrationNotFound(migration_id=1)
    self._test_expected_exceptions(
        'migration_get', self.conductor.migration_get,
        [error], None)

def test_migration_update_expected_exceptions(self):
    error = exc.MigrationNotFound(migration_id=1)
    self._test_expected_exceptions(
        'migration_update', self.conductor.migration_update,
        [error], {'id': 1}, None)

def test_aggregate_host_add_expected_exceptions(self):
    error = exc.AggregateHostExists(aggregate_id=1, host='foo')
    self._test_expected_exceptions(
        'aggregate_host_add', self.conductor.aggregate_host_add,
        [error], {'id': 1}, None)

def test_aggregate_host_delete_expected_exceptions(self):
    error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
    self._test_expected_exceptions(
        'aggregate_host_delete', self.conductor.aggregate_host_delete,
        [error], {'id': 1}, None)

def test_aggregate_get_expected_exceptions(self):
    error = exc.AggregateNotFound(aggregate_id=1)
    self._test_expected_exceptions(
        'aggregate_get', self.conductor.aggregate_get,
        [error], None)

def test_aggregate_metadata_delete_expected_exceptions(self):
    error = exc.AggregateMetadataNotFound(aggregate_id=1,
                                          metadata_key='foo')
    self._test_expected_exceptions(
        'aggregate_metadata_delete',
        self.conductor.aggregate_metadata_delete,
        [error], {'id': 1}, None)

def test_service_update_expected_exceptions(self):
    error = exc.ServiceNotFound(service_id=1)
    self._test_expected_exceptions(
        'service_update',
        self.conductor.service_update,
        [error], {'id': 1}, None)

def test_service_destroy_expected_exceptions(self):
    error = exc.ServiceNotFound(service_id=1)
    self._test_expected_exceptions(
        'service_destroy',
        self.conductor.service_destroy,
        [error], 1)
def _setup_aggregate_with_host(self):
    """Create an aggregate 'foo', add host 'bar', and re-read it."""
    aggregate_ref = db.aggregate_create(self.context.elevated(),
                                        {'name': 'foo'},
                                        metadata={'availability_zone':
                                                  'foo'})
    self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
    # Re-fetch so the hosts list reflects the add above.
    aggregate_ref = db.aggregate_get(self.context.elevated(),
                                     aggregate_ref['id'])
    return aggregate_ref

def test_aggregate_host_add(self):
    """aggregate_host_add makes the host appear in the aggregate."""
    aggregate_ref = self._setup_aggregate_with_host()
    self.assertTrue(any([host == 'bar'
                         for host in aggregate_ref['hosts']]))
    db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

def test_aggregate_host_delete(self):
    """aggregate_host_delete removes the host from the aggregate."""
    aggregate_ref = self._setup_aggregate_with_host()
    self.conductor.aggregate_host_delete(self.context, aggregate_ref,
                                         'bar')
    aggregate_ref = db.aggregate_get(self.context.elevated(),
                                     aggregate_ref['id'])
    self.assertFalse(any([host == 'bar'
                          for host in aggregate_ref['hosts']]))
    db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_network_migrate_instance_start(self):
    """Call is forwarded to network_api.migrate_instance_start."""
    self.mox.StubOutWithMock(self.conductor_manager.network_api,
                             'migrate_instance_start')
    self.conductor_manager.network_api.migrate_instance_start(self.context,
                                                              'instance',
                                                              'migration')
    self.mox.ReplayAll()
    self.conductor.network_migrate_instance_start(self.context,
                                                  'instance',
                                                  'migration')

def test_network_migrate_instance_finish(self):
    """Call is forwarded to network_api.migrate_instance_finish."""
    self.mox.StubOutWithMock(self.conductor_manager.network_api,
                             'migrate_instance_finish')
    self.conductor_manager.network_api.migrate_instance_finish(
        self.context, 'instance', 'migration')
    self.mox.ReplayAll()
    self.conductor.network_migrate_instance_finish(self.context,
                                                   'instance',
                                                   'migration')

def test_instance_destroy(self):
    """instance_destroy is keyed by the instance's uuid."""
    self.mox.StubOutWithMock(db, 'instance_destroy')
    db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
    self.mox.ReplayAll()
    result = self.conductor.instance_destroy(self.context,
                                             {'uuid': 'fake-uuid'})
    self.assertEqual(result, 'fake-result')

def test_compute_unrescue(self):
    """compute_unrescue delegates to compute_api.unrescue."""
    self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                             'unrescue')
    self.conductor_manager.compute_api.unrescue(self.context, 'instance')
    self.mox.ReplayAll()
    self.conductor.compute_unrescue(self.context, 'instance')

def test_instance_get_all_by_filters(self):
    """Default call uses the primary DB (use_slave=False)."""
    filters = {'foo': 'bar'}
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(self.context, filters,
                                   'fake-key', 'fake-sort',
                                   columns_to_join=None, use_slave=False)
    self.mox.ReplayAll()
    self.conductor.instance_get_all_by_filters(self.context, filters,
                                               'fake-key', 'fake-sort')

def test_instance_get_all_by_filters_use_slave(self):
    """use_slave=True is forwarded to the DB layer."""
    filters = {'foo': 'bar'}
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(self.context, filters,
                                   'fake-key', 'fake-sort',
                                   columns_to_join=None, use_slave=True)
    self.mox.ReplayAll()
    self.conductor.instance_get_all_by_filters(self.context, filters,
                                               'fake-key', 'fake-sort',
                                               use_slave=True)

def test_instance_get_active_by_window_joined(self):
    """All window arguments are passed through positionally."""
    self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
    db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                            'fake-end', 'fake-proj',
                                            'fake-host')
    self.mox.ReplayAll()
    self.conductor.instance_get_active_by_window_joined(
        self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')

def test_instance_fault_create(self):
    """instance_fault_create delegates to the DB API."""
    self.mox.StubOutWithMock(db, 'instance_fault_create')
    db.instance_fault_create(self.context, 'fake-values').AndReturn(
        'fake-result')
    self.mox.ReplayAll()
    result = self.conductor.instance_fault_create(self.context,
                                                  'fake-values')
    self.assertEqual(result, 'fake-result')

def test_action_event_start(self):
    """action_event_start reaches db.action_event_start."""
    self.mox.StubOutWithMock(db, 'action_event_start')
    db.action_event_start(self.context, mox.IgnoreArg())
    self.mox.ReplayAll()
    self.conductor.action_event_start(self.context, {})

def test_action_event_finish(self):
    """action_event_finish reaches db.action_event_finish."""
    self.mox.StubOutWithMock(db, 'action_event_finish')
    db.action_event_finish(self.context, mox.IgnoreArg())
    self.mox.ReplayAll()
    self.conductor.action_event_finish(self.context, {})
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Runs the same _BaseTestCase suite through the RPC client against a
    started conductor service, exercising the serialization layer.
    """

    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
    """create=True/False/unset dispatch to create/update/update_or_create."""
    fake_bdm = {'id': 'fake-id'}
    self.mox.StubOutWithMock(db, 'block_device_mapping_create')
    self.mox.StubOutWithMock(db, 'block_device_mapping_update')
    self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
    db.block_device_mapping_create(self.context, fake_bdm)
    db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
    db.block_device_mapping_update_or_create(self.context, fake_bdm)
    self.mox.ReplayAll()
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm,
                                                         create=True)
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm,
                                                         create=False)
    self.conductor.block_device_mapping_update_or_create(self.context,
                                                         fake_bdm)

def _test_stubbed(self, name, dbargs, condargs,
                  db_result_listified=False, db_exception=None):
    """Stub DB method *name* and drive it via service_get_all_by.

    Unlike the manager-level variant, the RPC layer unwraps
    ExpectedException, so the original exception class is asserted
    directly and the stub is only recorded once.
    """
    self.mox.StubOutWithMock(db, name)
    if db_exception:
        getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
    else:
        getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
    self.mox.ReplayAll()
    if db_exception:
        self.assertRaises(db_exception.__class__,
                          self.conductor.service_get_all_by,
                          self.context, **condargs)
    else:
        result = self.conductor.service_get_all_by(self.context,
                                                   **condargs)
        if db_result_listified:
            self.assertEqual(['fake-result'], result)
        else:
            self.assertEqual('fake-result', result)
# Same dispatch matrix as in ConductorTestCase, but exercised over RPC.

def test_service_get_all(self):
    self._test_stubbed('service_get_all', (), {})

def test_service_get_by_host_and_topic(self):
    self._test_stubbed('service_get_by_host_and_topic',
                       ('host', 'topic'),
                       dict(topic='topic', host='host'))

def test_service_get_all_by_topic(self):
    self._test_stubbed('service_get_all_by_topic',
                       ('topic',),
                       dict(topic='topic'))

def test_service_get_all_by_host(self):
    self._test_stubbed('service_get_all_by_host',
                       ('host',),
                       dict(host='host'))

def test_service_get_by_compute_host(self):
    self._test_stubbed('service_get_by_compute_host',
                       ('host',),
                       dict(topic='compute', host='host'),
                       db_result_listified=True)

def test_service_get_by_args(self):
    self._test_stubbed('service_get_by_args',
                       ('host', 'binary'),
                       dict(host='host', binary='binary'))

def test_service_get_by_compute_host_not_found(self):
    self._test_stubbed('service_get_by_compute_host',
                       ('host',),
                       dict(topic='compute', host='host'),
                       db_exception=exc.ComputeHostNotFound(host='host'))

def test_service_get_by_args_not_found(self):
    self._test_stubbed('service_get_by_args',
                       ('host', 'binary'),
                       dict(host='host', binary='binary'),
                       db_exception=exc.HostBinaryNotFound(binary='binary',
                                                           host='host'))

def test_security_groups_trigger_handler(self):
    """Handler args survive the RPC round trip and are unpacked."""
    self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                             'trigger_handler')
    self.conductor_manager.security_group_api.trigger_handler('event',
                                                              self.context,
                                                              'arg')
    self.mox.ReplayAll()
    self.conductor.security_groups_trigger_handler(self.context,
                                                   'event', ['arg'])
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests.

    Exercises the public conductor_api.API facade against a started
    conductor service. self.db is set to None so base-class tests can
    detect they are running through the API rather than straight to
    the DB layer.
    """

    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        self.db = None

    def _do_update(self, instance_uuid, **updates):
        # NOTE(danms): the public API takes actual keyword arguments,
        # so override the base class here to make the call correctly
        return self.conductor.instance_update(self.context, instance_uuid,
                                              **updates)

    def test_bw_usage_get(self):
        """bw_usage_get reads usage without triggering an update."""
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_get(*get_args)
        self.assertEqual(result, 'foo')

    def test_block_device_mapping_update_or_create(self):
        """The API exposes create/update/update_or_create separately."""
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        db.block_device_mapping_create(self.context, 'fake-bdm')
        db.block_device_mapping_update(self.context,
                                       'fake-id', {'id': 'fake-id'})
        db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
        self.conductor.block_device_mapping_update(self.context,
                                                   'fake-id', {})
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             'fake-bdm')
def _test_stubbed(self, name, *args, **kwargs):
    """Stub DB method *name* and call the same-named conductor API method.

    If the first positional arg is a FakeContext it is used as the
    context expected by the DB stub; otherwise self.context is used.
    Supported kwargs: db_exception (stub raises), returns (False when
    the API method has no return value).
    """
    if args and isinstance(args[0], FakeContext):
        ctxt = args[0]
        args = args[1:]
    else:
        ctxt = self.context
    db_exception = kwargs.get('db_exception')
    self.mox.StubOutWithMock(db, name)
    if db_exception:
        getattr(db, name)(ctxt, *args).AndRaise(db_exception)
    else:
        getattr(db, name)(ctxt, *args).AndReturn('fake-result')
    if name == 'service_destroy':
        # TODO(russellb) This is a hack ... SetUp() starts the conductor()
        # service. There is a cleanup step that runs after this test which
        # also deletes the associated service record. This involves a call
        # to db.service_destroy(), which we have stubbed out.
        db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    if db_exception:
        self.assertRaises(db_exception.__class__,
                          getattr(self.conductor, name),
                          self.context, *args)
    else:
        result = getattr(self.conductor, name)(self.context, *args)
        self.assertEqual(
            result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
    """host is forwarded unchanged to the db call."""
    self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
    """host is forwarded unchanged to the db call."""
    self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
    """host and binary are forwarded unchanged to the db call."""
    self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
    """A ComputeHostNotFound from the db is re-raised to the caller."""
    self._test_stubbed('service_get_by_compute_host', 'host',
                       db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
    """A HostBinaryNotFound from the db is re-raised to the caller."""
    self._test_stubbed('service_get_by_args', 'host', 'binary',
                       db_exception=exc.HostBinaryNotFound(binary='binary',
                                                           host='host'))
def test_service_create(self):
    """service_create forwards the values dict to the db call."""
    self._test_stubbed('service_create', {})
def test_service_destroy(self):
    """service_destroy returns nothing even though the db call returns."""
    self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
    """service_update passes only the service's id to the db layer."""
    ctxt = self.context
    self.mox.StubOutWithMock(db, 'service_update')
    # The conductor receives the whole service dict ({'id': ''}) but the
    # db call is expected with just the bare id value.
    db.service_update(ctxt, '', {}).AndReturn('fake-result')
    self.mox.ReplayAll()
    result = self.conductor.service_update(self.context, {'id': ''}, {})
    self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
    """Supplying a node narrows the query to host+node on an elevated
    context."""
    self._test_stubbed('instance_get_all_by_host_and_node',
                       self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
    """Without a node only instance_get_all_by_host may be called."""
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
    # No expectation is recorded on the *_and_node stub: mox fails the
    # test if the conductor calls it.
    db.instance_get_all_by_host(self.context.elevated(), 'host',
                                None).AndReturn('fake-result')
    self.mox.ReplayAll()
    result = self.conductor.instance_get_all_by_host(self.context,
                                                     'host')
    self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
    """wait_until_ready keeps pinging until the service answers."""
    timeouts = []
    calls = dict(count=0)

    def fake_ping(context, message, timeout):
        # Fail the first 14 pings so wait_until_ready has to retry.
        timeouts.append(timeout)
        calls['count'] += 1
        if calls['count'] < 15:
            raise messaging.MessagingTimeout("fake")

    self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
    self.conductor.wait_until_ready(self.context)
    # Ten attempts use a 10 second timeout, and at least one is made
    # with timeout=None (block indefinitely).
    self.assertEqual(timeouts.count(10), 10)
    self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
    """The call is forwarded to the security group API's trigger_handler."""
    self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                             'trigger_handler')
    self.conductor_manager.security_group_api.trigger_handler('event',
                                                              self.context,
                                                              'arg')
    self.mox.ReplayAll()
    self.conductor.security_groups_trigger_handler(self.context,
                                                   'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests.

    Reruns the API test cases against LocalAPI, which calls the manager
    in-process instead of over RPC.
    """

    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        self.conductor = conductor_api.LocalAPI()
        self.conductor_manager = self.conductor._manager._target
        self.db = db

    def test_client_exceptions(self):
        """Local calls surface raw exceptions, not ClientException."""
        instance = self._create_fake_instance()
        # NOTE(danms): The LocalAPI should not raise exceptions wrapped
        # in ClientException. KeyError should be raised if an invalid
        # update key is passed, so use that to validate.
        self.assertRaises(KeyError,
                          self._do_update, instance['uuid'], foo='bar')

    def test_wait_until_ready(self):
        # Override test in ConductorAPITestCase: there is no remote
        # service to ping when running locally.
        pass
class ConductorImportTest(test.TestCase):
    """conductor.API()/ComputeTaskAPI() honor the [conductor]use_local flag."""

    def test_import_conductor_local(self):
        self.flags(use_local=True, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.LocalComputeTaskAPI)

    def test_import_conductor_rpc(self):
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(), conductor_api.API)
        self.assertIsInstance(conductor.ComputeTaskAPI(),
                              conductor_api.ComputeTaskAPI)

    def test_import_conductor_override_to_local(self):
        # An explicit use_local=True argument wins over the config flag.
        self.flags(use_local=False, group='conductor')
        self.assertIsInstance(conductor.API(use_local=True),
                              conductor_api.LocalAPI)
        self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
                              conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's instance-update whitelist."""

    def test_all_allowed_keys(self):
        """Every whitelisted key is accepted by instance_update()."""
        def _stub_update_and_get(self, *args, **kwargs):
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       _stub_update_and_get)
        ctxt = context.RequestContext('fake-user', 'fake-project')
        api = conductor_api.LocalAPI()
        # Datetime fields need datetime values; any string will do for
        # the rest.
        updates = dict(
            (key,
             timeutils.utcnow()
             if key in conductor_manager.datetime_fields else 'foo')
            for key in conductor_manager.allowed_updates)
        api.instance_update(ctxt, 'fake-instance', **updates)

    def test_allowed_keys_are_real(self):
        """All allowed update keys map to real Instance model attributes."""
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)
        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        keys.remove('expected_task_state')
        for key in keys:
            self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
    """Shared compute-task tests; subclasses set self.conductor and
    self.conductor_manager in their setUp()."""

    def setUp(self):
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)

        def fake_deserialize_context(serializer, ctxt_dict):
            # Verify the context round-trips through RPC serialization
            # with the right user/project ids.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context

        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def test_live_migrate(self):
    """Live migration requests are handed off to live_migrate.execute."""
    inst = fake_instance.fake_db_instance()
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst, [])
    self.mox.StubOutWithMock(live_migrate, 'execute')
    live_migrate.execute(self.context,
                         mox.IsA(instance_obj.Instance),
                         'destination',
                         'block_migration',
                         'disk_over_commit')
    self.mox.ReplayAll()
    if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                   conductor_api.LocalComputeTaskAPI)):
        # The API method is actually 'live_migrate_instance'. It gets
        # converted into 'migrate_server' when doing RPC.
        self.conductor.live_migrate_instance(self.context, inst_obj,
            'destination', 'block_migration', 'disk_over_commit')
    else:
        self.conductor.migrate_server(self.context, inst_obj,
            {'host': 'destination'}, True, False, None,
            'block_migration', 'disk_over_commit')
def test_cold_migrate(self):
    """Cold migrate builds a request spec, selects a host and calls
    prep_resize on it."""
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(
        self.conductor_manager.compute_rpcapi, 'prep_resize')
    self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
                             'select_destinations')
    inst = fake_instance.fake_db_instance(image_ref='image_ref')
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst, [])
    flavor = flavors.get_default_flavor()
    flavor['extra_specs'] = 'extra_specs'
    request_spec = {'instance_type': flavor}
    compute_utils.get_image_metadata(
        self.context, self.conductor_manager.image_service,
        'image_ref', mox.IsA(instance_obj.Instance)).AndReturn('image')
    scheduler_utils.build_request_spec(
        self.context, 'image',
        [mox.IsA(instance_obj.Instance)],
        instance_type=flavor).AndReturn(request_spec)
    hosts = [dict(host='host1', nodename=None, limits={})]
    self.conductor_manager.scheduler_rpcapi.select_destinations(
        self.context, request_spec, {}).AndReturn(hosts)
    # The scheduler's limits end up in the filter properties that are
    # passed on to prep_resize.
    filter_properties = {'limits': {}}
    self.conductor_manager.compute_rpcapi.prep_resize(
        self.context, 'image', mox.IsA(instance_obj.Instance),
        mox.IsA(dict), 'host1', [], request_spec=request_spec,
        filter_properties=filter_properties, node=None)
    self.mox.ReplayAll()
    scheduler_hint = {'filter_properties': {}}
    if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                   conductor_api.LocalComputeTaskAPI)):
        # The API method is actually 'resize_instance'. It gets
        # converted into 'migrate_server' when doing RPC.
        self.conductor.resize_instance(
            self.context, inst_obj, {}, scheduler_hint, flavor, [])
    else:
        self.conductor.migrate_server(
            self.context, inst_obj, scheduler_hint,
            False, False, flavor, None, None, [])
def test_build_instances(self):
    """Two instances are scheduled and each is built on its selected
    host, with one build_and_run_instance call per instance."""
    system_metadata = flavors.save_flavor_info({},
        flavors.get_default_flavor())
    instances = [fake_instance.fake_instance_obj(
        self.context,
        system_metadata=system_metadata,
        expected_attrs=['system_metadata']) for i in xrange(2)]
    instance_type = flavors.extract_flavor(instances[0])
    instance_type['extra_specs'] = 'fake-specs'
    instance_properties = jsonutils.to_primitive(instances[0])
    self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
    self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db,
                             'block_device_mapping_get_all_by_instance')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'build_and_run_instance')
    db.flavor_extra_specs_get(
        self.context,
        instance_type['flavorid']).AndReturn('fake-specs')
    # The scheduler is asked once for both instances and returns one
    # destination per instance.
    self.conductor_manager.scheduler_rpcapi.select_destinations(
        self.context, {'image': {'fake_data': 'should_pass_silently'},
                       'instance_properties': jsonutils.to_primitive(
                           instances[0]),
                       'instance_type': instance_type,
                       'instance_uuids': [inst.uuid for inst in instances],
                       'num_instances': 2}, {}).AndReturn(
                           [{'host': 'host1', 'nodename': 'node1',
                             'limits': []},
                            {'host': 'host2', 'nodename': 'node2',
                             'limits': []}])
    # First instance: refreshed from the db and built on host1.
    db.instance_get_by_uuid(self.context, instances[0].uuid,
                            columns_to_join=['system_metadata'],
                            use_slave=False).AndReturn(
                                jsonutils.to_primitive(instances[0]))
    db.block_device_mapping_get_all_by_instance(self.context,
        instances[0].uuid, use_slave=False).AndReturn([])
    self.conductor_manager.compute_rpcapi.build_and_run_instance(
        self.context,
        instance=mox.IgnoreArg(),
        host='host1',
        image={'fake_data': 'should_pass_silently'},
        request_spec={
            'image': {'fake_data': 'should_pass_silently'},
            'instance_properties': instance_properties,
            'instance_type': instance_type,
            'instance_uuids': [inst.uuid for inst in instances],
            'num_instances': 2},
        filter_properties={'limits': []},
        admin_password='admin_password',
        injected_files='injected_files',
        requested_networks='requested_networks',
        security_groups='security_groups',
        block_device_mapping=mox.IgnoreArg(),
        node='node1', limits=[])
    # Second instance: same flow, built on host2.
    db.instance_get_by_uuid(self.context, instances[1].uuid,
                            columns_to_join=['system_metadata'],
                            use_slave=False).AndReturn(
                                jsonutils.to_primitive(instances[1]))
    db.block_device_mapping_get_all_by_instance(self.context,
        instances[1].uuid, use_slave=False).AndReturn([])
    self.conductor_manager.compute_rpcapi.build_and_run_instance(
        self.context,
        instance=mox.IgnoreArg(),
        host='host2',
        image={'fake_data': 'should_pass_silently'},
        request_spec={
            'image': {'fake_data': 'should_pass_silently'},
            'instance_properties': instance_properties,
            'instance_type': instance_type,
            'instance_uuids': [inst.uuid for inst in instances],
            'num_instances': 2},
        filter_properties={'limits': []},
        admin_password='admin_password',
        injected_files='injected_files',
        requested_networks='requested_networks',
        security_groups='security_groups',
        block_device_mapping=mox.IgnoreArg(),
        node='node2', limits=[])
    self.mox.ReplayAll()
    # build_instances() is a cast, we need to wait for it to complete
    self.useFixture(cast_as_call.CastAsCall(self.stubs))
    self.conductor.build_instances(self.context,
            instances=instances,
            image={'fake_data': 'should_pass_silently'},
            filter_properties={},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks='requested_networks',
            security_groups='security_groups',
            block_device_mapping='block_device_mapping',
            legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
    """NoValidHost from the scheduler triggers handle_schedule_error
    once per instance; no builds are attempted."""
    instances = [fake_instance.fake_instance_obj(self.context)
                 for i in xrange(2)]
    image = {'fake-data': 'should_pass_silently'}
    spec = {'fake': 'specs'}
    exception = exc.NoValidHost(reason='fake-reason')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
    self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
                             'select_destinations')
    scheduler_utils.build_request_spec(self.context, image,
                                       mox.IgnoreArg()).AndReturn(spec)
    self.conductor_manager.scheduler_rpcapi.select_destinations(
        self.context, spec, {}).AndRaise(exception)
    for instance in instances:
        scheduler_driver.handle_schedule_error(self.context, exception,
                                               instance.uuid, spec)
    self.mox.ReplayAll()
    # build_instances() is a cast, we need to wait for it to complete
    self.useFixture(cast_as_call.CastAsCall(self.stubs))
    self.conductor.build_instances(self.context,
            instances=instances,
            image=image,
            filter_properties={},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks='requested_networks',
            security_groups='security_groups',
            block_device_mapping='block_device_mapping',
            legacy_bdm=False)
def test_unshelve_instance_on_host(self):
    """A SHELVED instance still on a host is started in place: the
    shelved image is deleted and start_instance is called."""
    db_instance = jsonutils.to_primitive(self._create_fake_instance())
    instance = instance_obj.Instance.get_by_uuid(self.context,
            db_instance['uuid'], expected_attrs=['system_metadata'])
    instance.vm_state = vm_states.SHELVED
    instance.task_state = task_states.UNSHELVING
    instance.save()
    system_metadata = instance.system_metadata
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'start_instance')
    self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
    # unshelve_instance is stubbed with no expectation recorded: mox
    # fails the test if the shelved-on-host path calls it.
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'unshelve_instance')
    self.conductor_manager.compute_rpcapi.start_instance(self.context,
            instance)
    self.conductor_manager._delete_image(self.context,
            'fake_image_id')
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_image_id'] = 'fake_image_id'
    system_metadata['shelved_host'] = 'fake-mini'
    self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_offloaded_instance_glance_image_not_found(self):
    """A missing shelved image surfaces as UnshelveException and puts
    the instance into ERROR."""
    shelved_image_id = "image_not_found"
    db_instance = jsonutils.to_primitive(self._create_fake_instance())
    instance = instance_obj.Instance.get_by_uuid(
        self.context,
        db_instance['uuid'],
        expected_attrs=['system_metadata'])
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.task_state = task_states.UNSHELVING
    instance.save()
    system_metadata = instance.system_metadata
    self.mox.StubOutWithMock(self.conductor_manager.image_service, 'show')
    e = exc.ImageNotFound(image_id=shelved_image_id)
    self.conductor_manager.image_service.show(
        self.context, shelved_image_id).AndRaise(e)
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_host'] = 'fake-mini'
    system_metadata['shelved_image_id'] = shelved_image_id
    self.assertRaises(
        exc.UnshelveException,
        self.conductor_manager.unshelve_instance,
        self.context, instance)
    self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_instance_schedule_and_rebuild(self):
    """An offloaded instance is rescheduled and unshelved on the host
    the scheduler picks."""
    db_instance = jsonutils.to_primitive(self._create_fake_instance())
    instance = instance_obj.Instance.get_by_uuid(self.context,
            db_instance['uuid'], expected_attrs=['system_metadata'])
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.save()
    filter_properties = {}
    system_metadata = instance.system_metadata
    self.mox.StubOutWithMock(self.conductor_manager.image_service, 'show')
    self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'unshelve_instance')
    self.conductor_manager.image_service.show(self.context,
            'fake_image_id').AndReturn('fake_image')
    self.conductor_manager._schedule_instances(self.context,
            'fake_image', filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
    # The chosen host's limits are forwarded in filter_properties.
    self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
            instance, 'fake_host', image='fake_image',
            filter_properties={'limits': {}}, node='fake_node')
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_image_id'] = 'fake_image_id'
    system_metadata['shelved_host'] = 'fake-mini'
    self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
    """NoValidHost during unshelve leaves the instance offloaded."""
    db_instance = jsonutils.to_primitive(self._create_fake_instance())
    instance = instance_obj.Instance.get_by_uuid(self.context,
            db_instance['uuid'], expected_attrs=['system_metadata'])
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.save()
    system_metadata = instance.system_metadata

    def fake_schedule_instances(context, image, filter_properties,
                                *instances):
        raise exc.NoValidHost(reason='')

    with contextlib.nested(
        mock.patch.object(self.conductor_manager.image_service, 'show',
                          return_value='fake_image'),
        mock.patch.object(self.conductor_manager, '_schedule_instances',
                          fake_schedule_instances)
    ) as (_get_image, _schedule_instances):
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
        _get_image.assert_has_calls([mock.call(self.context,
                system_metadata['shelved_image_id'])])
        # The failure must not flip the vm_state.
        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
    """Volume-backed unshelve (image_service.show returns None) still
    schedules and unshelves, passing image=None along."""
    db_instance = jsonutils.to_primitive(self._create_fake_instance())
    instance = instance_obj.Instance.get_by_uuid(self.context,
            db_instance['uuid'], expected_attrs=['system_metadata'])
    instance.vm_state = vm_states.SHELVED_OFFLOADED
    instance.save()
    filter_properties = {}
    system_metadata = instance.system_metadata
    self.mox.StubOutWithMock(self.conductor_manager.image_service, 'show')
    self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'unshelve_instance')
    self.conductor_manager.image_service.show(self.context,
            'fake_image_id').AndReturn(None)
    self.conductor_manager._schedule_instances(self.context,
            None, filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
    self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
            instance, 'fake_host', image=None,
            filter_properties={'limits': {}}, node='fake_node')
    self.mox.ReplayAll()
    system_metadata['shelved_at'] = timeutils.utcnow()
    system_metadata['shelved_image_id'] = 'fake_image_id'
    system_metadata['shelved_host'] = 'fake-mini'
    self.conductor_manager.unshelve_instance(self.context, instance)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """ComputeTaskManager Tests."""

    def setUp(self):
        super(ConductorTaskTestCase, self).setUp()
        # Drive the manager directly; no RPC layer is involved here.
        self.conductor = conductor_manager.ComputeTaskManager()
        self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
    """migrate_server does not implement rebuild=True."""
    self.assertRaises(NotImplementedError, self.conductor.migrate_server,
        self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
    """migrate_server does not implement live migration with a flavor."""
    self.assertRaises(NotImplementedError, self.conductor.migrate_server,
        self.context, None, None, True, False, "dummy", None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def test_migrate_server_deals_with_expected_exceptions(self):
    """A known migration failure resets the vm_state and is re-raised
    to the caller."""
    instance = fake_instance.fake_db_instance(uuid='uuid',
                                              vm_state=vm_states.ACTIVE)
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), instance, [])
    self.mox.StubOutWithMock(live_migrate, 'execute')
    self.mox.StubOutWithMock(scheduler_utils,
            'set_vm_state_and_notify')
    ex = exc.DestinationHypervisorTooOld()
    live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
                         'destination', 'block_migration',
                         'disk_over_commit').AndRaise(ex)
    scheduler_utils.set_vm_state_and_notify(self.context,
            'compute_task', 'migrate_server',
            {'vm_state': vm_states.ACTIVE,
             'task_state': None,
             'expected_task_state': task_states.MIGRATING},
            ex, self._build_request_spec(inst_obj),
            self.conductor_manager.db)
    self.mox.ReplayAll()
    # ExceptionHelper unwraps ExpectedException so assertRaises can see
    # the original exception class.
    self.conductor = utils.ExceptionHelper(self.conductor)
    self.assertRaises(exc.DestinationHypervisorTooOld,
        self.conductor.migrate_server, self.context, inst_obj,
        {'host': 'destination'}, True, False, None, 'block_migration',
        'disk_over_commit')
def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
    """InvalidCPUInfo during live migration resets the vm_state and is
    re-raised to the caller."""
    instance = fake_instance.fake_db_instance(uuid='uuid',
                                              vm_state=vm_states.ACTIVE)
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), instance, [])
    self.mox.StubOutWithMock(live_migrate, 'execute')
    self.mox.StubOutWithMock(scheduler_utils,
            'set_vm_state_and_notify')
    ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
    live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
                         'destination', 'block_migration',
                         'disk_over_commit').AndRaise(ex)
    scheduler_utils.set_vm_state_and_notify(self.context,
            'compute_task', 'migrate_server',
            {'vm_state': vm_states.ACTIVE,
             'task_state': None,
             'expected_task_state': task_states.MIGRATING},
            ex, self._build_request_spec(inst_obj),
            self.conductor_manager.db)
    self.mox.ReplayAll()
    self.conductor = utils.ExceptionHelper(self.conductor)
    self.assertRaises(exc.InvalidCPUInfo,
        self.conductor.migrate_server, self.context, inst_obj,
        {'host': 'destination'}, True, False, None, 'block_migration',
        'disk_over_commit')
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
@mock.patch.object(live_migrate, 'execute')
def test_migrate_server_deals_with_instancenotrunning_exception(self,
        mock_live_migrate, mock_set_state):
    """InstanceNotRunning resets the vm_state and is re-raised."""
    inst = fake_instance.fake_db_instance()
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst, [])
    error = exc.InstanceNotRunning(instance_id="fake")
    mock_live_migrate.side_effect = error
    self.conductor = utils.ExceptionHelper(self.conductor)
    self.assertRaises(exc.InstanceNotRunning,
        self.conductor.migrate_server, self.context, inst_obj,
        {'host': 'destination'}, True, False, None,
        'block_migration', 'disk_over_commit')
    request_spec = self._build_request_spec(inst_obj)
    mock_set_state.assert_called_once_with(self.context, 'compute_task',
        'migrate_server',
        dict(vm_state=inst_obj.vm_state,
             task_state=None,
             expected_task_state=task_states.MIGRATING),
        error, request_spec, self.conductor_manager.db)
def test_migrate_server_deals_with_unexpected_exceptions(self):
    """Unknown exceptions from live migration are wrapped in
    MigrationError."""
    instance = fake_instance.fake_db_instance()
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), instance, [])
    self.mox.StubOutWithMock(live_migrate, 'execute')
    self.mox.StubOutWithMock(scheduler_utils,
            'set_vm_state_and_notify')
    ex = IOError()
    live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
                         'destination', 'block_migration',
                         'disk_over_commit').AndRaise(ex)
    self.mox.ReplayAll()
    self.conductor = utils.ExceptionHelper(self.conductor)
    self.assertRaises(exc.MigrationError,
        self.conductor.migrate_server, self.context, inst_obj,
        {'host': 'destination'}, True, False, None, 'block_migration',
        'disk_over_commit')
def test_set_vm_state_and_notify(self):
    """The manager helper delegates to scheduler_utils with its own
    'compute_task' service name and db handle."""
    self.mox.StubOutWithMock(scheduler_utils,
                             'set_vm_state_and_notify')
    scheduler_utils.set_vm_state_and_notify(
            self.context, 'compute_task', 'method', 'updates',
            'ex', 'request_spec', self.conductor.db)
    self.mox.ReplayAll()
    self.conductor._set_vm_state_and_notify(
            self.context, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
    """NoValidHost puts an ACTIVE instance back to ACTIVE and rolls the
    quota reservations back."""
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref')
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(extra_specs=dict()))
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(self.conductor,
                             '_set_vm_state_and_notify')
    self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
    compute_utils.get_image_metadata(
        self.context, self.conductor_manager.image_service,
        'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
    scheduler_utils.build_request_spec(
        self.context, image, [inst_obj],
        instance_type='flavor').AndReturn(request_spec)
    exc_info = exc.NoValidHost(reason="")
    self.conductor.scheduler_rpcapi.select_destinations(
        self.context, request_spec,
        filter_props).AndRaise(exc_info)
    updates = {'vm_state': vm_states.ACTIVE,
               'task_state': None}
    self.conductor._set_vm_state_and_notify(self.context,
                                            'migrate_server',
                                            updates, exc_info,
                                            request_spec)
    # NOTE(mriedem): Validate that the quota rollback is using
    # the correct project_id and user_id.
    project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                       inst_obj)
    quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                          user_id=user_id)
    self.mox.ReplayAll()
    self.conductor._cold_migrate(self.context, inst_obj,
                                 'flavor', filter_props, [resvs])
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
    """NoValidHost puts a STOPPED instance back to STOPPED and rolls the
    quota reservations back."""
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                          vm_state=vm_states.STOPPED)
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(extra_specs=dict()))
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(self.conductor,
                             '_set_vm_state_and_notify')
    self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
    compute_utils.get_image_metadata(
        self.context, self.conductor_manager.image_service,
        'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
    scheduler_utils.build_request_spec(
        self.context, image, [inst_obj],
        instance_type='flavor').AndReturn(request_spec)
    exc_info = exc.NoValidHost(reason="")
    self.conductor.scheduler_rpcapi.select_destinations(
        self.context, request_spec,
        filter_props).AndRaise(exc_info)
    updates = {'vm_state': vm_states.STOPPED,
               'task_state': None}
    self.conductor._set_vm_state_and_notify(self.context,
                                            'migrate_server',
                                            updates, exc_info,
                                            request_spec)
    # NOTE(mriedem): Validate that the quota rollback is using
    # the correct project_id and user_id.
    project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                       inst_obj)
    quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                          user_id=user_id)
    self.mox.ReplayAll()
    self.conductor._cold_migrate(self.context, inst_obj,
                                 'flavor', filter_props, [resvs])
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
    """A prep_resize failure after scheduling notifies with the prior
    vm_state, rolls back quotas, and re-raises."""
    inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                          vm_state=vm_states.STOPPED)
    inst_obj = instance_obj.Instance._from_db_object(
        self.context, instance_obj.Instance(), inst,
        expected_attrs=[])
    request_spec = dict(instance_type=dict(extra_specs=dict()))
    filter_props = dict(context=None)
    resvs = 'fake-resvs'
    image = 'fake-image'
    hosts = [dict(host='host1', nodename=None, limits={})]
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(scheduler_utils,
                             'populate_filter_properties')
    self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
                             'prep_resize')
    self.mox.StubOutWithMock(self.conductor,
                             '_set_vm_state_and_notify')
    self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
    compute_utils.get_image_metadata(
        self.context, self.conductor_manager.image_service,
        'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
    scheduler_utils.build_request_spec(
        self.context, image, [inst_obj],
        instance_type='flavor').AndReturn(request_spec)
    self.conductor.scheduler_rpcapi.select_destinations(
        self.context, request_spec, filter_props).AndReturn(hosts)
    scheduler_utils.populate_filter_properties(filter_props,
                                               hosts[0])
    # context popped
    expected_filter_props = dict()
    # extra_specs popped
    expected_request_spec = dict(instance_type=dict())
    exc_info = test.TestingException('something happened')
    self.conductor.compute_rpcapi.prep_resize(
        self.context, image, inst_obj,
        'flavor', hosts[0]['host'], [resvs],
        request_spec=expected_request_spec,
        filter_properties=expected_filter_props,
        node=hosts[0]['nodename']).AndRaise(exc_info)
    updates = {'vm_state': vm_states.STOPPED,
               'task_state': None}
    self.conductor._set_vm_state_and_notify(self.context,
                                            'migrate_server',
                                            updates, exc_info,
                                            expected_request_spec)
    # NOTE(mriedem): Validate that the quota rollback is using
    # the correct project_id and user_id.
    project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                       inst_obj)
    quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                          user_id=user_id)
    self.mox.ReplayAll()
    self.assertRaises(test.TestingException,
                      self.conductor._cold_migrate,
                      self.context, inst_obj, 'flavor',
                      filter_props, [resvs])
def test_build_instances_instance_not_found(self):
    """Instances deleted during build are skipped; the remaining ones
    are still built on their hosts."""
    instances = [fake_instance.fake_instance_obj(self.context)
                 for i in xrange(2)]
    self.mox.StubOutWithMock(instances[0], 'refresh')
    self.mox.StubOutWithMock(instances[1], 'refresh')
    image = {'fake-data': 'should_pass_silently'}
    spec = {'fake': 'specs'}
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
    self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                             'build_and_run_instance')
    scheduler_utils.build_request_spec(self.context, image,
                                       mox.IgnoreArg()).AndReturn(spec)
    self.conductor_manager.scheduler_rpcapi.select_destinations(
        self.context, spec, {}).AndReturn(
            [{'host': 'host1', 'nodename': 'node1', 'limits': []},
             {'host': 'host2', 'nodename': 'node2', 'limits': []}])
    # The first instance disappears on refresh and must not be built.
    instances[0].refresh().AndRaise(
        exc.InstanceNotFound(instance_id=instances[0].uuid))
    instances[1].refresh()
    self.conductor_manager.compute_rpcapi.build_and_run_instance(
        self.context, instance=instances[1], host='host2',
        image={'fake-data': 'should_pass_silently'}, request_spec=spec,
        filter_properties={'limits': []},
        admin_password='admin_password',
        injected_files='injected_files',
        requested_networks='requested_networks',
        security_groups='security_groups',
        block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
        node='node2', limits=[])
    self.mox.ReplayAll()
    # build_instances() is a cast, we need to wait for it to complete
    self.useFixture(cast_as_call.CastAsCall(self.stubs))
    self.conductor.build_instances(self.context,
            instances=instances,
            image=image,
            filter_properties={},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks='requested_networks',
            security_groups='security_groups',
            block_device_mapping='block_device_mapping',
            legacy_bdm=False)
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
        test_compute.BaseTestCase):
    """Conductor compute_task RPC namespace Tests.

    Reruns the shared task tests through the compute_task RPC API
    against a running conductor service.
    """

    def setUp(self):
        super(ConductorTaskRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ComputeTaskAPI()
        service_manager = self.conductor_service.manager
        self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """Compute task API Tests.

    Reruns the shared task tests through the public ComputeTaskAPI.
    """

    def setUp(self):
        super(ConductorTaskAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.ComputeTaskAPI()
        service_manager = self.conductor_service.manager
        self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
    """Conductor LocalComputeTaskAPI Tests.

    Same tests as the parent, but driving the in-process local API.
    """

    def setUp(self):
        super(ConductorLocalComputeTaskAPITestCase, self).setUp()
        self.conductor = conductor_api.LocalComputeTaskAPI()
        self.conductor_manager = self.conductor._manager._target
class ConductorV2ManagerProxyTestCase(test.NoDBTestCase):
    """The v2 proxy must forward every method to the wrapped manager."""

    def test_v2_manager_proxy(self):
        manager = conductor_manager.ConductorManager()
        proxy = conductor_manager._ConductorManagerV2Proxy(manager)
        ctxt = context.get_admin_context()
        methods = [
            # (method, number_of_args)
            ('instance_update', 3),
            ('instance_get_by_uuid', 2),
            ('migration_get_in_progress_by_host_and_node', 2),
            ('aggregate_host_add', 2),
            ('aggregate_host_delete', 2),
            ('aggregate_metadata_get_by_host', 2),
            ('bw_usage_update', 9),
            ('provider_fw_rule_get_all', 0),
            ('agent_build_get_by_triple', 3),
            ('block_device_mapping_update_or_create', 2),
            ('block_device_mapping_get_all_by_instance', 2),
            ('instance_get_all_by_filters', 5),
            ('instance_get_active_by_window_joined', 4),
            ('instance_destroy', 1),
            ('instance_info_cache_delete', 1),
            ('vol_get_usage_by_time', 1),
            ('vol_usage_update', 8),
            ('service_get_all_by', 3),
            ('instance_get_all_by_host', 3),
            ('instance_fault_create', 1),
            ('action_event_start', 1),
            ('action_event_finish', 1),
            ('service_create', 1),
            ('service_destroy', 1),
            ('compute_node_create', 1),
            ('compute_node_update', 2),
            ('compute_node_delete', 1),
            ('service_update', 2),
            ('task_log_get', 5),
            ('task_log_begin_task', 6),
            ('task_log_end_task', 6),
            ('notify_usage_exists', 5),
            ('security_groups_trigger_handler', 2),
            ('security_groups_trigger_members_refresh', 1),
            ('network_migrate_instance_start', 2),
            ('network_migrate_instance_finish', 2),
            ('quota_commit', 3),
            ('quota_rollback', 3),
            ('get_ec2_ids', 1),
            ('compute_unrescue', 1),
            ('object_class_action', 5),
            ('object_action', 4),
            ('object_backport', 2),
        ]
        for method, num_args in methods:
            # Call through the proxy with placeholder arguments and check
            # the manager method of the same name received them.
            args = [None] * num_args
            with mock.patch.object(manager, method) as mock_method:
                getattr(proxy, method)(ctxt, *args)
                mock_method.assert_called_once_with(mock.ANY, *args)
| |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This script imports Raw Triangle File format files to Blender.
The raw triangle format is very simple; it has no verts or faces lists.
It's just a simple ascii text file with the vertices of each triangle
listed on each line. In addition, a line with 12 values will be
imported as a quad. This may be in conflict with some other
applications, which use a raw format, but this is how it was
implemented back in blender 2.42.
Usage:
Execute this script from the "File->Import" menu and choose a Raw file to
open.
Notes:
Generates the standard verts and faces lists, but without duplicate
verts. Only *exact* duplicates are removed, there is no way to specify a
tolerance.
"""
import bpy
def getNextLine(fs):
    """
    Return the next non-blank, non-comment line from binary stream *fs*.

    Lines are decoded as ASCII (undecodable bytes ignored) and stripped
    of surrounding whitespace; lines starting with '#' are skipped.
    Returns None at end of file.

    Fixes: the original checked ``ln is None``, which can never be true
    (a stripped string is never None), so hitting EOF — where readline()
    returns b'' — spun forever.  EOF is now detected on the raw bytes
    before decoding.  A leftover debug print() was also removed.
    """
    while True:
        raw = fs.readline()
        if not raw:  # readline() returns b'' only at EOF
            return None
        ln = raw.decode('ascii', 'ignore').strip()
        if ln and not ln.startswith('#'):
            return ln
def readVertices(fs):
    """
    Read a vertex chunk: a count line followed by that many "x y z" lines.

    Returns a list of (x, z, -y) float tuples — the axis swap converts
    from the file's coordinate convention to Blender's.
    """
    total = int(getNextLine(fs))
    result = []
    for _ in range(total):
        x, y, z = (float(token) for token in getNextLine(fs).split())
        result.append((x, z, -y))
    return result
def readFaces(fs):
    """
    Read a face chunk: a count line followed by index lines.

    Returns a list of int tuples, each holding the vertex indices of
    one face.
    """
    total = int(getNextLine(fs))
    return [tuple(int(token) for token in getNextLine(fs).split())
            for _ in range(total)]
def _readDeform(fs, defaultName):
    """
    Read one deformation record: a count line followed by that many
    "vertexIndex dx dy dz" lines.

    Returns a list of (vertex_index, dx, dy, dz) tuples.  The original
    parsed the values and then discarded them (implicitly returning
    None), so callers collected lists of None; the parsed displacements
    are now returned.  *defaultName* is accepted for interface
    compatibility but is currently unused.
    """
    count = int(getNextLine(fs))
    displacements = []
    for _ in range(count):
        ln = getNextLine(fs).split()
        v = int(ln[0])
        x = float(ln[1])
        y = float(ln[2])
        z = float(ln[3])
        displacements.append((v, x, y, z))
    return displacements
def _readDeformations(fs, defaultName):
    """
    Read a chunk of deformation records.

    Returns one list entry per deformation.  When the chunk declares
    zero deformations an empty list is returned — the original fell
    through and returned None in that case, which breaks callers that
    iterate or take len() of the result.
    """
    count = int(getNextLine(fs))
    return [_readDeform(fs, defaultName) for _ in range(count)]
def readDynamicDeformations(fs):
    """
    Read a chunk of dynamic deformations (i.e. action units).

    Returns a list of deformation objects.
    """
    return _readDeformations(fs, "Action Unit")
def readStaticDeformations(fs):
    """
    Read a chunk of static deformations (i.e. shape units).

    Returns a list of deformation objects.
    """
    return _readDeformations(fs, "Shape Unit")
def readParams(fs):
    """
    Read a chunk of parameter values for dynamic/shape deformations.

    Each data line is "idx value" where idx indexes a deformation and
    value is the deformation amount (-1 to +1).  Returns a list where
    position idx holds the value.  NOTE: NOT USED IN BLENDER IMPORT.
    """
    total = int(getNextLine(fs))
    values = [0] * total
    for _ in range(total):
        fields = getNextLine(fs).split()
        values[int(fields[0])] = float(fields[1])
    return values
def readTexCoords(fs):
    """
    Read a chunk of texture co-ordinates.

    Returns (uvcoords, filename): a list of (u, v) float tuples and the
    texture file name.  When the chunk declares zero coordinates the
    original implicitly returned None, which crashed the caller's
    ``texCoords, texFilename = readTexCoords(fs)`` unpacking; an empty
    list with a None filename is returned instead.  (As in the
    original, no filename line is consumed in the zero-coordinate case
    — presumably the file omits it then; TODO confirm against the
    format spec.)
    """
    nCoords = int(getNextLine(fs))
    texCoords = []
    texFilename = None
    if nCoords > 0:
        for _ in range(nCoords):
            ln = getNextLine(fs)
            texCoords.append(tuple(float(a) for a in ln.split()))
        texFilename = getNextLine(fs)
    return (texCoords, texFilename)
def readGlobal(fs):
    """
    Read the global affine transform: one line each for rotation,
    scale and translation.  Returns (rotation, scale, translation),
    each a list of floats.
    """
    def _floats():
        # One transform component per line, whitespace separated.
        return [float(token) for token in getNextLine(fs).split()]

    rotation = _floats()
    scale = _floats()
    translation = _floats()
    return (rotation, scale, translation)
def read(filepath):
    """Parse the WFM file at *filepath* and add its mesh to the scene.

    Reads the chunks in strict file order (vertices, faces, dynamic and
    static deformations, two parameter blocks, texture coords, global
    transform), builds a Blender mesh from the vertex/face data, and
    links a newly created, selected object into the current scene.
    """
    # convert the filename to an object name
    objName = bpy.path.display_name_from_filepath(filepath)
    print("Parsing WFM file")
    with open(filepath, "rb") as fs:
        # NOTE(review): chunk order below mirrors the on-disk layout and
        # must not be reordered.
        print("\t... mesh data")
        verts = readVertices(fs)
        faces = readFaces(fs)
        print("\t... deform parameters")
        # NOTE(review): deformations, params and the affine transform are
        # parsed but never applied to the mesh below — presumably future
        # work; confirm before removing.
        dynamicDeformations = readDynamicDeformations(fs)
        staticDeformations = readStaticDeformations(fs)
        dynamicParams = readParams(fs)
        staticParams = readParams(fs)
        print("\t... texture coords")
        texCoords, texFilename = readTexCoords(fs)
        print("\t... affine transform")
        rotation, scale, translation = readGlobal(fs)
    # Create mesh
    print("Creating mesh")
    mesh = bpy.data.meshes.new(objName)
    mesh.from_pydata(verts, [], faces)
    # Create scene object
    print("Creating object")
    scn = bpy.context.scene
    # Deselect everything so only the imported object ends up selected.
    for o in scn.objects:
        o.select = False
    mesh.update()
    mesh.validate()
    nobj = bpy.data.objects.new(objName, mesh)
    #nobj.rotation_euler = rot
    scn.objects.link(nobj)
    nobj.select = True
    # Only steal the active-object slot when nothing is active or the
    # active object is in OBJECT mode (avoid disturbing an edit session).
    if scn.objects.active is None or scn.objects.active.mode == 'OBJECT':
        scn.objects.active = nobj
    # # Generate verts and faces lists, without duplicates
    # verts = []
    # coords = {}
    # index_tot = 0
    # faces_indices = []
    #
    # for f in faces:
    #     fi = []
    #     for i, v in enumerate(f):
    #         index = coords.get(v)
    #
    #         if index is None:
    #             index = coords[v] = index_tot
    #             index_tot += 1
    #             verts.append(v)
    #
    #         fi.append(index)
    #
    #     faces_indices.append(fi)
    #
    # mesh = bpy.data.meshes.new(objName)
    # mesh.from_pydata(verts, [], faces_indices)
    #
    # return mesh
| |
"""
===============================================
Base code for running Kalman filter
===============================================
This module implements a Kalman filter that is slightly different from the
standard one, following West and Harrison (1999). This Kalman filter accepts
one-dimension discounting factor to adaptively learn the innovation matrix
itself, instead of accepting it from the user. (Although such option is still
provided)
"""
# This code take care of the Kalman filter
import numpy as np
import pydlm.base.tools as tl
# Define the class of Kalman filter which offers a forward filter
# backward smoother and backward sampler for one-step move
class kalmanFilter:
    """ The kalmanFilter class provides the basic filtering functionality:
    a one-step predictor, forward filter, backward smoother and backward
    sampler, following West and Harrison (1999).  The innovation matrix
    is learned adaptively via discounting rather than supplied by the
    user.

    Attributes:
        discount: diagonal matrix of 1/sqrt(discount factors); applying it
                  on both sides of a covariance inflates it by 1/discount,
                  which determines how much past information to carry on.
        updateInnovation: 'whole' discounts the full predicted system
                  covariance, 'component' discounts block diagonals only.
                  Any other value disables the innovation update.
        index: {component name: (start, end)} pairs, needed only when
                  updateInnovation == 'component'.

    Methods:
        predict: predict one step ahead of the current state
        forwardFilter: one step filter on the model given a new observation
        backwardSmoother: one step backward smooth given the future model and
                          the filtered state and systematic covariance
        backwardSampler: similar to backwardSmoother, using sampling instead
                         of deterministic equations.
        updateDiscount: for updating the discount factors
    """

    def __init__(self, discount=(0.99,),
                 updateInnovation='whole',
                 index=None):
        """ Initializing the kalmanFilter class

        Args:
            discount: the discounting factor(s), a sequence of values in
                      [0, 1].  (An immutable tuple default replaces the
                      original mutable list default — a Python pitfall,
                      even though this code never mutated it.)
            updateInnovation: the indicator for how the innovation matrix
                      is updated ('whole', 'component', or anything else
                      to disable).
            index: component index map, used by the 'component' mode.
        """
        self.__checkDiscount__(discount)
        self.discount = np.matrix(np.diag(1 / np.sqrt(np.array(discount))))
        self.updateInnovation = updateInnovation
        self.index = index

    def predict(self, model, dealWithMissingEvaluation=False):
        """ Predict the next states of the model by one step

        Args:
            model: the @baseModel class providing all necessary information
            dealWithMissingEvaluation: indicate whether we need to treat
                      missing values in the evaluation.  Turned off when
                      called from forwardFilter, which has already handled
                      them.

        Returns:
            The predicted result is stored in 'model.prediction'
        """
        # check whether evaluation has missing data; if so, zero out the
        # affected transition/evaluation entries for the duration of the call
        if dealWithMissingEvaluation:
            loc = self._modifyTransitionAccordingToMissingValue(model)
        if model.prediction.step == 0:
            # step == 0: predict from the (filtered) model state, and this
            # is the only place the innovation is (re)computed and added
            model.prediction.state = np.dot(model.transition, model.state)
            model.prediction.obs = np.dot(model.evaluation,
                                          model.prediction.state)
            model.prediction.sysVar = np.dot(
                np.dot(model.transition, model.sysVar), model.transition.T)
            # update the innovation
            if self.updateInnovation == 'whole':
                self.__updateInnovation__(model)
            elif self.updateInnovation == 'component':
                self.__updateInnovation2__(model)
            # add the innovation to the system variance
            model.prediction.sysVar += model.innovation
            model.prediction.obsVar = np.dot(
                np.dot(model.evaluation, model.prediction.sysVar),
                model.evaluation.T) + model.noiseVar
            model.prediction.step = 1
        else:
            # otherwise chain the previous prediction forward one step;
            # no additional innovation is added
            model.prediction.state = np.dot(model.transition,
                                            model.prediction.state)
            model.prediction.obs = np.dot(model.evaluation,
                                          model.prediction.state)
            model.prediction.sysVar = np.dot(
                np.dot(model.transition, model.prediction.sysVar),
                model.transition.T)
            model.prediction.obsVar = np.dot(
                np.dot(model.evaluation, model.prediction.sysVar),
                model.evaluation.T) + model.noiseVar
            model.prediction.step += 1
        # recover the evaluation and the transition matrix
        if dealWithMissingEvaluation:
            self._recoverTransitionAndEvaluation(model, loc)

    def forwardFilter(self, model, y, dealWithMissingEvaluation=False):
        """ The forwardFilter used to run one step filtering given new data

        Args:
            model: the @baseModel providing the basic information
            y: the newly observed data (None for a missing observation)
            dealWithMissingEvaluation: whether missing evaluation entries
                      need handling here.

        Returns:
            The filtered result is stored in the 'model', replacing the
            old states.
        """
        # check whether evaluation has missing data, if so, take care of it
        if dealWithMissingEvaluation:
            loc = self._modifyTransitionAccordingToMissingValue(model)
        # missing values were dealt with above; don't double-treat them
        self.predict(model, dealWithMissingEvaluation=False)
        if y is not None:
            # reset the prediction step to 0 so the next predict() starts
            # from the freshly filtered state and re-updates the innovation
            model.prediction.step = 0
            # the prediction error and the correction matrix
            err = y - model.prediction.obs
            correction = np.dot(model.prediction.sysVar,
                                model.evaluation.T) / model.prediction.obsVar
            # update new states (conjugate variance-learning update)
            model.df += 1
            lastNoiseVar = model.noiseVar  # for updating model.sysVar
            model.noiseVar = model.noiseVar * \
                (1.0 - 1.0 / model.df +
                 err * err / model.df / model.prediction.obsVar)
            model.state = model.prediction.state + correction * err
            model.sysVar = model.noiseVar[0, 0] / lastNoiseVar[0, 0] * \
                (model.prediction.sysVar -
                 np.dot(correction, correction.T) *
                 model.prediction.obsVar[0, 0])
            model.obs = np.dot(model.evaluation, model.state)
            model.obsVar = np.dot(np.dot(model.evaluation, model.sysVar),
                                  model.evaluation.T) + model.noiseVar
        else:
            # Missing observation: carry the prediction over as the filtered
            # state.  model.prediction.step is deliberately NOT reset, so a
            # run like [5, None, None, None] accumulates innovation only
            # once — no new information is coming in:
            #   1. the first None starts from step 0 because '5' preceded it
            #   2. the second None starts from step 1, but prediction.state
            #      is correct since model.state == model.prediction.state
            #   3. the last None follows the same pattern
            model.state = model.prediction.state
            model.sysVar = model.prediction.sysVar
            model.obs = model.prediction.obs
            model.obsVar = model.prediction.obsVar
        # recover the evaluation and the transition matrix
        if dealWithMissingEvaluation:
            self._recoverTransitionAndEvaluation(model, loc)

    def backwardSmoother(self, model, rawState, rawSysVar):
        """ The backwardSmoother for one step backward smoothing

        Args:
            model: the @baseModel used for backward smoothing; it shall store
                   the following information
                   model.state: the last smoothed states (t + 1)
                   model.sysVar: the last smoothed system variance (t + 1)
                   model.transition: the transition at time t + 1
                   model.evaluation: the evaluation vector at time t
                   model.prediction.sysVar: the predicted system variance
                                            for time t + 1
                   model.prediction.state: the predicted state for time t + 1
            rawState: the filtered (unsmoothed) state at time t
            rawSysVar: the filtered (unsmoothed) system covariance at time t

        Returns:
            The smoothed results are stored in the 'model', replacing the
            filtered result.
        """
        # use generalized inverse to ensure computational stability
        predSysVarInv = self._gInverse(model.prediction.sysVar)
        backward = np.dot(np.dot(rawSysVar, model.transition.T),
                          predSysVarInv)
        model.state = rawState + np.dot(
            backward, (model.state - model.prediction.state))
        model.sysVar = rawSysVar + np.dot(
            np.dot(backward, (model.sysVar - model.prediction.sysVar)),
            backward.T)
        model.obs = np.dot(model.evaluation, model.state)
        model.obsVar = np.dot(np.dot(model.evaluation, model.sysVar),
                              model.evaluation.T) + model.noiseVar

    def backwardSampler(self, model, rawState, rawSysVar):
        """ The backwardSampler for one step backward sampling

        Same contract as backwardSmoother, but the smoothed state and
        observation are drawn from the corresponding multivariate normal
        distributions instead of being set deterministically.

        Returns:
            The sampled results are stored in the 'model', replacing the
            filtered result.
        """
        # use generalized inverse to ensure computational stability
        predSysVarInv = self._gInverse(model.prediction.sysVar)
        backward = np.dot(np.dot(rawSysVar, model.transition.T),
                          predSysVarInv)
        model.state = rawState + np.dot(
            backward, (model.state - model.prediction.state))
        model.sysVar = rawSysVar + np.dot(
            np.dot(backward, (model.sysVar - model.prediction.sysVar)),
            backward.T)
        model.state = np.matrix(np.random.multivariate_normal(
            model.state.A1, model.sysVar)).T
        model.obs = np.dot(model.evaluation, model.state)
        model.obsVar = np.dot(np.dot(model.evaluation, model.sysVar),
                              model.evaluation.T) + model.noiseVar
        model.obs = np.matrix(np.random.multivariate_normal(
            model.obs.A1, model.obsVar)).T

    def updateDiscount(self, newDiscount):
        """ For updating the discounting factor

        Args:
            newDiscount: the new discount factor(s), values in [0, 1]
        """
        self.__checkDiscount__(newDiscount)
        self.discount = np.matrix(np.diag(1 / np.sqrt(newDiscount)))

    def __checkDiscount__(self, discount):
        """ Check whether every discount factor is within [0, 1];
        raise tl.matrixErrors otherwise.
        """
        for factor in discount:
            if factor < 0 or factor > 1:
                raise tl.matrixErrors(
                    'discount factor must be between 0 and 1')

    def __updateInnovation__(self, model):
        """ Update the innovation matrix of the model by discounting the
        whole predicted system covariance.
        """
        model.innovation = np.dot(
            np.dot(self.discount, model.prediction.sysVar),
            self.discount) - model.prediction.sysVar

    def __updateInnovation2__(self, model):
        """ Update the innovation matrix of the model, but only per
        component (i.e., add innovation on the block diagonals only, not
        on the off-block diagonals).
        """
        innovation = np.dot(
            np.dot(self.discount, model.prediction.sysVar),
            self.discount) - model.prediction.sysVar
        model.innovation = np.matrix(np.zeros(innovation.shape))
        for name in self.index:
            indx = self.index[name]
            model.innovation[indx[0]: (indx[1] + 1),
                             indx[0]: (indx[1] + 1)] = \
                innovation[indx[0]: (indx[1] + 1),
                           indx[0]: (indx[1] + 1)]

    def _gInverse(self, A):
        """ A generalized (Moore-Penrose pseudo) inverse of matrix A """
        return np.linalg.pinv(A)

    def _modifyTransitionAccordingToMissingValue(self, model):
        """ When evaluation contains None values, zero out the matching
        transition/evaluation entries so the missing dimensions do not
        propagate.  Returns the modified indices for later recovery.
        """
        loc = []
        for i in range(model.evaluation.shape[1]):
            if model.evaluation[0, i] is None:
                loc.append(i)
                model.transition[i, i] = 0.0
                model.evaluation[0, i] = 0.0
        return loc

    def _recoverTransitionAndEvaluation(self, model, loc):
        """ Restore the transition and evaluation entries modified by
        _modifyTransitionAccordingToMissingValue.
        """
        for i in loc:
            model.evaluation[0, i] = None
            model.transition[i, i] = 1.0
| |
from __future__ import print_function
import os.path
import sys
import re
import warnings
from django.db import connection, models
from django.db.backends.util import truncate_name
from django.core.management.color import no_style
from django.db.models.fields import NOT_PROVIDED
from django.db.utils import DatabaseError
from south.utils.py3 import string_types, text_type
#from OpenEdge.pyodbc import operations
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):
    """
    OpenEdge implementation of database operations.
    """

    backend_name = 'OpenEdge'
    alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s'
    alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL'
    alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL'
    delete_check_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
    add_column_string = 'ALTER TABLE %s ADD COLUMN %s;'
    delete_unique_sql = "ALTER TABLE %s DROP CONSTRAINT %s"
    delete_foreign_key_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s'
    create_table_sql = 'CREATE TABLE %(table)s (%(columns)s)'
    max_index_name_length = 32
    drop_index_string = 'DROP INDEX %(index_name)s'
    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;'
    create_primary_key_string = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s PRIMARY KEY (%(columns)s)"
    delete_primary_key_sql = "ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s"
    add_check_constraint_fragment = "ADD CONSTRAINT %(constraint)s CHECK (%(check)s)"
    rename_table_sql = "ALTER TABLE %s RENAME TO %s;"
    default_schema_name = "public"

    # Features
    allows_combined_alters = True
    supports_foreign_keys = True
    has_check_constraints = True
    has_booleans = True
    raises_default_errors = True

    def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
        """
        Creates the SQL snippet for a column. Used by add_column and add_table.
        """
        # If the field hasn't already been told its attribute name, do so.
        if not field_prepared:
            field.set_attributes_from_name(field_name)
        # hook for the field to do any resolution prior to its attributes being queried
        if hasattr(field, 'south_init'):
            field.south_init()
        # Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
        field = self._field_sanity(field)
        try:
            sql = field.db_type(connection=self._get_connection())
        except TypeError:
            sql = field.db_type()
        if sql:
            # Some callers, like the sqlite stuff, just want the extended type.
            if with_name:
                field_output = [self.quote_name(field.column), sql]
            else:
                field_output = [sql]
            field_output.append('%s' % (not field.null and 'NOT NULL ' or ''))
            if field.primary_key:
                field_output.append('PRIMARY KEY')
            elif field.unique:
                # Just use UNIQUE (no indexes any more, we have delete_unique)
                field_output.append('UNIQUE')
            tablespace = field.db_tablespace or tablespace
            if tablespace and getattr(self._get_connection().features, "supports_tablespaces", False) and field.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                field_output.append(self._get_connection().ops.tablespace_sql(tablespace, inline=True))
            sql = ' '.join(field_output)
            sqlparams = ()
            # if the field is "NOT NULL" and a default value is provided, create the column with it
            # this allows the addition of a NOT NULL field to a table with existing rows
            if not getattr(field, '_suppress_default', False):
                if field.has_default():
                    default = field.get_default()
                    # If the default is actually None, don't add a default term
                    if default is not None:
                        # If the default is a callable, then call it!
                        if callable(default):
                            default = default()
                        default = field.get_db_prep_save(default, connection=self._get_connection())
                        default = self._default_value_workaround(default)
                        # Now do some very cheap quoting. TODO: Redesign return values to avoid this.
                        if isinstance(default, string_types):
                            default = "'%s'" % default.replace("'", "''")
                        # Escape any % signs in the output (bug #317)
                        if isinstance(default, string_types):
                            default = default.replace("%", "%%")
                        # Add it in.
                        sql += " DEFAULT %s"
                        # FIX: this was `(default)`, which is just `default`,
                        # not a one-element tuple — `sql % sqlparams` then
                        # misbehaves whenever the default is itself a tuple.
                        sqlparams = (default,)
                elif (not field.null and field.blank) or (field.get_default() == ''):
                    if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
                        sql += " DEFAULT ''"
                    # Error here would be nice, but doesn't seem to play fair.
                    #else:
                    #    raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
            if field.rel and self.supports_foreign_keys:
                self.add_deferred_sql(
                    self.foreign_key_sql(
                        table_name,
                        field.column,
                        field.rel.to._meta.db_table,
                        field.rel.to._meta.get_field(field.rel.field_name).column
                    )
                )
            # Things like the contrib.gis module fields have this in 1.1 and below
            if hasattr(field, 'post_create_sql'):
                for stmt in field.post_create_sql(no_style(), table_name):
                    self.add_deferred_sql(stmt)
            # In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
            # This also creates normal indexes in 1.1.
            if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
                # Make a fake model to pass in, with only db_table
                model = self.mock_model("FakeModelForGISCreation", table_name)
                for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
                    self.add_deferred_sql(stmt)
        if sql:
            return sql % sqlparams
        else:
            return None

    @generic.invalidate_table_constraints
    def create_unique(self, table_name, columns):
        """
        Creates a UNIQUE index on the columns on the given table.
        Returns the generated index name.
        """
        if not isinstance(columns, (list, tuple)):
            columns = [columns]
        name = self.create_index_name(table_name, columns, suffix="_uniq")
        cols = ", ".join(map(self.quote_name, columns))
        self.execute('CREATE UNIQUE INDEX %s ON "%s" (%s)' % (name, table_name, cols))
        return name

    def _createSequence(self, table, column):
        """
        Return the backend's auto-increment SQL statements for the given
        table/column (delegates to the connection's autoinc_sql).
        """
        return self._get_connection().ops.autoinc_sql(table, column)

    @generic.invalidate_table_constraints
    def create_table(self, table_name, fields):
        """
        Creates the table 'table_name'. 'fields' is a tuple of fields,
        each represented by a 2-part tuple of field name and a
        django.db.models.fields.Field object.
        """
        if len(table_name) > 63:
            print(" ! WARNING: You have a table name longer than 63 characters; this will not fully work on PostgreSQL or MySQL.")
        # avoid default values in CREATE TABLE statements (#925)
        for field_name, field in fields:
            field._suppress_default = True
        columns = [
            self.column_sql(table_name, field_name, field)
            for field_name, field in fields
        ]
        self.execute(self.create_table_sql % {
            "table": self.quote_name(table_name),
            "columns": ', '.join([col for col in columns if col]),
        })
        # OpenEdge needs an explicit sequence for the auto 'id' column.
        self.execute(self._createSequence(table_name, 'id')[0])

    @generic.invalidate_table_constraints
    def add_column(self, table_name, name, field, keep_default=True):
        """
        Adds the column 'name' to the table 'table_name'.
        Uses the 'field' parameter, a django.db.models.fields.Field instance,
        to generate the necessary sql.

        @param table_name: The name of the table to add the column to
        @param name: The name of the column to add
        @param field: The field to use
        """
        sql = self.column_sql(table_name, name, field)
        if sql:
            params = (
                self.quote_name(table_name),
                sql,
            )
            sql = self.add_column_string % params
            self.execute(sql)

    @generic.invalidate_table_constraints
    def delete_table(self, table_name, cascade=True):
        """
        Deletes the table 'table_name' and its backing sequence.

        NOTE(review): 'cascade' is accepted for interface compatibility but
        ignored — presumably OpenEdge does not support DROP TABLE ... CASCADE;
        confirm before changing.
        """
        params = (self.quote_name(table_name), )
        self.execute('DROP TABLE %s;' % params)
        # Drop associated sequence; the name is truncated the same way it
        # was when the sequence was created.
        self.execute('DROP SEQUENCE PUB.%s_%s' % ('ID', table_name[:self.max_index_name_length - 3]))
| |
from django.contrib.auth.models import User
from django.test import TestCase
from core import models
class ModelVariable(TestCase):
    """Tests for the Variable model's string representation."""

    def setUp(self):
        models.Variable.objects.create(name='qwer', value='asdf')

    def test_str(self):
        self.assertEqual(
            str(models.Variable.objects.get(name='qwer')), 'qwer: asdf')
class ModelHostGroup(TestCase):
    """Tests for the HostGroup model's string representation."""

    def setUp(self):
        models.HostGroup.objects.create(name='qwer')

    def test_str(self):
        self.assertEqual(
            str(models.HostGroup.objects.get(name='qwer')), 'qwer')
class ModelHost(TestCase):
    """Tests for the Host model: __str__ and group-variable lookup."""

    def setUp(self):
        models.Host.objects.create(name='qwer', address='192.168.12.20')
        models.Host.objects.create(address='192.168.44.74')

    def test_str(self):
        named = models.Host.objects.get(name='qwer')
        unnamed = models.Host.objects.get(address='192.168.44.74')
        self.assertEqual(str(named), 'qwer (192.168.12.20)')
        self.assertEqual(str(unnamed), '192.168.44.74')

    def test_get_vars(self):
        # A variable attached to a group must be visible through a host
        # belonging to that group.
        var = models.Variable.objects.create(name='test', value='value')
        group = models.HostGroup.objects.create(name='test group')
        group.vars.add(var)
        group.save()
        host = models.Host.objects.get(id=1)
        host.groups.add(group)
        host.save()
        self.assertEqual(list(host.get_vars()),
                         [models.Variable.objects.get(id=1)])
class ModelAnsibleUser(TestCase):
    """Tests for the AnsibleUser model's string representation."""

    def setUp(self):
        models.AnsibleUser.objects.create(name='qwer')

    def test_str(self):
        self.assertEqual(
            str(models.AnsibleUser.objects.get(name='qwer')), 'qwer')
class ModelTaskTemplate(TestCase):
    """Tests for the TaskTemplate model."""

    def setUp(self):
        models.TaskTemplate.objects.create(name='qwer', playbook='/home/')

    def test_str(self):
        self.assertEqual(
            str(models.TaskTemplate.objects.get(name='qwer')), 'qwer')

    def test_create_task(self):
        template = models.TaskTemplate.objects.get(name='qwer')
        self.user = User.objects.create(username='Serega', password='passwd')
        answer = template.create_task(self.user)
        # Exactly one task should exist, inheriting the template fields.
        self.assertEqual(models.Task.objects.all().count(), 1)
        created = models.Task.objects.get(template=template)
        self.assertEqual(created.playbook, '/home/')
        self.assertEqual(created.user, self.user)
        self.assertEqual(created.ansible_user, template.ansible_user)
        self.assertEqual(answer, created)
        # Creating a task also writes one 'wait' log entry.
        self.assertEqual(models.TaskLog.objects.all().count(), 1)
        self.assertEqual(
            models.TaskLog.objects.get(
                message='Task created by user Serega').status,
            'wait')

    def test_uncompleted_task_false(self):
        template = models.TaskTemplate.objects.get(id=1)
        self.assertFalse(template.have_uncompleted_task())

    def test_uncompleted_task_true(self):
        models.AnsibleUser.objects.create(name='Test')
        models.Task.objects.create(
            playbook='/home/',
            template=models.TaskTemplate.objects.get(id=1),
            ansible_user=models.AnsibleUser.objects.get(id=1),
            status='wait',
        )
        self.assertTrue(
            models.TaskTemplate.objects.get(id=1).have_uncompleted_task())

    def test_get_hosts_without_group(self):
        # One host in a group, one host without; only the ungrouped one
        # should be reported.
        lone_host = models.Host.objects.create(
            name='test', address='192.168.19.19')
        group = models.HostGroup.objects.create(name='group test')
        grouped_host = models.Host.objects.create(
            name='test test', address='192.168.19.20')
        grouped_host.groups.add(group)
        grouped_host.save()
        template = models.TaskTemplate.objects.get(id=1)
        template.hosts.add(grouped_host, lone_host)
        template.host_groups.add(group)
        template.save()
        self.assertEqual(len(template.get_hosts_without_groups()), 1)
class ModelTask(TestCase):
    """Tests for the Task model: __str__, duration and command building."""

    def setUp(self):
        ansible_user = models.AnsibleUser.objects.create(name='Serega')
        self.user = User.objects.create(username='Serega', password='passwd')
        task_template = models.TaskTemplate.objects.create(
            name='qwer', playbook='/home/')
        models.Task.objects.create(
            playbook='/home/',
            template=task_template,
            user=self.user,
            ansible_user=ansible_user,
        )
        models.Task.objects.create(
            playbook='/otherhome/image',
            user=self.user,
        )

    def test_str(self):
        # With a template: "#<id> <template name>"; without: the playbook
        # basename.
        with_template = models.Task.objects.get(playbook='/home/')
        without_template = models.Task.objects.get(
            playbook='/otherhome/image')
        self.assertEqual(str(with_template), '#%s qwer' % with_template.id)
        self.assertEqual(str(without_template),
                         '#%s image' % without_template.id)

    def test_get_duration_date(self):
        # A terminal log entry ('fail') closes the task, so the duration
        # is last-log time minus creation time.
        models.TaskLog.objects.create(
            task=models.Task.objects.get(playbook='/home/'),
            status='fail',
        )
        task = models.Task.objects.get(playbook='/home/')
        self.assertEqual(task.get_duration(),
                         task.logs.last().dc - task.dc)

    def test_get_duration_none(self):
        # A non-terminal status yields no duration.
        models.TaskLog.objects.create(
            task=models.Task.objects.get(playbook='/home/'),
            status='test',
        )
        self.assertEqual(
            models.Task.objects.get(playbook='/home/').get_duration(), None)

    def test_ansible_command(self):
        task = models.Task.objects.get(playbook='/home/')
        self.assertEqual(type(task.get_ansible_command()), str)
class ModelTaskLog(TestCase):
    """Tests for the TaskLog model."""

    def setUp(self):
        self.user = User.objects.create(
            username='Serega',
            password='passwd',
        )
        parent_task = models.Task.objects.create(
            playbook='/home/image',
            user=self.user,
        )
        models.TaskLog.objects.create(task=parent_task, status='fail')

    def test_str(self):
        """__str__ is '#<id> <playbook basename>'."""
        log = models.TaskLog.objects.get(id=1)
        self.assertEqual(str(log), '#%s image' % log.id)
| |
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from django.test import TestCase
from guardian.shortcuts import get_perms_for_model
from guardian.core import ObjectPermissionChecker
from guardian.compat import get_model_name
from guardian.compat import get_user_model
from guardian.compat import get_user_permission_full_codename
from guardian.shortcuts import assign
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import get_objects_for_group
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import NotUserNorGroup
from guardian.exceptions import WrongAppError
from guardian.testapp.tests.core_test import ObjectPermissionTestCase
from guardian.models import Group, Permission
import warnings
# Resolve the (possibly swapped) user model once at import time so the tests
# below work under a custom AUTH_USER_MODEL setting as well.
User = get_user_model()
user_app_label = User._meta.app_label
user_model_name = get_model_name(User)
class ShortcutsTests(ObjectPermissionTestCase):
    """Tests for the get_perms_for_model shortcut."""

    def test_get_perms_for_model(self):
        # An instance resolves to the same permission set as its class.
        self.assertEqual(get_perms_for_model(self.user).count(), 3)
        self.assertTrue(list(get_perms_for_model(self.user)) ==
                        list(get_perms_for_model(User)))
        self.assertEqual(get_perms_for_model(Permission).count(), 3)
        # 'app_label.ModelName' strings, model classes and instances are
        # interchangeable inputs.
        as_string = 'contenttypes.ContentType'
        self.assertEqual(
            sorted(get_perms_for_model(as_string).values_list()),
            sorted(get_perms_for_model(ContentType).values_list()))
        instance = ContentType()
        self.assertEqual(
            sorted(get_perms_for_model(as_string).values_list()),
            sorted(get_perms_for_model(instance).values_list()))
class AssignPermTest(ObjectPermissionTestCase):
    """
    Tests permission assigning for user/group and object.
    """

    def test_not_model(self):
        # Anything that is neither a user nor a group is rejected.
        self.assertRaises(NotUserNorGroup, assign_perm,
                          perm="change_object",
                          user_or_group="Not a Model",
                          obj=self.ctype)

    def test_global_wrong_perm(self):
        # Global (no-object) permissions require the 'app_label.codename' form.
        self.assertRaises(ValueError, assign_perm,
                          perm="change_site",
                          user_or_group=self.user)

    def test_user_assign_perm(self):
        assign_perm("change_contenttype", self.user, self.ctype)
        assign_perm("change_contenttype", self.group, self.ctype)
        self.assertTrue(self.user.has_perm("change_contenttype", self.ctype))

    def test_group_assign_perm(self):
        assign_perm("change_contenttype", self.group, self.ctype)
        assign_perm("delete_contenttype", self.group, self.ctype)
        checker = ObjectPermissionChecker(self.group)
        self.assertTrue(checker.has_perm("change_contenttype", self.ctype))
        self.assertTrue(checker.has_perm("delete_contenttype", self.ctype))

    def test_user_assign_perm_global(self):
        granted = assign_perm("contenttypes.change_contenttype", self.user)
        self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
        self.assertTrue(isinstance(granted, Permission))

    def test_group_assign_perm_global(self):
        granted = assign_perm("contenttypes.change_contenttype", self.group)
        self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
        self.assertTrue(isinstance(granted, Permission))

    def test_deprecation_warning(self):
        # The legacy ``assign`` alias must emit exactly one DeprecationWarning.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            assign("contenttypes.change_contenttype", self.group)
            self.assertEqual(len(caught), 1)
            self.assertTrue(isinstance(caught[0].message, DeprecationWarning))
class RemovePermTest(ObjectPermissionTestCase):
    """
    Tests object permissions removal.
    """

    def test_not_model(self):
        # Anything that is neither a user nor a group is rejected.
        self.assertRaises(NotUserNorGroup, remove_perm,
                          perm="change_object",
                          user_or_group="Not a Model",
                          obj=self.ctype)

    def test_global_wrong_perm(self):
        # Global (no-object) permissions require the 'app_label.codename' form.
        self.assertRaises(ValueError, remove_perm,
                          perm="change_site",
                          user_or_group=self.user)

    def test_user_remove_perm(self):
        # Grant first so there is something to revoke.
        assign_perm("change_contenttype", self.user, self.ctype)
        remove_perm("change_contenttype", self.user, self.ctype)
        self.assertFalse(self.user.has_perm("change_contenttype", self.ctype))

    def test_group_remove_perm(self):
        # Grant first so there is something to revoke.
        assign_perm("change_contenttype", self.group, self.ctype)
        remove_perm("change_contenttype", self.group, self.ctype)
        checker = ObjectPermissionChecker(self.group)
        self.assertFalse(checker.has_perm("change_contenttype", self.ctype))

    def test_user_remove_perm_global(self):
        full_codename = "contenttypes.change_contenttype"
        assign_perm(full_codename, self.user)
        remove_perm(full_codename, self.user)
        self.assertFalse(self.user.has_perm(full_codename))

    def test_group_remove_perm_global(self):
        full_codename = "contenttypes.change_contenttype"
        assign_perm(full_codename, self.group)
        remove_perm(full_codename, self.group)
        app_label, codename = full_codename.split('.')
        perm_obj = Permission.objects.get(codename=codename,
                                          content_type__app_label=app_label)
        self.assertFalse(perm_obj in self.group.permissions.all())
class GetPermsTest(ObjectPermissionTestCase):
    """
    Tests get_perms function (already done at core tests but left here as a
    placeholder).
    """
    def test_not_model(self):
        # Anything that is neither a user nor a group is rejected.
        self.assertRaises(NotUserNorGroup, get_perms,
                          user_or_group=None,
                          obj=self.ctype)
    def test_user(self):
        perms_to_assign = ("change_contenttype",)
        for perm in perms_to_assign:
            # Fix: assign the loop's permission instead of a hard-coded
            # codename, so extending ``perms_to_assign`` actually assigns
            # (and therefore tests) the added permissions.
            assign_perm(perm, self.user, self.ctype)
        perms = get_perms(self.user, self.ctype)
        for perm in perms_to_assign:
            self.assertTrue(perm in perms)
class GetUsersWithPermsTest(TestCase):
    """
    Tests get_users_with_perms function.
    """

    def setUp(self):
        # Two target objects, three users and three groups cover every
        # direct/group assignment combination exercised below.
        self.obj1 = ContentType.objects.create(
            name='ct1', model='foo', app_label='guardian-tests')
        self.obj2 = ContentType.objects.create(
            name='ct2', model='bar', app_label='guardian-tests')
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')
        self.user3 = User.objects.create(username='user3')
        self.group1 = Group.objects.create(name='group1')
        self.group2 = Group.objects.create(name='group2')
        self.group3 = Group.objects.create(name='group3')

    def test_empty(self):
        # With no assignments: empty QuerySet, or empty dict when attaching.
        users = get_users_with_perms(self.obj1)
        self.assertTrue(isinstance(users, QuerySet))
        self.assertEqual(list(users), [])
        users = get_users_with_perms(self.obj1, attach_perms=True)
        self.assertTrue(isinstance(users, dict))
        self.assertFalse(bool(users))

    def test_simple(self):
        assign_perm("change_contenttype", self.user1, self.obj1)
        assign_perm("delete_contenttype", self.user2, self.obj1)
        assign_perm("delete_contenttype", self.user3, self.obj2)
        usernames = get_users_with_perms(self.obj1).values_list('username',
                                                                flat=True)
        self.assertEqual(
            set(usernames),
            set([user.username for user in (self.user1, self.user2)]),
        )

    def test_users_groups_perms(self):
        self.user1.groups.add(self.group1)
        self.user2.groups.add(self.group2)
        self.user3.groups.add(self.group3)
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.group2, self.obj1)
        assign_perm("delete_contenttype", self.group3, self.obj2)
        ids = get_users_with_perms(self.obj1).values_list('id', flat=True)
        self.assertEqual(set(ids),
                         set([u.id for u in (self.user1, self.user2)]))

    def test_users_groups_after_removal(self):
        # Reuse the fixture above, then revoke group1's permission.
        self.test_users_groups_perms()
        remove_perm("change_contenttype", self.group1, self.obj1)
        ids = get_users_with_perms(self.obj1).values_list('id', flat=True)
        self.assertEqual(set(ids), set([self.user2.id]))

    def test_attach_perms(self):
        self.user1.groups.add(self.group1)
        self.user2.groups.add(self.group2)
        self.user3.groups.add(self.group3)
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.group2, self.obj1)
        assign_perm("delete_contenttype", self.group3, self.obj2)
        assign_perm("delete_contenttype", self.user2, self.obj1)
        assign_perm("change_contenttype", self.user3, self.obj2)
        # Check contenttype1
        expected = {
            self.user1: ["change_contenttype"],
            self.user2: ["change_contenttype", "delete_contenttype"],
        }
        result = get_users_with_perms(self.obj1, attach_perms=True)
        self.assertEqual(result.keys(), expected.keys())
        for user, perms in result.items():
            self.assertEqual(set(perms), set(expected[user]))
        # Check contenttype2
        expected = {
            self.user3: ["change_contenttype", "delete_contenttype"],
        }
        result = get_users_with_perms(self.obj2, attach_perms=True)
        self.assertEqual(result.keys(), expected.keys())
        for user, perms in result.items():
            self.assertEqual(set(perms), set(expected[user]))

    def test_attach_groups_only_has_perms(self):
        self.user1.groups.add(self.group1)
        assign_perm("change_contenttype", self.group1, self.obj1)
        result = get_users_with_perms(self.obj1, attach_perms=True)
        self.assertEqual(result, {self.user1: ["change_contenttype"]})

    def test_mixed(self):
        self.user1.groups.add(self.group1)
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.user2, self.obj1)
        assign_perm("delete_contenttype", self.user2, self.obj1)
        assign_perm("delete_contenttype", self.user2, self.obj2)
        assign_perm("change_contenttype", self.user3, self.obj2)
        assign_perm("change_%s" % user_model_name, self.user3, self.user1)
        result = get_users_with_perms(self.obj1)
        self.assertEqual(set(result), set([self.user1, self.user2]))

    def test_with_superusers(self):
        admin = User.objects.create(username='admin', is_superuser=True)
        assign_perm("change_contenttype", self.user1, self.obj1)
        result = get_users_with_perms(self.obj1, with_superusers=True)
        self.assertEqual(set(result), set([self.user1, admin]))

    def test_without_group_users(self):
        self.user1.groups.add(self.group1)
        self.user2.groups.add(self.group2)
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.user2, self.obj1)
        assign_perm("change_contenttype", self.group2, self.obj1)
        result = get_users_with_perms(self.obj1, with_group_users=False)
        self.assertEqual(set(result), set([self.user2]))

    def test_without_group_users_but_perms_attached(self):
        self.user1.groups.add(self.group1)
        self.user2.groups.add(self.group2)
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.user2, self.obj1)
        assign_perm("change_contenttype", self.group2, self.obj1)
        result = get_users_with_perms(self.obj1, with_group_users=False,
                                      attach_perms=True)
        self.assertEqual(result, {self.user2: ["change_contenttype"]})

    def test_without_group_users_no_result(self):
        self.user1.groups.add(self.group1)
        assign_perm("change_contenttype", self.group1, self.obj1)
        result = get_users_with_perms(self.obj1, attach_perms=True,
                                      with_group_users=False)
        self.assertEqual(result, {})

    def test_without_group_users_no_result_but_with_superusers(self):
        admin = User.objects.create(username='admin', is_superuser=True)
        self.user1.groups.add(self.group1)
        assign_perm("change_contenttype", self.group1, self.obj1)
        result = get_users_with_perms(self.obj1, with_group_users=False,
                                      with_superusers=True)
        self.assertEqual(set(result), set([admin]))
class GetGroupsWithPerms(TestCase):
    """
    Tests get_groups_with_perms function.
    """

    def setUp(self):
        self.obj1 = ContentType.objects.create(
            name='ct1', model='foo', app_label='guardian-tests')
        self.obj2 = ContentType.objects.create(
            name='ct2', model='bar', app_label='guardian-tests')
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')
        self.user3 = User.objects.create(username='user3')
        self.group1 = Group.objects.create(name='group1')
        self.group2 = Group.objects.create(name='group2')
        self.group3 = Group.objects.create(name='group3')

    def test_empty(self):
        # With no assignments: empty QuerySet, or empty dict when attaching.
        groups = get_groups_with_perms(self.obj1)
        self.assertTrue(isinstance(groups, QuerySet))
        self.assertFalse(bool(groups))
        groups = get_groups_with_perms(self.obj1, attach_perms=True)
        self.assertTrue(isinstance(groups, dict))
        self.assertFalse(bool(groups))

    def test_simple(self):
        assign_perm("change_contenttype", self.group1, self.obj1)
        groups = get_groups_with_perms(self.obj1)
        self.assertEqual(len(groups), 1)
        self.assertEqual(groups[0], self.group1)

    def test_simple_after_removal(self):
        # Build on test_simple's fixture, then revoke the permission.
        self.test_simple()
        remove_perm("change_contenttype", self.group1, self.obj1)
        groups = get_groups_with_perms(self.obj1)
        self.assertEqual(len(groups), 0)

    def test_simple_attach_perms(self):
        assign_perm("change_contenttype", self.group1, self.obj1)
        groups = get_groups_with_perms(self.obj1, attach_perms=True)
        self.assertEqual(groups, {self.group1: ["change_contenttype"]})

    def test_simple_attach_perms_after_removal(self):
        self.test_simple_attach_perms()
        remove_perm("change_contenttype", self.group1, self.obj1)
        groups = get_groups_with_perms(self.obj1, attach_perms=True)
        self.assertEqual(len(groups), 0)

    def test_mixed(self):
        # Permissions on other objects/models must not leak into the result.
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.group1, self.obj2)
        assign_perm("change_%s" % user_model_name, self.group1, self.user3)
        assign_perm("change_contenttype", self.group2, self.obj2)
        assign_perm("change_contenttype", self.group2, self.obj1)
        assign_perm("delete_contenttype", self.group2, self.obj1)
        assign_perm("change_%s" % user_model_name, self.group3, self.user1)
        groups = get_groups_with_perms(self.obj1)
        self.assertEqual(set(groups), set([self.group1, self.group2]))

    def test_mixed_attach_perms(self):
        assign_perm("change_contenttype", self.group1, self.obj1)
        assign_perm("change_contenttype", self.group1, self.obj2)
        assign_perm("change_group", self.group1, self.group3)
        assign_perm("change_contenttype", self.group2, self.obj2)
        assign_perm("change_contenttype", self.group2, self.obj1)
        assign_perm("delete_contenttype", self.group2, self.obj1)
        assign_perm("change_group", self.group3, self.group1)
        expected = {
            self.group1: ["change_contenttype"],
            self.group2: ["change_contenttype", "delete_contenttype"],
        }
        result = get_groups_with_perms(self.obj1, attach_perms=True)
        self.assertEqual(result.keys(), expected.keys())
        for group, perms in result.items():
            self.assertEqual(set(perms), set(expected[group]))
class GetObjectsForUser(TestCase):
    """Tests get_objects_for_user function."""

    def setUp(self):
        self.user = User.objects.create(username='joe')
        self.group = Group.objects.create(name='group')
        self.ctype = ContentType.objects.create(
            name='foo', model='bar', app_label='fake-for-guardian-tests')

    def test_superuser(self):
        self.user.is_superuser = True
        ctypes = ContentType.objects.all()
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ctypes)
        self.assertEqual(set(ctypes), set(objects))

    def test_with_superuser_true(self):
        self.user.is_superuser = True
        ctypes = ContentType.objects.all()
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ctypes,
            with_superuser=True)
        self.assertEqual(set(ctypes), set(objects))

    def test_with_superuser_false(self):
        # With with_superuser=False even a superuser only sees explicit grants.
        self.user.is_superuser = True
        ctypes = ContentType.objects.all()
        obj1 = ContentType.objects.create(
            name='ct1', model='foo', app_label='guardian-tests')
        assign_perm('change_contenttype', self.user, obj1)
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ctypes,
            with_superuser=False)
        self.assertEqual(set([obj1]), set(objects))

    def test_anonymous(self):
        self.user = AnonymousUser()
        ctypes = ContentType.objects.all()
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ctypes)
        obj1 = ContentType.objects.create(
            name='ct1', model='foo', app_label='guardian-tests')
        assign_perm('change_contenttype', self.user, obj1)
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ctypes)
        self.assertEqual(set([obj1]), set(objects))

    def test_mixed_perms(self):
        codenames = [
            get_user_permission_full_codename('change'),
            'auth.change_permission',
        ]
        self.assertRaises(MixedContentTypeError, get_objects_for_user,
                          self.user, codenames)

    def test_perms_with_mixed_apps(self):
        codenames = [
            get_user_permission_full_codename('change'),
            'contenttypes.change_contenttype',
        ]
        self.assertRaises(MixedContentTypeError, get_objects_for_user,
                          self.user, codenames)

    def test_mixed_perms_and_klass(self):
        self.assertRaises(MixedContentTypeError, get_objects_for_user,
                          self.user, ['auth.change_group'], User)

    def test_no_app_label_nor_klass(self):
        self.assertRaises(WrongAppError, get_objects_for_user, self.user,
                          ['change_group'])

    def test_empty_perms_sequence(self):
        objects = get_objects_for_user(self.user, [], Group.objects.all())
        self.assertEqual(set(objects), set())

    def test_perms_single(self):
        # A bare string and a one-element list are equivalent.
        perm = 'auth.change_group'
        assign_perm(perm, self.user, self.group)
        self.assertEqual(
            set(get_objects_for_user(self.user, perm)),
            set(get_objects_for_user(self.user, [perm])))

    def test_klass_as_model(self):
        assign_perm('contenttypes.change_contenttype', self.user, self.ctype)
        objects = get_objects_for_user(
            self.user, ['contenttypes.change_contenttype'], ContentType)
        self.assertEqual([obj.name for obj in objects], [self.ctype.name])

    def test_klass_as_manager(self):
        assign_perm('auth.change_group', self.user, self.group)
        objects = get_objects_for_user(self.user, ['auth.change_group'],
                                       Group.objects)
        self.assertEqual([obj.name for obj in objects], [self.group.name])

    def test_klass_as_queryset(self):
        assign_perm('auth.change_group', self.user, self.group)
        objects = get_objects_for_user(self.user, ['auth.change_group'],
                                       Group.objects.all())
        self.assertEqual([obj.name for obj in objects], [self.group.name])

    def test_ensure_returns_queryset(self):
        objects = get_objects_for_user(self.user, ['auth.change_group'])
        self.assertTrue(isinstance(objects, QuerySet))

    def test_simple(self):
        names = ['group1', 'group2', 'group3']
        groups = [Group.objects.create(name=name) for name in names]
        for group in groups:
            assign_perm('change_group', self.user, group)
        objects = get_objects_for_user(self.user, ['auth.change_group'])
        self.assertEqual(len(objects), len(groups))
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(set(objects), set(groups))

    def test_multiple_perms_to_check(self):
        names = ['group1', 'group2', 'group3']
        groups = [Group.objects.create(name=name) for name in names]
        for group in groups:
            assign_perm('auth.change_group', self.user, group)
        assign_perm('auth.delete_group', self.user, groups[1])
        # Default behavior requires ALL listed permissions per object.
        objects = get_objects_for_user(self.user, ['auth.change_group',
                                                   'auth.delete_group'])
        self.assertEqual(len(objects), 1)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(set(objects.values_list('name', flat=True)),
                         set([groups[1].name]))

    def test_multiple_perms_to_check_no_groups(self):
        names = ['group1', 'group2', 'group3']
        groups = [Group.objects.create(name=name) for name in names]
        for group in groups:
            assign_perm('auth.change_group', self.user, group)
        assign_perm('auth.delete_group', self.user, groups[1])
        objects = get_objects_for_user(self.user, ['auth.change_group',
                                                   'auth.delete_group'],
                                       use_groups=False)
        self.assertEqual(len(objects), 1)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(set(objects.values_list('name', flat=True)),
                         set([groups[1].name]))

    def test_any_of_multiple_perms_to_check(self):
        names = ['group1', 'group2', 'group3']
        groups = [Group.objects.create(name=name) for name in names]
        assign_perm('auth.change_group', self.user, groups[0])
        assign_perm('auth.delete_group', self.user, groups[2])
        # any_perm=True widens the match to objects holding ANY listed perm.
        objects = get_objects_for_user(self.user, ['auth.change_group',
                                                   'auth.delete_group'],
                                       any_perm=True)
        self.assertEqual(len(objects), 2)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(set(objects.values_list('name', flat=True)),
                         set([groups[0].name, groups[2].name]))

    def test_groups_perms(self):
        groups = [Group.objects.create(name=name)
                  for name in ('group1', 'group2', 'group3')]
        for group in groups:
            self.user.groups.add(group)
        # Objects to operate on
        ctypes = list(ContentType.objects.all().order_by('id'))
        assign_perm('change_contenttype', self.user, ctypes[0])
        assign_perm('change_contenttype', self.user, ctypes[1])
        assign_perm('delete_contenttype', self.user, ctypes[1])
        assign_perm('delete_contenttype', self.user, ctypes[2])
        assign_perm('change_contenttype', groups[0], ctypes[3])
        assign_perm('change_contenttype', groups[1], ctypes[3])
        assign_perm('change_contenttype', groups[2], ctypes[4])
        assign_perm('delete_contenttype', groups[0], ctypes[0])
        objects = get_objects_for_user(self.user,
                                       ['contenttypes.change_contenttype'])
        self.assertEqual(set(objects.values_list('id', flat=True)),
                         set(ctypes[i].id for i in [0, 1, 3, 4]))
        objects = get_objects_for_user(self.user,
                                       ['contenttypes.change_contenttype',
                                        'contenttypes.delete_contenttype'])
        self.assertEqual(set(objects.values_list('id', flat=True)),
                         set(ctypes[i].id for i in [0, 1]))
        objects = get_objects_for_user(self.user,
                                       ['contenttypes.change_contenttype'])
        self.assertEqual(set(objects.values_list('id', flat=True)),
                         set(ctypes[i].id for i in [0, 1, 3, 4]))
class GetObjectsForGroup(TestCase):
    """
    Tests get_objects_for_group function.
    """
    def setUp(self):
        # Three target objects and three groups; users are created for
        # parity with GetObjectsForUser but receive no grants here.
        self.obj1 = ContentType.objects.create(name='ct1', model='foo',
                                               app_label='guardian-tests')
        self.obj2 = ContentType.objects.create(name='ct2', model='bar',
                                               app_label='guardian-tests')
        self.obj3 = ContentType.objects.create(name='ct3', model='baz',
                                               app_label='guardian-tests')
        self.user1 = User.objects.create(username='user1')
        self.user2 = User.objects.create(username='user2')
        self.user3 = User.objects.create(username='user3')
        self.group1 = Group.objects.create(name='group1')
        self.group2 = Group.objects.create(name='group2')
        self.group3 = Group.objects.create(name='group3')

    def test_mixed_perms(self):
        codenames = [
            get_user_permission_full_codename('change'),
            'auth.change_permission',
        ]
        self.assertRaises(MixedContentTypeError, get_objects_for_group,
                          self.group1, codenames)

    def test_perms_with_mixed_apps(self):
        # Fix: the second codename previously read
        # 'contenttypes.contenttypes.change_contenttype' (duplicated app
        # label). Use the proper 'app_label.codename' form, mirroring
        # GetObjectsForUser.test_perms_with_mixed_apps.
        codenames = [
            get_user_permission_full_codename('change'),
            'contenttypes.change_contenttype',
        ]
        self.assertRaises(MixedContentTypeError, get_objects_for_group,
                          self.group1, codenames)

    def test_mixed_perms_and_klass(self):
        self.assertRaises(MixedContentTypeError, get_objects_for_group,
                          self.group1, ['auth.change_group'], User)

    def test_no_app_label_nor_klass(self):
        self.assertRaises(WrongAppError, get_objects_for_group, self.group1,
                          ['change_contenttype'])

    def test_empty_perms_sequence(self):
        self.assertEqual(
            set(get_objects_for_group(self.group1, [], ContentType)),
            set()
        )

    def test_perms_single(self):
        # A bare string and a one-element list are equivalent.
        perm = 'contenttypes.change_contenttype'
        assign_perm(perm, self.group1, self.obj1)
        self.assertEqual(
            set(get_objects_for_group(self.group1, perm)),
            set(get_objects_for_group(self.group1, [perm]))
        )

    def test_klass_as_model(self):
        assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
        objects = get_objects_for_group(self.group1,
                                        ['contenttypes.change_contenttype'],
                                        ContentType)
        self.assertEqual([obj.name for obj in objects], [self.obj1.name])

    def test_klass_as_manager(self):
        assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
        objects = get_objects_for_group(self.group1, ['change_contenttype'],
                                        ContentType.objects)
        self.assertEqual(list(objects), [self.obj1])

    def test_klass_as_queryset(self):
        assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
        objects = get_objects_for_group(self.group1, ['change_contenttype'],
                                        ContentType.objects.all())
        self.assertEqual(list(objects), [self.obj1])

    def test_ensure_returns_queryset(self):
        objects = get_objects_for_group(self.group1,
                                        ['contenttypes.change_contenttype'])
        self.assertTrue(isinstance(objects, QuerySet))

    def test_simple(self):
        assign_perm('change_contenttype', self.group1, self.obj1)
        assign_perm('change_contenttype', self.group1, self.obj2)
        objects = get_objects_for_group(self.group1,
                                        'contenttypes.change_contenttype')
        self.assertEqual(len(objects), 2)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(
            set(objects),
            set([self.obj1, self.obj2]))

    def test_simple_after_removal(self):
        # Builds on test_simple's fixture, then revokes one grant.
        self.test_simple()
        remove_perm('change_contenttype', self.group1, self.obj1)
        objects = get_objects_for_group(self.group1,
                                        'contenttypes.change_contenttype')
        self.assertEqual(len(objects), 1)
        self.assertEqual(objects[0], self.obj2)

    def test_multiple_perms_to_check(self):
        # Default behavior requires ALL listed permissions per object.
        assign_perm('change_contenttype', self.group1, self.obj1)
        assign_perm('delete_contenttype', self.group1, self.obj1)
        assign_perm('change_contenttype', self.group1, self.obj2)
        objects = get_objects_for_group(self.group1, [
            'contenttypes.change_contenttype',
            'contenttypes.delete_contenttype'])
        self.assertEqual(len(objects), 1)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual(objects[0], self.obj1)

    def test_any_of_multiple_perms_to_check(self):
        # any_perm=True widens the match to objects holding ANY listed perm.
        assign_perm('change_contenttype', self.group1, self.obj1)
        assign_perm('delete_contenttype', self.group1, self.obj1)
        assign_perm('add_contenttype', self.group1, self.obj2)
        assign_perm('delete_contenttype', self.group1, self.obj3)
        objects = get_objects_for_group(self.group1,
                                        ['contenttypes.change_contenttype',
                                         'contenttypes.delete_contenttype'],
                                        any_perm=True)
        self.assertTrue(isinstance(objects, QuerySet))
        self.assertEqual([obj for obj in objects.order_by('name')],
                         [self.obj1, self.obj3])

    def test_results_for_different_groups_are_correct(self):
        assign_perm('change_contenttype', self.group1, self.obj1)
        assign_perm('delete_contenttype', self.group2, self.obj2)
        self.assertEqual(
            set(get_objects_for_group(self.group1,
                                      'contenttypes.change_contenttype')),
            set([self.obj1]))
        self.assertEqual(
            set(get_objects_for_group(self.group2,
                                      'contenttypes.change_contenttype')),
            set())
        self.assertEqual(
            set(get_objects_for_group(self.group2,
                                      'contenttypes.delete_contenttype')),
            set([self.obj2]))
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model constructor for Tensorflow state-level models."""
from typing import Dict, List
import numpy as np
import tensorflow as tf
from covid_epidemiology.src import constants
from covid_epidemiology.src.models import generic_seir_model_constructor
from covid_epidemiology.src.models import losses
from covid_epidemiology.src.models.shared import model_utils
class StateModelConstructor(generic_seir_model_constructor.ModelConstructor):
"""Constructs a state Tensorflow model, to be used in tf_seir."""
def __init__(self, model_spec, random_seed=0):
super(StateModelConstructor, self).__init__(model_spec, random_seed)
self.num_states = 17
def extract_prediction(self, all_states):
"""Extract the death and confirmed predictions."""
confirmed_all = list()
death_all = list()
for curr_state in all_states:
# pylint: disable=unused-variable
(exposed_t, infected_d_t, infected_ud_t, recovered_d_t, recovered_ud_t,
hospitalized_t, hospitalized_cumulative_t, hospitalized_increase_t,
icu_t, ventilator_t, death_t, population_t, reinfectable_d_t,
reinfectable_ud_t, reinfectable_vaccine_t, vaccine_immuned_t,
infected_ud_increase_t) = tf.unstack(curr_state)
# Include ICU and Ventilator since they are separate compartments.
confirmed_t = (
infected_d_t + recovered_d_t + hospitalized_t + icu_t + ventilator_t +
death_t + reinfectable_d_t)
confirmed_all.append(confirmed_t)
death_all.append(death_t)
return {"confirmed": confirmed_all, "death": death_all}
def compute_coef(self,
ground_truth_timeseries,
ground_truth_state,
num_train_steps,
num_known_steps,
power=2.0):
"""Compute train/valid coefficients for loss computation.
Args:
ground_truth_timeseries: ground truth compartments
ground_truth_state: ground truth state level compartments
num_train_steps: number of timesteps for training
num_known_steps: number of known timesteps
power: 2 for MSE and 1 for MAE
Returns:
train_coefs: training coeffcients for each compartment
valid_coefs: valid coeffcients for each compartment
"""
(_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
# Recovered
recovered_train, recovered_valid = model_utils.compartment_base(
gt_list["recovered"], gt_indicator["recovered"], num_train_steps,
num_known_steps)
# Death
death_train, death_valid = model_utils.compartment_base(
gt_list["death"], gt_indicator["death"], num_train_steps,
num_known_steps)
# Confirmed
confirmed_train, confirmed_valid = model_utils.compartment_base(
gt_list["confirmed"], gt_indicator["confirmed"], num_train_steps,
num_known_steps)
# Hospitalized
hospitalized_train, hospitalized_valid = model_utils.compartment_base(
gt_list["hospitalized"], gt_indicator["hospitalized"], num_train_steps,
num_known_steps)
# Hospitalized cumulative
hospitalized_cumulative_train, hospitalized_cumulative_valid = model_utils.compartment_base(
gt_list["hospitalized_cumulative"],
gt_indicator["hospitalized_cumulative"], num_train_steps,
num_known_steps)
# ICU
icu_train, icu_valid = model_utils.compartment_base(gt_list["icu"],
gt_indicator["icu"],
num_train_steps,
num_known_steps)
# Ventilator
ventilator_train, ventilator_valid = model_utils.compartment_base(
gt_list["ventilator"], gt_indicator["ventilator"], num_train_steps,
num_known_steps)
train_coefs = [
0, (death_train / recovered_train)**power, 1,
(death_train / confirmed_train)**power,
(death_train / hospitalized_train)**power,
(death_train / hospitalized_cumulative_train)**power,
(death_train / icu_train)**power,
(death_train / ventilator_train)**power
]
valid_coefs = [
0, (death_valid / recovered_valid)**power, 1,
(death_valid / confirmed_valid)**power,
(death_valid / hospitalized_valid)**power,
(death_valid / hospitalized_cumulative_valid)**power,
(death_valid / icu_valid)**power,
(death_valid / ventilator_valid)**power
]
train_coefs = np.nan_to_num(train_coefs).tolist()
valid_coefs = np.nan_to_num(valid_coefs).tolist()
return train_coefs, valid_coefs
  def seir_dynamics(self, current_state, seir_variables):
    """Computes the time derivatives of the extended SEIR compartment states.

    Args:
      current_state: Stacked tensor of the 17 compartment states, in the same
        order as `all_state_derivatives` below (exposed, infected documented,
        infected undocumented, ..., infected undocumented increase).
      seir_variables: Tuple/tensor of the 19 model rates and variables.

    Returns:
      Stacked tensor of d(state)/dt with one entry per compartment, in the
      same order as the unstacked `current_state`.
    """
    (first_dose_vaccine_ratio_per_day, second_dose_vaccine_ratio_per_day,
     average_contact_id, average_contact_iud, reinfectable_rate, alpha,
     diagnosis_rate, recovery_rate_id, recovery_rate_iud, recovery_rate_h,
     recovery_rate_i, recovery_rate_v, hospitalization_rate, icu_rate,
     ventilator_rate, death_rate_id, death_rate_h, death_rate_i,
     death_rate_v) = seir_variables
    # pylint: disable=unused-variable
    (exposed_t, infected_d_t, infected_ud_t, recovered_d_t, recovered_ud_t,
     hospitalized_t, hospitalized_cumulative_t, hospitalized_increase_t, icu_t,
     ventilator_t, death_t, population_t, reinfectable_d_t, reinfectable_ud_t,
     reinfectable_vaccine_t, vaccine_immuned_t,
     infected_ud_increase_t) = tf.unstack(current_state)
    # Setting the susceptible so that the population adds up to a constant.
    normalized_susceptible_t = 1.0 - (
        exposed_t + infected_d_t + infected_ud_t + recovered_d_t +
        recovered_ud_t + hospitalized_t + icu_t + ventilator_t + death_t +
        vaccine_immuned_t) / population_t
    # Clip at zero so numerical drift cannot make susceptible negative.
    normalized_susceptible_t = tf.nn.relu(normalized_susceptible_t)
    # Differential change on vaccine immuned.
    d_vaccine_immuned_dt = (
        first_dose_vaccine_ratio_per_day * population_t +
        second_dose_vaccine_ratio_per_day * population_t -
        reinfectable_vaccine_t - vaccine_immuned_t)
    # Differential change on reinfectable after vaccination.
    d_reinfectable_vaccine_dt = vaccine_immuned_t * 1.0 / constants.VACCINE_IMMUNITY_DURATION
    # Differential change on exposed.
    d_exposed_dt = (average_contact_id * infected_d_t +
                    average_contact_iud * infected_ud_t
                   ) * normalized_susceptible_t - alpha * exposed_t
    # Differential change on infected, documented and undocumented.
    d_infected_d_dt = (
        diagnosis_rate * infected_ud_t - recovery_rate_id * infected_d_t -
        death_rate_id * infected_d_t - hospitalization_rate * infected_d_t)
    d_infected_ud_dt = (
        alpha * exposed_t - diagnosis_rate * infected_ud_t -
        recovery_rate_iud * infected_ud_t)
    d_infected_ud_increase_dt = alpha * exposed_t - infected_ud_increase_t
    # Differential change on recovered, documented and undocumented.
    d_recovered_d_dt = (
        recovery_rate_id * infected_d_t + recovery_rate_h * hospitalized_t -
        reinfectable_rate * recovered_d_t)
    d_recovered_ud_dt = (
        recovery_rate_iud * infected_ud_t - reinfectable_rate * recovered_ud_t)
    # Differential change on hospitalized.
    d_hospitalized_d_dt = (
        hospitalization_rate * infected_d_t -
        (death_rate_h + recovery_rate_h + icu_rate) * hospitalized_t +
        recovery_rate_i * icu_t)
    d_hospitalized_cumulative_d_dt = (hospitalization_rate * infected_d_t)
    d_hospitalized_increase_d_dt = (
        hospitalization_rate * infected_d_t - hospitalized_increase_t)
    # Differential change on icu.
    d_icu_d_dt = (
        icu_rate * hospitalized_t -
        (death_rate_i + recovery_rate_i + ventilator_rate) * icu_t +
        recovery_rate_v * ventilator_t)
    # Differential change on ventilator.
    d_ventilator_d_dt = (
        ventilator_rate * icu_t -
        (death_rate_v + recovery_rate_v) * ventilator_t)
    # Differential change on death, documented.
    d_death_d_dt = (
        death_rate_id * infected_d_t + death_rate_h * hospitalized_t +
        death_rate_i * icu_t + death_rate_v * ventilator_t)
    # Differential change on recovered, who may get the disease again.
    d_reinfectable_d_dt = reinfectable_rate * recovered_d_t
    d_reinfectable_ud_dt = reinfectable_rate * recovered_ud_t
    # Position 11 corresponds to population_t: population shrinks exactly by
    # deaths (-d_death_d_dt), which keeps the total population conserved.
    all_state_derivatives = [
        d_exposed_dt, d_infected_d_dt, d_infected_ud_dt, d_recovered_d_dt,
        d_recovered_ud_dt, d_hospitalized_d_dt, d_hospitalized_cumulative_d_dt,
        d_hospitalized_increase_d_dt, d_icu_d_dt, d_ventilator_d_dt,
        d_death_d_dt, -d_death_d_dt, d_reinfectable_d_dt, d_reinfectable_ud_dt,
        d_reinfectable_vaccine_dt, d_vaccine_immuned_dt,
        d_infected_ud_increase_dt
    ]
    return tf.stack(all_state_derivatives)
  def compute_losses(self,
                     hparams,
                     train_coefs,
                     valid_coefs,
                     propagated_states,
                     ground_truth_timeseries,
                     r_eff,
                     train_start_index,
                     train_end_index,
                     valid_start_index,
                     valid_end_index,
                     num_forecast_steps,
                     quantiles=None):
    """Computes overall train and validation losses across all compartments.

    Args:
      hparams: Hyperparameter dictionary (loss coefficients, CRPS weights,
        penalty coefficients, etc.).
      train_coefs: Per-compartment training-loss weights (length 8, ordered
        infected, recovered, death, confirmed, hospitalized, hospitalized
        cumulative, icu, ventilator).
      valid_coefs: Per-compartment validation-loss weights, same order.
      propagated_states: Predictions indexed by timestep * states * location.
      ground_truth_timeseries: Ground truth time series tuple; elements 1 and
        2 (gt_list, gt_indicator) are used here.
      r_eff: Effective reproduction number tensor, or None.
      train_start_index: Start timestep of the training window.
      train_end_index: End timestep of the training window.
      valid_start_index: Start timestep of the validation window.
      valid_end_index: End timestep of the validation window.
      num_forecast_steps: Number of forecasting steps.
      quantiles: Optional list of quantile levels. When given, interval/CRPS
        quantile losses replace the point-estimation loss.

    Returns:
      Tuple of (train_loss_overall, valid_loss_overall).
    """
    train_loss_coefs = hparams["train_loss_coefs"]
    valid_loss_coefs = hparams["valid_loss_coefs"]
    time_scale_weight = hparams["time_scale_weight"]
    width_coef_train = hparams["width_coef_train"]
    width_coef_valid = hparams["width_coef_valid"]
    quantile_cum_viol_coef = hparams["quantile_cum_viol_coef"]
    increment_loss_weight = hparams["increment_loss_weight"]
    train_crps_weight = hparams["train_crps_weight"]
    valid_crps_weight = hparams["valid_crps_weight"]
    (_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
    # State indices below follow the stacking order used in seir_dynamics.
    unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
    pred_infected = unstacked_propagated_states[1]
    pred_recovered = unstacked_propagated_states[3]
    pred_hospitalized = unstacked_propagated_states[5]
    pred_hospitalized_cumulative = unstacked_propagated_states[6]
    pred_icu = unstacked_propagated_states[8]
    pred_ventilator = unstacked_propagated_states[9]
    pred_death = unstacked_propagated_states[10]
    pred_reinfected = unstacked_propagated_states[12]
    # Confirmed is reconstructed as the sum of all documented compartments.
    pred_confirmed = (
        pred_infected + pred_recovered + pred_death + pred_hospitalized +
        pred_icu + pred_ventilator + pred_reinfected)
    train_start_index = tf.identity(train_start_index)
    train_end_index = tf.identity(train_end_index)
    valid_start_index = tf.identity(valid_start_index)
    valid_end_index = tf.identity(valid_end_index)
    if quantiles is not None:
      quantiles = tf.constant(quantiles, dtype=tf.float32)

    # Use quantile loss if quantile values are given.
    def loss(pred_states,
             gt_list,
             gt_indicator,
             train_start_index,
             train_end_index,
             valid_start_index,
             valid_end_index,
             time_scale_weight=0,
             is_training=True):
      """Returns (train, valid) losses for one compartment's predictions.

      NOTE(review): all call sites below rely on the default
      is_training=True, so the width_coef_valid branch appears unused here —
      confirm whether that is intended.
      """
      if quantiles is not None:
        if is_training:
          train_loss = losses.weighted_interval_loss(
              quantile_pred_states=pred_states,
              tau_list=quantiles,
              gt_list=gt_list,
              gt_indicator=gt_indicator,
              begin_timestep=train_start_index,
              end_timestep=train_end_index,
              time_scale_weight=time_scale_weight,
              width_coef=width_coef_train)
          valid_loss = losses.weighted_interval_loss(
              quantile_pred_states=pred_states,
              tau_list=quantiles,
              gt_list=gt_list,
              gt_indicator=gt_indicator,
              begin_timestep=valid_start_index,
              end_timestep=valid_end_index,
              time_scale_weight=time_scale_weight,
              width_coef=width_coef_train)
        else:
          train_loss = losses.weighted_interval_loss(
              quantile_pred_states=pred_states,
              tau_list=quantiles,
              gt_list=gt_list,
              gt_indicator=gt_indicator,
              begin_timestep=train_start_index,
              end_timestep=train_end_index,
              time_scale_weight=time_scale_weight,
              width_coef=width_coef_valid)
          valid_loss = losses.weighted_interval_loss(
              quantile_pred_states=pred_states,
              tau_list=quantiles,
              gt_list=gt_list,
              gt_indicator=gt_indicator,
              begin_timestep=valid_start_index,
              end_timestep=valid_end_index,
              time_scale_weight=time_scale_weight,
              width_coef=width_coef_valid)
        # CRPS terms are added on top of the interval loss.
        train_loss += train_crps_weight * losses.crps_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=train_start_index,
            end_timestep=train_end_index,
            time_scale_weight=time_scale_weight)
        valid_loss += valid_crps_weight * losses.crps_loss(
            quantile_pred_states=pred_states,
            tau_list=quantiles,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=valid_start_index,
            end_timestep=valid_end_index,
            time_scale_weight=time_scale_weight)
      else:
        train_loss = losses.state_estimation_loss(
            pred_states=pred_states,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=train_start_index,
            end_timestep=train_end_index,
            time_scale_weight=time_scale_weight,
            increment_loss_weight=increment_loss_weight,
            num_forecast_steps=num_forecast_steps)
        valid_loss = losses.state_estimation_loss(
            pred_states=pred_states,
            gt_list=gt_list,
            gt_indicator=gt_indicator,
            begin_timestep=valid_start_index,
            end_timestep=valid_end_index,
            time_scale_weight=time_scale_weight,
            increment_loss_weight=increment_loss_weight,
            num_forecast_steps=num_forecast_steps)
      return train_loss, valid_loss

    infected_doc_train_loss, infected_doc_valid_loss = loss(
        pred_infected,
        gt_list["infected"],
        gt_indicator["infected"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    recovered_doc_train_loss, recovered_doc_valid_loss = loss(
        pred_recovered + pred_reinfected,
        gt_list["recovered"],
        gt_indicator["recovered"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    death_train_loss, death_valid_loss = loss(
        pred_death,
        gt_list["death"],
        gt_indicator["death"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    # Hospitalized ground truth covers hospital + ICU + ventilator beds.
    hospitalized_train_loss, hospitalized_valid_loss = loss(
        pred_hospitalized + pred_icu + pred_ventilator,
        gt_list["hospitalized"],
        gt_indicator["hospitalized"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    hospitalized_cumulative_train_loss, hospitalized_cumulative_valid_loss = loss(
        pred_hospitalized_cumulative,
        gt_list["hospitalized_cumulative"],
        gt_indicator["hospitalized_cumulative"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    icu_train_loss, icu_valid_loss = loss(
        pred_icu + pred_ventilator,
        gt_list["icu"],
        gt_indicator["icu"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    ventilator_train_loss, ventilator_valid_loss = loss(
        pred_ventilator,
        gt_list["ventilator"],
        gt_indicator["ventilator"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    confirmed_train_loss, confirmed_valid_loss = loss(
        pred_confirmed,
        gt_list["confirmed"],
        gt_indicator["confirmed"],
        train_start_index,
        train_end_index,
        valid_start_index,
        valid_end_index,
        time_scale_weight=time_scale_weight)
    # Weighted sums: each term is (balance coef) * (hparam coef) * loss.
    train_loss_overall = (
        train_coefs[0] * train_loss_coefs[0] * infected_doc_train_loss +
        train_coefs[1] * train_loss_coefs[1] * recovered_doc_train_loss +
        train_coefs[2] * train_loss_coefs[2] * death_train_loss +
        train_coefs[3] * train_loss_coefs[3] * confirmed_train_loss +
        train_coefs[4] * train_loss_coefs[4] * hospitalized_train_loss +
        train_coefs[5] *
        (train_loss_coefs[5] * hospitalized_cumulative_train_loss) +
        train_coefs[6] * train_loss_coefs[6] * icu_train_loss +
        train_coefs[7] * train_loss_coefs[7] * ventilator_train_loss)
    valid_loss_overall = (
        valid_coefs[0] * valid_loss_coefs[0] * infected_doc_valid_loss +
        valid_coefs[1] * valid_loss_coefs[1] * recovered_doc_valid_loss +
        valid_coefs[2] * valid_loss_coefs[2] * death_valid_loss +
        valid_coefs[3] * valid_loss_coefs[3] * confirmed_valid_loss +
        valid_coefs[4] * valid_loss_coefs[4] * hospitalized_valid_loss +
        valid_coefs[5] *
        (valid_loss_coefs[5] * hospitalized_cumulative_valid_loss) +
        valid_coefs[6] * valid_loss_coefs[6] * icu_valid_loss +
        valid_coefs[7] * valid_loss_coefs[7] * ventilator_valid_loss)
    # Loss for r_eff. Penalize r_eff above the configured cutoff.
    if quantiles is None:
      if r_eff is not None:
        train_loss_overall += (
            hparams["r_eff_penalty_coef"] * tf.math.reduce_mean(
                tf.math.softplus(r_eff - hparams["r_eff_penalty_cutoff"])))
      # Calculate acceleration penalties on the point forecasts.
      train_loss_overall += (
          hparams["acceleration_death_coef"] *
          self.acceleration_loss(pred_death, 3))
      train_loss_overall += (
          hparams["acceleration_confirm_coef"] *
          self.acceleration_loss(pred_confirmed, 3))
      train_loss_overall += (
          hparams["acceleration_hospital_coef"] *
          self.acceleration_loss(pred_hospitalized, 3))
    else:
      # Quantile cumulative violation penalty.
      forecasting_horizon = valid_end_index - valid_start_index
      # NOTE(review): the first argument here is forecasting_horizon, while
      # the valid_* calls below pass a start index in that position — confirm
      # this asymmetry is intended.
      train_violation_confirmed = losses.quantile_viol_loss(
          forecasting_horizon, train_end_index, forecasting_horizon,
          gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
      train_violation_death = losses.quantile_viol_loss(
          forecasting_horizon, train_end_index, forecasting_horizon,
          gt_indicator["death"], gt_list["death"], pred_death)
      train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
          train_violation_confirmed)
      train_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
          train_violation_death)
      valid_violation_confirmed = losses.quantile_viol_loss(
          valid_start_index, valid_end_index, forecasting_horizon,
          gt_indicator["confirmed"], gt_list["confirmed"], pred_confirmed)
      valid_violation_death = losses.quantile_viol_loss(
          valid_start_index, valid_end_index, forecasting_horizon,
          gt_indicator["death"], gt_list["death"], pred_death)
      valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
          valid_violation_confirmed)
      valid_loss_overall += quantile_cum_viol_coef * tf.reduce_mean(
          valid_violation_death)
    return train_loss_overall, valid_loss_overall
  def unpack_states(self,
                    chosen_location_list,
                    ground_truth_timeseries,
                    propagated_states,
                    propagated_variables,
                    num_forecast_steps,
                    quantile_regression=False):
    """Unpacks propagated states into per-location prediction dictionaries.

    Args:
      chosen_location_list: List of location keys.
      ground_truth_timeseries: Ground truth time series tuple; only gt_list
        is used, and only when quantile_regression is True.
      propagated_states: Tensor indexed by timestep * states * location
        (state order matches seir_dynamics).
      propagated_variables: Tensor of fitted rate variables, forwarded to
        self.extract_rates.
      num_forecast_steps: Number of forecasting steps (horizon for the
        *_horizon_ahead_* increments).
      quantile_regression: When True, applies lower-bound post-processing to
        the cumulative quantile predictions.

    Returns:
      Tuple of per-location dictionaries, one per compartment, followed by
      the extracted rates dictionary.
    """
    # Assign in the desired dictionary form.
    susceptible_f_all_locations = {}
    exposed_f_all_locations = {}
    infected_d_f_all_locations = {}
    infected_ud_f_all_locations = {}
    recovered_d_f_all_locations = {}
    recovered_ud_f_all_locations = {}
    death_d_f_all_locations = {}
    death_horizon_ahead_d_f_all_locations = {}
    confirmed_f_all_locations = {}
    confirmed_horizon_ahead_d_f_all_locations = {}
    hospitalized_f_all_locations = {}
    hospitalized_increase_f_all_locations = {}
    hospitalized_cumulative_f_all_locations = {}
    icu_f_all_locations = {}
    ventilator_f_all_locations = {}
    reinfectable_d_f_all_locations = {}
    reinfectable_ud_f_all_locations = {}
    population_f_all_locations = {}
    reinfectable_vaccine_f_all_locations = {}
    vaccine_immuned_t_f_all_locations = {}
    infected_ud_increase_f_all_locations = {}
    for location_index, location in enumerate(chosen_location_list):
      exposed_f_all_locations[
          location] = propagated_states[:, 0, location_index].numpy()
      infected_d_f_all_locations[
          location] = propagated_states[:, 1, location_index].numpy()
      infected_ud_f_all_locations[
          location] = propagated_states[:, 2, location_index].numpy()
      recovered_d_f_all_locations[location] = (
          propagated_states[:, 3, location_index].numpy())
      recovered_ud_f_all_locations[location] = (
          propagated_states[:, 4, location_index].numpy())
      # Reported "hospitalized" includes ICU and ventilator occupancy.
      hospitalized_f_all_locations[location] = (
          propagated_states[:, 5, location_index].numpy() +
          propagated_states[:, 8, location_index].numpy() +
          propagated_states[:, 9, location_index].numpy())
      hospitalized_increase_f_all_locations[
          location] = propagated_states[:, 7, location_index].numpy()
      hospitalized_cumulative_f_all_locations[
          location] = propagated_states[:, 6, location_index].numpy()
      # Reported "icu" includes ventilator occupancy.
      icu_f_all_locations[location] = (
          propagated_states[:, 8, location_index].numpy() +
          propagated_states[:, 9, location_index].numpy())
      ventilator_f_all_locations[
          location] = propagated_states[:, 9, location_index].numpy()
      death_d_f_all_locations[
          location] = propagated_states[:, 10, location_index].numpy()
      # Increment of deaths over a num_forecast_steps-wide window.
      death_horizon_ahead_d_f_all_locations[location] = (
          propagated_states[num_forecast_steps - 1:, 10,
                            location_index].numpy() -
          propagated_states[:-num_forecast_steps + 1, 10,
                            location_index].numpy())
      population_f_all_locations[
          location] = propagated_states[:, 11, location_index].numpy()
      reinfectable_d_f_all_locations[
          location] = propagated_states[:, 12, location_index].numpy()
      reinfectable_ud_f_all_locations[
          location] = propagated_states[:, 13, location_index].numpy()
      reinfectable_vaccine_f_all_locations[
          location] = propagated_states[:, 14, location_index].numpy()
      vaccine_immuned_t_f_all_locations[
          location] = propagated_states[:, 15, location_index].numpy()
      infected_ud_increase_f_all_locations[
          location] = propagated_states[:, 16, location_index].numpy()
      # Confirmed = sum of documented compartments.
      confirmed_f_all_locations[location] = (
          infected_d_f_all_locations[location] +
          recovered_d_f_all_locations[location] +
          death_d_f_all_locations[location] +
          hospitalized_f_all_locations[location])
      confirmed_horizon_ahead_d_f_all_locations[location] = (
          confirmed_f_all_locations[location][num_forecast_steps - 1:, :] -
          confirmed_f_all_locations[location][:-num_forecast_steps + 1, :])
      # Susceptible is the remainder of the population, clipped at zero.
      susceptible_f_all_locations[location] = np.maximum(
          0, (population_f_all_locations[location] -
              confirmed_f_all_locations[location] -
              exposed_f_all_locations[location] -
              recovered_ud_f_all_locations[location] -
              infected_ud_f_all_locations[location] -
              vaccine_immuned_t_f_all_locations[location]))
      # Fold reinfectables back into the recovered/confirmed outputs.
      recovered_d_f_all_locations[location] = (
          recovered_d_f_all_locations[location] +
          reinfectable_d_f_all_locations[location])
      recovered_ud_f_all_locations[location] = (
          recovered_ud_f_all_locations[location] +
          reinfectable_ud_f_all_locations[location])
      confirmed_f_all_locations[location] = (
          confirmed_f_all_locations[location] +
          reinfectable_d_f_all_locations[location])
      # Lower bound of the cumulative quantiles are the last values.
      # for all constructors.
      if quantile_regression:
        (_, gt_list, _, _, _) = ground_truth_timeseries
        death_d_f_all_locations = self.lowerbound_postprocessing(
            death_d_f_all_locations, gt_list["death"][:, location_index],
            location, num_forecast_steps)
        confirmed_f_all_locations = self.lowerbound_postprocessing(
            confirmed_f_all_locations, gt_list["confirmed"][:, location_index],
            location, num_forecast_steps)
        recovered_d_f_all_locations = self.lowerbound_postprocessing(
            recovered_d_f_all_locations, gt_list["recovered"][:,
                                                              location_index],
            location, num_forecast_steps)
        recovered_ud_f_all_locations = self.lowerbound_postprocessing(
            recovered_ud_f_all_locations, None, location, num_forecast_steps)
        reinfectable_d_f_all_locations = self.lowerbound_postprocessing(
            reinfectable_d_f_all_locations, None, location, num_forecast_steps)
        reinfectable_ud_f_all_locations = self.lowerbound_postprocessing(
            reinfectable_ud_f_all_locations, None, location, num_forecast_steps)
    rates = self.extract_rates(propagated_variables, chosen_location_list)
    return (susceptible_f_all_locations, exposed_f_all_locations,
            infected_d_f_all_locations, infected_ud_f_all_locations,
            recovered_d_f_all_locations, recovered_ud_f_all_locations,
            death_d_f_all_locations, death_horizon_ahead_d_f_all_locations,
            confirmed_f_all_locations,
            confirmed_horizon_ahead_d_f_all_locations,
            hospitalized_f_all_locations, hospitalized_increase_f_all_locations,
            hospitalized_cumulative_f_all_locations, icu_f_all_locations,
            ventilator_f_all_locations, infected_ud_increase_f_all_locations,
            rates)
  def pack_compartments(self, states, ground_truth_timeseries,
                        num_forecast_steps):
    """Packs predictions into compartments with associated ground truth.

    Args:
      states: Tuple produced by unpack_states (per-location prediction
        dictionaries plus the rates dictionary).
      ground_truth_timeseries: Ground truth time series tuple; only the last
        element (the original ground truth) is used here.
      num_forecast_steps: Number of forecasting steps.

    Returns:
      List of Compartment objects, one per model compartment, followed by one
      Compartment per extracted rate.
    """
    (susceptible_f_all_locations, exposed_f_all_locations,
     infected_d_f_all_locations, infected_ud_f_all_locations,
     recovered_d_f_all_locations, recovered_ud_f_all_locations,
     death_d_f_all_locations, death_horizon_ahead_d_f_all_locations,
     confirmed_f_all_locations, confirmed_horizon_ahead_d_f_all_locations,
     hospitalized_f_all_locations, hospitalized_increase_f_all_locations,
     hospitalized_cumulative_f_all_locations, icu_f_all_locations,
     ventilator_f_all_locations, infected_ud_increase_f_all_locations,
     rates) = states
    (_, _, _, _, orig_gt) = ground_truth_timeseries
    # pack all results in a list of compartment dataclasses.
    susceptible_compartment = generic_seir_model_constructor.Compartment(
        name=constants.SUSCEPTIBLE,
        predictions=susceptible_f_all_locations,
        num_forecast_steps=num_forecast_steps)
    exposed_compartment = generic_seir_model_constructor.Compartment(
        name=constants.EXPOSED,
        predictions=exposed_f_all_locations,
        num_forecast_steps=num_forecast_steps)
    infected_d_compartment = generic_seir_model_constructor.Compartment(
        name=constants.INFECTED_DOC,
        predictions=infected_d_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["infected"])
    infected_ud_compartment = generic_seir_model_constructor.Compartment(
        name=constants.INFECTED_UNDOC,
        predictions=infected_ud_f_all_locations,
        num_forecast_steps=num_forecast_steps)
    infected_ud_increase_compartment = generic_seir_model_constructor.Compartment(
        name=constants.INFECTED_UNDOC_INCREASE,
        predictions=infected_ud_increase_f_all_locations,
        num_forecast_steps=num_forecast_steps)
    recovered_d_compartment = generic_seir_model_constructor.Compartment(
        name=constants.RECOVERED_DOC,
        predictions=recovered_d_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["recovered"])
    recovered_ud_compartment = generic_seir_model_constructor.Compartment(
        name=constants.RECOVERED_UNDOC,
        predictions=recovered_ud_f_all_locations,
        num_forecast_steps=num_forecast_steps)
    death_d_compartment = generic_seir_model_constructor.Compartment(
        name=constants.DEATH,
        predictions=death_d_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["death"])
    confirmed_compartment = generic_seir_model_constructor.Compartment(
        name=constants.CONFIRMED,
        predictions=confirmed_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["confirmed"])
    hospitalized_compartment = generic_seir_model_constructor.Compartment(
        name=constants.HOSPITALIZED,
        predictions=hospitalized_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["hospitalized"])
    hospitalized_increase_compartment = (
        generic_seir_model_constructor.Compartment(
            name=constants.HOSPITALIZED_INCREASE,
            predictions=hospitalized_increase_f_all_locations,
            num_forecast_steps=num_forecast_steps))
    hospitalized_cumulative_compartment = (
        generic_seir_model_constructor.Compartment(
            name=constants.HOSPITALIZED_CUMULATIVE,
            predictions=hospitalized_cumulative_f_all_locations,
            num_forecast_steps=num_forecast_steps))
    icu_compartment = generic_seir_model_constructor.Compartment(
        name=constants.ICU,
        predictions=icu_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["icu"])
    ventilator_compartment = generic_seir_model_constructor.Compartment(
        name=constants.VENTILATOR,
        predictions=ventilator_f_all_locations,
        num_forecast_steps=num_forecast_steps,
        ground_truth=orig_gt["ventilator"])

    def create_horizon_ahead_gt(gt):
      """Creates incremental (1-day) ground truth values."""
      horizon_ahead_gt = {}
      for location in gt:
        horizon_ahead_gt[location] = (
            gt[location][num_forecast_steps - 1:] -
            gt[location][:-num_forecast_steps + 1])
      return horizon_ahead_gt

    death_horizon_ahead_d_compartment = (
        generic_seir_model_constructor.Compartment(
            name=constants.HORIZON_AHEAD_DEATH,
            predictions=death_horizon_ahead_d_f_all_locations,
            num_forecast_steps=1,
            ground_truth=create_horizon_ahead_gt(orig_gt["death"])))
    confirmed_horizon_ahead_d_compartment = (
        generic_seir_model_constructor.Compartment(
            name=constants.HORIZON_AHEAD_CONFIRMED,
            predictions=confirmed_horizon_ahead_d_f_all_locations,
            num_forecast_steps=1,
            ground_truth=create_horizon_ahead_gt(orig_gt["confirmed"])))
    # Rates are reported as point estimates only (no quantiles).
    rates_compartments = []
    for name, predictions in rates.items():
      rates_compartments.append(
          generic_seir_model_constructor.Compartment(
              name=name,
              predictions=predictions,
              num_forecast_steps=num_forecast_steps,
              use_quantiles=False))
    compartments = [
        susceptible_compartment, exposed_compartment, infected_d_compartment,
        infected_ud_compartment, recovered_d_compartment,
        recovered_ud_compartment, death_d_compartment,
        death_horizon_ahead_d_compartment, confirmed_compartment,
        confirmed_horizon_ahead_d_compartment, hospitalized_compartment,
        hospitalized_increase_compartment, hospitalized_cumulative_compartment,
        icu_compartment, ventilator_compartment,
        infected_ud_increase_compartment
    ]
    compartments += rates_compartments
    return compartments
  def apply_quantile_transform(self,
                               hparams,
                               propagated_states,
                               quantile_kernel,
                               quantile_biases,
                               ground_truth_timeseries,
                               num_train_steps,
                               num_forecast_steps,
                               num_quantiles=23,
                               epsilon=1e-8,
                               is_training=True,
                               initial_quantile_step=0):
    """Transform predictions into vector representing different quantiles.

    Args:
      hparams: Hyperparameters.
      propagated_states: single value predictions, its dimensions represent
        timestep * states * location.
      quantile_kernel: Quantile mapping kernel.
      quantile_biases: Biases for quantiles.
      ground_truth_timeseries: Ground truth time series.
      num_train_steps: number of train steps.
      num_forecast_steps: number of forecasting steps.
      num_quantiles: Number of quantiles.
      epsilon: A small number to avoid 0 division issues.
      is_training: Whether the phase is training or inference.
      initial_quantile_step: start index for quantile training.

    Returns:
      Vector value predictions of size
      timestep * states * location * num_quantiles.
    """
    (_, gt_list, gt_indicator, _, _) = ground_truth_timeseries
    # State indices follow the stacking order used in seir_dynamics.
    unstacked_propagated_states = tf.unstack(propagated_states, axis=1)
    pred_infected = unstacked_propagated_states[1]
    pred_recovered = unstacked_propagated_states[3]
    pred_hospitalized = unstacked_propagated_states[5]
    pred_icu = unstacked_propagated_states[8]
    pred_ventilator = unstacked_propagated_states[9]
    pred_death = unstacked_propagated_states[10]
    pred_reinfected = unstacked_propagated_states[12]
    pred_confirmed = (
        pred_infected + pred_recovered + pred_death + pred_hospitalized +
        pred_icu + pred_ventilator + pred_reinfected)
    quantile_encoding_window = hparams["quantile_encoding_window"]
    smooth_coef = hparams["quantile_smooth_coef"]
    partial_mean_interval = hparams["partial_mean_interval"]
    # Softplus keeps the kernel/bias contributions positive, so cumsum below
    # yields monotonically increasing quantile multipliers.
    quantile_mapping_kernel = tf.math.softplus(
        tf.expand_dims(quantile_kernel, 2))
    quantile_biases = tf.math.softplus(tf.expand_dims(quantile_biases, 1))
    propagated_states_quantiles = []
    state_quantiles_multiplier_prev = tf.ones_like(
        tf.expand_dims(propagated_states[0, :, :], 2))

    def gt_ratio_feature(gt_values,
                         predicted):
      """Creates the GT ratio feature."""
      # This uses the imputed values when the values are not valid.
      ratio_pred = (1 - (predicted[:num_train_steps, :] /
                         (epsilon + gt_values[:num_train_steps])))
      # Add 0 at the beginning
      ratio_pred = tf.concat([
          0 * ratio_pred[:(quantile_encoding_window + num_forecast_steps), :],
          ratio_pred
      ],
                             axis=0)
      ratio_pred = tf.expand_dims(ratio_pred, 1)
      ratio_pred = tf.tile(ratio_pred, [1, self.num_states, 1])
      return ratio_pred

    def indicator_feature(gt_indicator):
      """Creates the indicator feature."""
      indicator = 1. - gt_indicator
      # Add 0 at the beginning
      indicator = tf.concat([
          0 * indicator[:(quantile_encoding_window + num_forecast_steps), :],
          indicator
      ],
                            axis=0)
      indicator = tf.expand_dims(indicator, 1)
      indicator = tf.tile(indicator, [1, self.num_states, 1])
      return indicator

    # Propagated states features
    temp_propagated_states = tf.concat([
        0 * propagated_states[:quantile_encoding_window, :, :],
        propagated_states
    ],
                                       axis=0)
    # GT ratio features
    death_gt_ratio_feature = gt_ratio_feature(gt_list["death"], pred_death)
    confirmed_gt_ratio_feature = gt_ratio_feature(gt_list["confirmed"],
                                                  pred_confirmed)
    hospitalized_gt_ratio_feature = gt_ratio_feature(gt_list["hospitalized"],
                                                     pred_hospitalized)
    # Indicator features
    death_indicator_feature = indicator_feature(gt_indicator["death"])
    confirmed_indicator_feature = indicator_feature(gt_indicator["confirmed"])
    hospitalized_indicator_feature = indicator_feature(
        gt_indicator["hospitalized"])
    for ti in range(initial_quantile_step,
                    num_train_steps + num_forecast_steps):
      if ti < num_train_steps:
        # During the training window the multiplier is identity (all ones):
        # quantiles collapse onto the point forecast.
        state_quantiles_multiplier = tf.ones_like(
            tf.expand_dims(propagated_states[0, :, :], 2))
        state_quantiles_multiplier = tf.tile(state_quantiles_multiplier,
                                             [1, 1, num_quantiles])
      else:
        # Construct the input features to be used for quantile estimation.
        encoding_input = []
        # Features coming from the trend of the estimated.
        encoding_input.append(1 - (
            temp_propagated_states[ti:(ti + quantile_encoding_window), :, :] /
            (epsilon +
             temp_propagated_states[ti + quantile_encoding_window, :, :])))
        # Features coming from the ground truth ratio of death.
        encoding_input.append(
            death_gt_ratio_feature[ti:(ti + quantile_encoding_window), :, :])
        # Features coming from the ground truth ratio of confirmed.
        encoding_input.append(
            confirmed_gt_ratio_feature[ti:(ti +
                                           quantile_encoding_window), :, :])
        # Features coming from the ground truth ratio of hospitalized.
        encoding_input.append(
            hospitalized_gt_ratio_feature[ti:(ti +
                                              quantile_encoding_window), :, :])
        # Features coming from death indicator.
        encoding_input.append(
            death_indicator_feature[ti:(ti + quantile_encoding_window), :, :])
        # Features coming from confirmed indicator.
        encoding_input.append(
            confirmed_indicator_feature[ti:(ti +
                                            quantile_encoding_window), :, :])
        # Features coming from hospitalized indicator.
        encoding_input.append(
            hospitalized_indicator_feature[ti:(ti +
                                               quantile_encoding_window), :, :])
        encoding_input_t = tf.expand_dims(tf.concat(encoding_input, axis=0), 3)
        # Limit the range of features.
        encoding_input_t = model_utils.apply_relu_bounds(
            encoding_input_t,
            lower_bound=0.0,
            upper_bound=2.0,
            replace_nan=True)
        # Estimate the multipliers of quantiles
        state_quantiles_multiplier = quantile_biases + tf.math.reduce_mean(
            tf.multiply(encoding_input_t, quantile_mapping_kernel), 0)
        # Consider accumulation to guarantee monotonicity
        state_quantiles_multiplier = tf.math.cumsum(
            state_quantiles_multiplier, axis=-1)
        if partial_mean_interval == 0:
          # Normalize to match the median to point forecasts
          state_quantiles_multiplier /= (
              epsilon + tf.expand_dims(
                  state_quantiles_multiplier[:, :,
                                             (num_quantiles - 1) // 2], -1))
        else:
          # Normalize with major densities to approximate point forecast (mean)
          median_idx = (num_quantiles - 1) // 2
          normalize_start = median_idx - partial_mean_interval
          normalize_end = median_idx + partial_mean_interval
          normalizer = tf.reduce_mean(
              0.5 *
              (state_quantiles_multiplier[:, :, normalize_start:normalize_end] +
               state_quantiles_multiplier[:, :, normalize_start +
                                          1:normalize_end + 1]),
              axis=2,
              keepdims=True)
          state_quantiles_multiplier /= (epsilon + normalizer)
        # Exponential smoothing of the multiplier across timesteps.
        state_quantiles_multiplier = (
            smooth_coef * state_quantiles_multiplier_prev +
            (1 - smooth_coef) * state_quantiles_multiplier)
        state_quantiles_multiplier_prev = state_quantiles_multiplier
      # Return the estimated quantiles
      propagated_states_quantiles_timestep = tf.multiply(
          tf.expand_dims(propagated_states[ti, :, :], 2),
          state_quantiles_multiplier)
      propagated_states_quantiles.append(propagated_states_quantiles_timestep)
    return tf.stack(propagated_states_quantiles)
def extract_rate_list(self):
"""Return list of rates that correspond to 'propagated_variables' tensor.
Args: None.
Returns:
List of rate names.
"""
return constants.ICU_AND_VENTILATOR_RATE_LIST
def calculate_r_eff(self,
rates = None,
propagated_variables = None,
epsilon = 1e-8):
"""Calculate Basic Reproduction Number R_eff over time and locations.
Args:
rates: rate name->tensor maps.
propagated_variables: single tensor of variables indexed by
(time)x(variables)x(locations) (used in the training).
epsilon: epsilon for avoiding numerical error.
Returns:
R_eff tensor.
"""
if rates is not None and propagated_variables is not None:
raise ValueError("Only rates or seir_variables can be used.")
elif rates is None and propagated_variables is None:
raise ValueError("Have to specify one argument.")
elif rates is not None:
beta_d, beta_ud = rates["average_contact_id_rate"], rates[
"average_contact_iud_rate"]
rho_id, rho_iud = rates["recovery_id_rate"], rates["recovery_iud_rate"]
gamma, h = rates["diagnosis_rate"], rates["hospitalization_rate"]
kappa_id = rates["death_id_rate"]
# equation is computed from the Next Generation Matrix Method.
# If you are changing any of the parameters below, please make sure to
# update the Next Generation Matrix derivation and parameters too.
# LINT.IfChange
r_eff = (beta_d * gamma + beta_ud *
(rho_id + kappa_id + h)) / ((gamma + rho_iud) *
(rho_id + kappa_id + h) + epsilon)
return r_eff
else:
propagated_variables_list = tf.unstack(propagated_variables, axis=1)
average_contact_id = propagated_variables_list[2]
average_contact_iud = propagated_variables_list[3]
diagnosis_rate = propagated_variables_list[6]
recovery_rate_id = propagated_variables_list[7]
recovery_rate_iud = propagated_variables_list[8]
hospitalization_rate = propagated_variables_list[12]
death_rate_id = propagated_variables_list[15]
beta_d = average_contact_id
beta_ud = average_contact_iud
rho_id = recovery_rate_id
rho_iud = recovery_rate_iud
gamma = diagnosis_rate
h = hospitalization_rate
kappa_id = death_rate_id
r_eff = (beta_d * gamma + beta_ud *
(rho_id + kappa_id + h)) / ((gamma + rho_iud) *
(rho_id + kappa_id + h) + epsilon)
return r_eff
| |
"""
Module for checking resource reservations by chutes.
One idea motivating this design is to reduce the amount of state
in memory for resource reservations. We have the chute list, which
contains information about what devices the chute is using. If we also
maintain a separate list of devices used by chutes, we need to keep
them synchronized. This becomes messy when a chute fails to install
or uninstall correctly. The getDeviceReservations function iterates
over the chute list and returns an up-to-date view of device usage.
This can be called as needed.
"""
import collections
import ipaddress
from paradrop.base import constants
from paradrop.core.config.devices import getWirelessPhyName
from paradrop.core.config.hostconfig import prepareHostConfig
from paradrop.core.chute.chute_storage import ChuteStorage
from paradrop.lib.utils import datastruct
class DeviceReservations(object):
    """Tracks which chutes have reserved a single device, and in what mode."""

    def __init__(self):
        # Each entry is a dict with 'chute', 'type', and 'mode' keys.
        self.reservations = []

    def add(self, chute, dtype, mode=None):
        """Record that a chute has reserved this device."""
        self.reservations.append({
            'chute': chute,
            'type': dtype,
            'mode': mode
        })

    def count(self, dtype=None, mode=None):
        """
        Return the number of reservations matching the given criteria.

        None is used as a wildcard, so if no arguments are passed, the count
        returned is the total number of reservations.
        """
        return sum(
            1 for entry in self.reservations
            if (dtype is None or dtype == entry['type'])
            and (mode is None or mode == entry['mode']))
def getDeviceReservations(exclude=None):
    """
    Produce a dictionary mapping device names to DeviceReservations objects
    that describe the current usage of the device.

    The returned type is a defaultdict, so there is no need to check if a key
    exists before accessing it.

    exclude: name of chute whose device reservations should be excluded
    """
    reservations = collections.defaultdict(DeviceReservations)

    # Devices claimed by the host configuration are attributed to the special
    # reserved chute name, unless that is exactly the name being excluded.
    if exclude != constants.RESERVED_CHUTE_NAME:
        hostConfig = prepareHostConfig()

        for iface in hostConfig.get('wifi-interfaces', []):
            if 'device' not in iface:
                continue

            device = iface['device']
            phy = getWirelessPhyName(device)
            if phy is not None:
                # It is annoying to do this conversion everywhere, but it would
                # be painful to break compatibility with all of the devices out
                # there that use e.g. wlan0 instead of phy0 in their hostconfig.
                device = phy

            reservations[device].add(constants.RESERVED_CHUTE_NAME, 'wifi',
                                     iface.get('mode', 'ap'))

        for iface in datastruct.getValue(hostConfig, 'lan.interfaces', []):
            reservations[iface].add(constants.RESERVED_CHUTE_NAME, 'lan', None)

    for chute in ChuteStorage.chuteList.values():
        if chute.name == exclude:
            continue

        for iface in chute.getCache('networkInterfaces'):
            # Device is not set in cases such as vlan interfaces.
            device = iface.get('device', None)
            if device is None:
                continue

            reservations[device].add(chute.name, iface['type'],
                                     iface.get('mode', None))

    return reservations
class InterfaceReservationSet(object):
    """Set of network interface names currently claimed in the system."""

    def __init__(self):
        # Plain set of interface name strings; duplicates collapse.
        self.reservations = set()

    def add(self, interface):
        """Claim an interface name."""
        self.reservations.add(interface)

    def __len__(self):
        return len(self.reservations)

    def __contains__(self, x):
        return x in self.reservations
def getInterfaceReservations(exclude=None):
    """
    Get current set of interface reservations.

    Returns an instance of InterfaceReservationSet.

    exclude: name of chute whose interfaces should be excluded
    """
    reservations = InterfaceReservationSet()

    # Interfaces claimed by the host configuration count against the special
    # reserved chute name, unless that is the name being excluded.
    if exclude != constants.RESERVED_CHUTE_NAME:
        hostConfig = prepareHostConfig()
        for iface in hostConfig.get('wifi-interfaces', []):
            if 'ifname' in iface:
                reservations.add(iface['ifname'])

    for chute in ChuteStorage.chuteList.values():
        if chute.name == exclude:
            continue
        for iface in chute.getCache('networkInterfaces'):
            if 'externalIntf' in iface:
                reservations.add(iface['externalIntf'])

    return reservations
class SubnetReservationSet(object):
    """Collection of reserved subnets with overlap-aware membership tests."""

    def __init__(self):
        # List of ipaddress network objects.
        self.reservations = []

    def add(self, subnet):
        """Reserve a subnet (an ipaddress network object)."""
        self.reservations.append(subnet)

    def __contains__(self, subnet):
        # Membership means "overlaps any reserved subnet", not exact equality.
        return any(existing.overlaps(subnet) for existing in self.reservations)

    def __len__(self):
        return len(self.reservations)
def getSubnetReservations(exclude=None):
    """
    Get current set of subnet reservations.

    Returns an instance of SubnetReservationSet.

    exclude: name of chute whose reservations should be excluded
    """
    reservations = SubnetReservationSet()

    # The host configuration's LAN network counts as a reservation under the
    # special reserved chute name, unless that name is being excluded.
    if exclude != constants.RESERVED_CHUTE_NAME:
        hostConfig = prepareHostConfig()
        ipaddr = datastruct.getValue(hostConfig, 'lan.ipaddr', None)
        netmask = datastruct.getValue(hostConfig, 'lan.netmask', None)
        if ipaddr is not None and netmask is not None:
            lan_network = ipaddress.ip_network(
                u'{}/{}'.format(ipaddr, netmask), strict=False)
            reservations.add(lan_network)

    for chute in ChuteStorage.chuteList.values():
        if chute.name == exclude:
            continue
        for iface in chute.getCache('networkInterfaces'):
            if 'subnet' in iface:
                reservations.add(iface['subnet'])

    return reservations
def getReservations(update):
    """
    Get device and resource reservations claimed by other users.

    Results are stored in the update cache under the keys
    'deviceReservations', 'interfaceReservations', and 'subnetReservations'.
    """
    owner = update.new.name

    device_res = getDeviceReservations(exclude=owner)
    interface_res = getInterfaceReservations(exclude=owner)
    subnet_res = getSubnetReservations(exclude=owner)

    update.cache_set('deviceReservations', device_res)
    update.cache_set('interfaceReservations', interface_res)
    update.cache_set('subnetReservations', subnet_res)
| |
#!/usr/bin/env python
from rdflib import Graph, Literal, RDF, RDFS, Namespace, URIRef
from rdflib.namespace import DCTERMS
from rdflib.plugins.stores import sparqlstore
from bunch import bunchify
import hashlib
import json
from uuid import uuid4
class LocalStore():
    '''
    This class is a wrapper for the Graph class that
    handles ontology binding and triples serialization.
    '''

    def __init__(self):
        # In-memory rdflib graph plus a prefix -> Namespace lookup table.
        self.g = Graph()
        self.ns = {}

    def bind_namespaces(self, namespaces):
        '''Bind each prefix/uri pair into the graph and remember it in ns.'''
        for prefix, uri in namespaces.items():
            namespace = Namespace(uri)
            self.g.bind(prefix, namespace)
            self.ns[prefix] = namespace

    def get_namespaces(self):
        '''Return the list of (prefix, uri) pairs bound to the graph.'''
        return list(self.g.namespaces())

    def get_resource(self, urn):
        '''Return the graph's resource wrapper for the given urn.'''
        return self.g.resource(urn)

    def add_triple(self, s, v, p):
        '''Add a single (subject, verb, predicate) triple to the graph.'''
        self.g.add((s, v, p))

    def serialize(self, format):
        '''Serialize the whole graph in the requested format.'''
        return self.g.serialize(format=format)
class RemoteStore():
    '''Wrapper around a SPARQL update endpoint exposed as an rdflib Graph.'''

    def __init__(self, endpoint):
        # The same endpoint is used for both query and update operations.
        store = sparqlstore.SPARQLUpdateStore()
        store.open((endpoint, endpoint))
        self._store = store
        self.g = Graph(self._store, URIRef('urn:x-arq:DefaultGraph'))

    def update(self, triples_as_nt):
        '''Insert the given N-Triples payload into the default graph.'''
        return self.g.update("INSERT DATA { %s }" % triples_as_nt)

    def delete(self, triples_as_nt):
        '''Delete the given N-Triples payload from the default graph.'''
        return self.g.update("DELETE DATA { %s }" % triples_as_nt)
class Triplelizer():
    '''
    This class takes the json output from semantics-preprocessing and generates
    triples
    '''

    def __init__(self):
        # Local in-memory triple store; the triplelize_* methods below
        # accumulate triples into it.
        self.store = LocalStore()
        # Protocol fingerprints used by identify() to classify documents.
        with open('configs/services.json', 'r') as fp:
            self.fingerprints = bunchify(json.loads(fp.read()))
        # NOTE(review): the 'Service' URI uses "ww.daml.org" while the other
        # DAML URIs use "www" -- this looks like a typo, but changing it would
        # change every emitted triple, so confirm before fixing.
        ontology_uris = {
            'wso': 'http://purl.org/nsidc/bcube/web-services#',
            'Profile': 'http://www.daml.org/services/owl-s/1.2/Profile.owl#',
            'Service': 'http://ww.daml.org/services/owl-s/1.2/Service.owl#',
            'ServiceParameter':
            'http://www.daml.org/services/owl-s/1.2/ServiceParameter.owl#',
            'media':
            'http://www.iana.org/assignments/media-types/media-types.xhtml#',
            'dc': str(DCTERMS)
        }
        self.store.bind_namespaces(ontology_uris)

    def _validate(self, value):
        '''
        Returns None if the value is empty string,
        'null' or is non existant; otherwise returns the value unchanged.
        '''
        if value == "" or value == "null" or value is None:
            return None
        else:
            return value

    def _escape_rdflib(self, url):
        '''
        Percent-escape curly braces, which rdflib rejects in URI terms.

        See http://github.com/RDFLib/rdflib/blob/
        e80c6186fee68219e19bc2adae2cd5edf20bfef9/rdflib/term.py
        Line 73
        '''
        return url.replace("{", "%7B").replace("}", "%7D")

    def _generate_sha_identifier(self, url):
        '''
        temporary document identifer as a hash of the source url
        (should be implemented in solr in the near future)

        NOTE(review): despite the original "sha-1" wording this computes
        SHA-224, and hashlib requires bytes input on Python 3, so a str url
        would raise TypeError there -- confirm the intended Python version.
        '''
        return hashlib.sha224(url).hexdigest()

    def _generate_uri(self, object_type):
        '''
        generate a non-resolvable uri for any object as
        urn:{object_type}:{identifier}
        where object_type is the class name and identifier
        is a random hash (uuid4 for now)
        note: can't just generate a sha hash from the value
        given that the values can repeat
        '''
        return 'urn:{0}:{1}'.format(object_type, str(uuid4()))

    def identify(self, document):
        '''
        Return (object_type, ontology_class) from the first fingerprint whose
        DocType matches the document's protocol, or None when nothing matches.
        '''
        for attr in self.fingerprints:
            if attr.DocType == document.identity.protocol:
                return attr.object_type, attr.ontology_class
        return None

    def triplelize_parameters(self, parameters, endpoint, digest):
        '''
        Triplelize parameters, they belong to an endpoint
        and have a name, type and format.

        Returns the LocalStore the triples were added to.
        '''
        param_ns = self.store.ns['ServiceParameter']
        for param in parameters:
            # Each parameter gets its own non-resolvable urn.
            parameter_urn = self._generate_uri('ServiceParameter')
            p = self.store.get_resource(parameter_urn)

            p.add(RDF.type, URIRef("ServiceParameter:ServiceParameter"))
            # Only attach properties whose values pass _validate().
            if 'name' in param and self._validate(param.name) is not None:
                p.add(param_ns['serviceParameterName'],
                      Literal(param.name))
            if 'formats' in param and self._validate(param.formats) is not None:
                p.add(param_ns['serviceParameterFormat'],
                      Literal(param.formats))
            if 'type' in param and self._validate(param.type) is not None:
                p.add(param_ns['serviceParameterType'],
                      Literal(param.type))
            endpoint.add(param_ns['hasParameters'], p)
        return self.store

    def triplelize_endpoints(self, doc, service):
        '''
        Add triples for every endpoint in the document's service description,
        linking each endpoint to the given service resource.
        '''
        wso = self.store.ns['wso']
        media = self.store.ns['media']
        for item in doc.service_description.service.endpoints:
            endpoint_uri = self._generate_uri('ServiceEndpoint')
            endpoint = self.store.get_resource(endpoint_uri)
            endpoint.add(wso["Protocol"], Literal(item.protocol))
            endpoint.add(wso["BaseURL"], URIRef(self._escape_rdflib(item.url)))
            if 'mimeType' in item and item.mimeType is not None:
                for mime_type in item.mimeType:
                    endpoint.add(media['type'], Literal(mime_type))
            if doc.identity.subtype == "service":
                # Full service endpoints also get their parameter triples.
                endpoint.add(RDF.type, wso["ServiceEndpoint"])
                endpoint.add(wso["hasService"], service)
                if 'parameters' in item and item.parameters is not None:
                    self.triplelize_parameters(item.parameters,
                                               endpoint, doc.digest)
            else:
                # Non-service documents are only linked as children.
                endpoint.add(wso["childOf"], service)
            if 'name' in item:
                endpoint.add(RDFS.label, Literal(item.name))
        return self.store

    def triplelize(self, document):
        '''
        This method works fine with:
        pip install git+https://github.com/betolink/bunch.git
        Otherwise bunch rises an exception for not found keys

        Returns (store, document_urn) on success and (None, None) when the
        document's protocol is not recognized.

        NOTE(review): the early "return None" below (missing service
        description) returns a single value while the other paths return a
        2-tuple -- callers unpacking the result would fail on that path.
        '''
        # ns = 'http://purl.org/nsidc/bcube/web-services#'
        wso = self.store.ns['wso']
        if self.identify(document) is not None:
            document_urn = self._generate_uri('WebDocument')
            service_doc = document.get('service_description', {})
            service = service_doc.get('service', {})
            if not service_doc or not service:
                return None
            doc_base_url = document.url
            doc_version = document.identity.version
            doc_title = service.get('title', [])
            doc_abstract = service.get('abstract', [])
            doc_type, doc_ontology = self.identify(document)

            resource = self.store.get_resource(document_urn)
            resource.add(RDF.type, URIRef(doc_type))
            resource.add(RDF.type, wso[doc_ontology])
            resource.add(DCTERMS.hasVersion, Literal(doc_version))
            # run as multiple elements for now
            for title in doc_title:
                resource.add(DCTERMS.title, Literal(title))
            for abstract in doc_abstract:
                resource.add(DCTERMS.abstract, Literal(abstract))
            resource.add(wso.BaseURL,
                         Literal(self._escape_rdflib(doc_base_url)))
            # now the endpoints
            self.triplelize_endpoints(document, resource)
            return self.store, document_urn
        else:
            return None, None
def triplify(json_data):
    '''Bunchify the raw json document and run it through a Triplelizer.'''
    document = bunchify(json_data)
    return Triplelizer().triplelize(document)
def storify(endpoint, triples_as_nt=None, triples_path='', option='INSERT'):
    '''
    load an existing set of triples as TURTLE ONLY
    or just accept a set of triples already serialized as nt
    '''
    store = RemoteStore(endpoint)

    if triples_path:
        # Parse the turtle file and re-serialize it as nt for the endpoint.
        parsed = Graph()
        parsed.parse(triples_path, format='turtle')
        triples_as_nt = parsed.serialize(format='nt')

    if triples_as_nt is None:
        raise Exception('No triples!')

    # Unknown options are silently ignored, matching prior behavior.
    actions = {'INSERT': store.update, 'DELETE': store.delete}
    action = actions.get(option)
    if action is not None:
        action(triples_as_nt)
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test the hunt_view interface."""
import os
import traceback
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server import hunt
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr.test_lib import test_lib
class TestHuntView(gui_test_lib.GRRSeleniumHuntTest):
  """Test the Cron view GUI."""

  # Justification text used when requesting access approvals in these tests.
  reason = "Felt like it!"

  def SetupTestHuntView(self, client_limit=0, client_count=10):
    """Creates a hunt with logs/errors and checks its client counters.

    Args:
      client_limit: Client limit for the hunt; 0 means no limit.
      client_count: Number of sample clients to create for the hunt.

    Returns:
      The hunt id of the created hunt.
    """
    # Create some clients and a hunt to view.
    hunt_id = self.CreateSampleHunt(
        client_limit=client_limit, client_count=client_count)
    self.RunHunt(failrate=2)

    self.AddLogToHunt(hunt_id, self.client_ids[0], "TestLogLine")
    # Log an error just with some random traceback.
    self.AddErrorToHunt(hunt_id, self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

    hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id)
    if client_limit == 0:
      self.assertEqual(hunt_counters.num_clients, client_count)
    else:
      self.assertEqual(hunt_counters.num_clients, min(client_count,
                                                      client_limit))

    return hunt_id

  def testPageTitleReflectsSelectedHunt(self):
    """Checks that the page title changes when a hunt is selected."""
    hunt_id = self.CreateSampleHunt(stopped=True)

    self.Open("/#/hunts")
    self.WaitUntilEqual("GRR | Hunts", self.GetPageTitle)

    self.Click("css=td:contains('%s')" % hunt_id)
    self.WaitUntilEqual("GRR | " + hunt_id, self.GetPageTitle)

  def testHuntView(self):
    """Test that we can see all the hunt data."""
    hunt_id = self.SetupTestHuntView()

    # Open up and click on View Hunts.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)

    # Select a Hunt.
    self.Click("css=td:contains('%s')" % hunt_id)

    # Check we can now see the details.
    self.WaitUntil(self.IsElementPresent, "css=dl.dl-hunt")
    self.WaitUntil(self.IsTextPresent, "Clients Scheduled")
    self.WaitUntil(self.IsTextPresent, "Hunt ID")

    # Click the Log Tab.
    self.Click("css=li[heading=Log]")
    self.WaitUntil(self.IsTextPresent, "TestLogLine")

    # Click the Error Tab.
    self.Click("css=li[heading=Errors]")
    self.WaitUntil(self.IsTextPresent, "Client Error 1")

  def SetupHuntDetailView(self, failrate=2):
    """Create some clients and a hunt to view."""
    hunt_id = self.CreateSampleHunt()
    self.RunHunt(client_ids=self.client_ids, failrate=failrate)
    self.AddErrorToHunt(hunt_id, self.client_ids[1], "Client Error 1",
                        traceback.format_exc())

    return hunt_id

  def testHuntClientsView(self):
    """Test the detailed client view works."""
    hunt_id = self._CreateHuntWithDownloadedFile()

    # Open up and click on View Hunts then the first Hunt.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)
    self.Click("css=td:contains('%s')" % hunt_id)

    # Click the Overview Tab then the Details Link.
    self.Click("css=li[heading=Overview]")
    self.WaitUntil(self.IsTextPresent, "Hunt ID")

    # Check the Hunt Clients tab.
    self.Click("css=li[heading=Clients]")

    client_id = self.client_ids[0]
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s')" % client_id)

    self.RequestAndGrantClientApproval(client_id)

    # TODO(user): move the code below outside of if as soon as hunt's
    # subflows are properly reported in the REL_DB implementation.
    self.Click("css=tr:contains('%s') td:nth-of-type(2) a" % client_id)
    self.WaitUntil(self.IsTextPresent, "Flow Information")
    self.WaitUntil(self.IsTextPresent, self.base_path)

  def testHuntOverviewShowsStats(self):
    """Checks the Overview tab renders aggregated CPU and network stats."""
    hunt_id = self.CreateSampleHunt()
    client_id = self.SetupClient(0)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow.RandomFlowId(),
        parent_hunt_id=hunt_id,
        create_time=rdfvalue.RDFDatetime.Now())
    rdf_flow.cpu_time_used.user_cpu_time = 5000
    rdf_flow.network_bytes_sent = 1000000
    data_store.REL_DB.WriteFlowObject(rdf_flow)

    # Open up and click on View Hunts then the first Hunt.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)
    self.Click("css=td:contains('%s')" % hunt_id)

    # Click the Overview Tab and check that the stats are present.
    # 5000 seconds of CPU and 1000000 bytes, human-formatted.
    self.Click("css=li[heading=Overview]")
    self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
    self.WaitUntil(self.IsTextPresent, "976.6KiB")

  def testHuntOverviewGetsUpdatedWhenHuntChanges(self):
    """Checks Overview stats auto-refresh when new flows are written."""
    hunt_id = self.CreateSampleHunt()
    client_id = self.SetupClient(0)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow.RandomFlowId(),
        parent_hunt_id=hunt_id,
        create_time=rdfvalue.RDFDatetime.Now())
    rdf_flow.cpu_time_used.user_cpu_time = 5000
    rdf_flow.network_bytes_sent = 1000000
    data_store.REL_DB.WriteFlowObject(rdf_flow)

    self.Open("/")
    # Ensure auto-refresh updates happen every second.
    self.GetJavaScriptValue(
        "grrUi.hunt.huntOverviewDirective.setAutoRefreshInterval(1000);")

    self.Click("css=a[grrtarget=hunts]")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.WaitUntil(self.IsTextPresent, "1h 23m 20s")
    self.WaitUntil(self.IsTextPresent, "976.6KiB")

    # Write a second flow; the auto-refreshing view should pick up the new
    # totals without a reload.
    client_id = self.SetupClient(1)
    rdf_flow = rdf_flow_objects.Flow(
        client_id=client_id,
        flow_id=flow.RandomFlowId(),
        parent_hunt_id=hunt_id,
        create_time=rdfvalue.RDFDatetime.Now())
    rdf_flow.cpu_time_used.user_cpu_time = 1000
    rdf_flow.network_bytes_sent = 10000000
    data_store.REL_DB.WriteFlowObject(rdf_flow)

    self.WaitUntil(self.IsTextPresent, "1h 40m")
    self.WaitUntil(self.IsTextPresent, "10.5MiB")

  def testHuntOverviewShowsStartAndExpirationTime(self):
    """Checks start/expiration times appear only once the hunt is started."""
    duration = rdfvalue.Duration.From(3, rdfvalue.DAYS)
    init_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1973-01-01 08:34")
    last_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1981-03-04 12:52")
    expiration_time = init_start_time + duration

    hunt_id = self.CreateHunt(duration=duration)

    # Navigate to the hunt view.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)

    # Select the hunt.
    self.Click("css=td:contains('{}')".format(hunt_id))

    self.RequestAndGrantHuntApproval(hunt_id)

    # Before the hunt is started, no times should be displayed.
    self.assertFalse(self.IsTextPresent(str(init_start_time)))
    self.assertFalse(self.IsTextPresent(str(expiration_time)))
    self.assertFalse(self.IsTextPresent(str(last_start_time)))

    with test_lib.FakeTime(init_start_time):
      hunt.StartHunt(hunt_id)

    self.Refresh()
    self.WaitUntil(self.IsTextPresent, str(init_start_time))
    self.WaitUntil(self.IsTextPresent, str(expiration_time))
    self.assertFalse(self.IsTextPresent(str(last_start_time)))

    # Pausing and restarting updates the last start time only.
    with test_lib.FakeTime(last_start_time):
      hunt.PauseHunt(hunt_id)
      hunt.StartHunt(hunt_id)

    self.Refresh()
    self.WaitUntil(self.IsTextPresent, str(init_start_time))
    self.WaitUntil(self.IsTextPresent, str(expiration_time))
    self.WaitUntil(self.IsTextPresent, str(last_start_time))

  def testHuntListShowsStartAndExpirationTime(self):
    """Checks the hunt list shows per-hunt start and expiration times."""
    hunt_1_start_time = rdfvalue.RDFDatetime.FromHumanReadable("1992-11-11")
    hunt_2_start_time = rdfvalue.RDFDatetime.FromHumanReadable("2001-05-03")
    hunt_1_duration = rdfvalue.Duration.From(3, rdfvalue.DAYS)
    hunt_2_duration = rdfvalue.Duration.From(5, rdfvalue.HOURS)
    hunt_1_expiration_time = hunt_1_start_time + hunt_1_duration
    hunt_2_expiration_time = hunt_2_start_time + hunt_2_duration

    hunt_1_id = self.CreateHunt(duration=hunt_1_duration)
    hunt_2_id = self.CreateHunt(duration=hunt_2_duration)

    # Navigate to the hunt list.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_1_id)
    self.WaitUntil(self.IsTextPresent, hunt_2_id)

    # No times displayed until hunts are started.
    self.assertFalse(self.IsTextPresent(str(hunt_1_start_time)))
    self.assertFalse(self.IsTextPresent(str(hunt_1_expiration_time)))
    self.assertFalse(self.IsTextPresent(str(hunt_2_start_time)))
    self.assertFalse(self.IsTextPresent(str(hunt_2_expiration_time)))

    with test_lib.FakeTime(hunt_1_start_time):
      hunt.StartHunt(hunt_1_id)

    self.Refresh()
    self.WaitUntil(self.IsTextPresent, str(hunt_1_start_time))
    self.WaitUntil(self.IsTextPresent, str(hunt_1_expiration_time))
    self.assertFalse(self.IsTextPresent(str(hunt_2_start_time)))
    self.assertFalse(self.IsTextPresent(str(hunt_2_expiration_time)))

    with test_lib.FakeTime(hunt_2_start_time):
      hunt.StartHunt(hunt_2_id)

    self.Refresh()
    self.WaitUntil(self.IsTextPresent, str(hunt_1_start_time))
    self.WaitUntil(self.IsTextPresent, str(hunt_1_expiration_time))
    self.WaitUntil(self.IsTextPresent, str(hunt_2_start_time))
    self.WaitUntil(self.IsTextPresent, str(hunt_2_expiration_time))

  def testHuntStatsView(self):
    """Checks the Stats tab shows aggregate client resource usage."""
    hunt_id = self.SetupTestHuntView()

    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsTextPresent, hunt_id)

    self.Click("css=td:contains('%s')" % hunt_id)

    # Click the Stats tab.
    self.Click("css=li[heading=Stats]")

    self.WaitUntil(self.IsTextPresent, "Total number of clients")
    self.WaitUntil(self.IsTextPresent, "10")

    self.WaitUntil(self.IsTextPresent, "User CPU mean")
    self.WaitUntil(self.IsTextPresent, "5.5")

    self.WaitUntil(self.IsTextPresent, "User CPU stddev")
    self.WaitUntil(self.IsTextPresent, "2.9")

    self.WaitUntil(self.IsTextPresent, "System CPU mean")
    self.WaitUntil(self.IsTextPresent, "11")

    self.WaitUntil(self.IsTextPresent, "System CPU stddev")
    self.WaitUntil(self.IsTextPresent, "5.7")

    self.WaitUntil(self.IsTextPresent, "Network bytes sent mean")
    self.WaitUntil(self.IsTextPresent, "16.5")

    self.WaitUntil(self.IsTextPresent, "Network bytes sent stddev")
    self.WaitUntil(self.IsTextPresent, "8.6")

  def testHuntNotificationIsShownAndClickable(self):
    """Checks an approval notification opens the right hunt when clicked."""
    hunt_id = self.CreateSampleHunt(
        path=os.path.join(self.base_path, "test.plist"))

    self.RequestAndGrantHuntApproval(hunt_id)

    self.Open("/")

    self.Click("css=#notification_button")
    self.Click("css=a:contains('has granted you access')")

    self.WaitUntil(self.IsElementPresent,
                   "css=tr.row-selected td:contains('%s')" % hunt_id)
    self.WaitUntil(self.IsTextPresent, hunt_id)

  def testLogsTabShowsLogsFromAllClients(self):
    """Checks the Log tab lists log lines from every client."""
    hunt_id = self.SetupHuntDetailView(failrate=-1)

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Log]")

    for client_id in self.client_ids:
      self.WaitUntil(self.IsTextPresent, client_id)
      # TODO(amoser): Get rid of the aff4 prefix here.
      self.WaitUntil(
          self.IsTextPresent, "File aff4:/%s/%s transferred successfully." %
          (client_id, "fs/os/tmp/evil.txt"))

  def testLogsTabGetsAutoRefreshed(self):
    """Checks new log lines appear without a manual page reload."""
    hunt_id = self.CreateSampleHunt()
    self.AddLogToHunt(hunt_id, self.client_ids[0], "foo-log")

    self.Open("/")
    # Ensure auto-refresh updates happen every second.
    self.GetJavaScriptValue(
        "grrUi.hunt.huntLogDirective.setAutoRefreshInterval(1000);")

    self.Click("css=a[grrtarget=hunts]")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Log]")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-log td:contains('foo-log')")
    self.WaitUntilNot(self.IsElementPresent,
                      "css=grr-hunt-log td:contains('bar-log')")

    self.AddLogToHunt(hunt_id, self.client_ids[1], "bar-log")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-log td:contains('bar-log')")

  def testLogsTabFiltersLogsByString(self):
    """Checks the Log tab filter restricts output to matching clients."""
    hunt_id = self.SetupHuntDetailView(failrate=-1)

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Log]")

    self.Type("css=grr-hunt-log input.search-query", self.client_ids[-1])
    self.Click("css=grr-hunt-log button:contains('Filter')")

    self.WaitUntil(self.IsTextPresent, self.client_ids[-1])
    # TODO(amoser): Get rid of the aff4 prefix here.
    self.WaitUntil(
        self.IsTextPresent, "File aff4:/%s/%s transferred successfully." %
        (self.client_ids[-1], "fs/os/tmp/evil.txt"))

    for client_id in self.client_ids[:-1]:
      self.WaitUntilNot(self.IsTextPresent, client_id)
      self.WaitUntilNot(
          self.IsTextPresent, "File %s/%s transferred successfully." %
          (client_id, "fs/os/tmp/evil.txt"))

  def testLogsTabShowsDatesInUTC(self):
    """Checks log timestamps are rendered in UTC."""
    hunt_id = self.CreateSampleHunt()
    with test_lib.FakeTime(42):
      self.AddLogToHunt(hunt_id, self.client_ids[0], "I do log.")

    self.Open("/#main=ManageHunts")

    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Log]")

    self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")

  def testErrorsTabShowsErrorsFromAllClients(self):
    """Checks the Errors tab lists errors from every client."""
    hunt_id = self.SetupHuntDetailView(failrate=1)

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Errors]")

    for client_id in self.client_ids:
      self.WaitUntil(self.IsTextPresent, client_id)

  def testErrorsTabGetsAutoRefreshed(self):
    """Checks new errors appear without a manual page reload."""
    hunt_id = self.CreateSampleHunt()
    self.AddErrorToHunt(hunt_id, self.client_ids[0], "foo-error",
                        traceback.format_exc())

    self.Open("/")
    # Ensure auto-refresh updates happen every second.
    self.GetJavaScriptValue(
        "grrUi.hunt.huntErrorsDirective.setAutoRefreshInterval(1000);")

    self.Click("css=a[grrtarget=hunts]")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Errors]")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-errors td:contains('foo-error')")
    self.WaitUntilNot(self.IsElementPresent,
                      "css=grr-hunt-errors td:contains('bar-error')")

    self.AddErrorToHunt(hunt_id, self.client_ids[0], "bar-error",
                        traceback.format_exc())

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-errors td:contains('bar-error')")

  def testErrorsTabShowsDatesInUTC(self):
    """Checks error timestamps are rendered in UTC."""
    hunt_id = self.CreateSampleHunt()
    with test_lib.FakeTime(42):
      self.AddErrorToHunt(hunt_id, self.client_ids[0], "Client Error 1",
                          traceback.format_exc())

    self.Open("/#main=ManageHunts")

    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Errors]")

    self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")

  def testErrorsTabFiltersErrorsByString(self):
    """Checks the Errors tab filter restricts output to matching clients."""
    hunt_id = self.SetupHuntDetailView(failrate=1)

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Errors]")

    self.Type("css=grr-hunt-errors input.search-query", self.client_ids[-1])
    self.Click("css=grr-hunt-errors button:contains('Filter')")

    self.WaitUntil(self.IsTextPresent, self.client_ids[-1])

    for client_id in self.client_ids[:-1]:
      self.WaitUntilNot(self.IsTextPresent, client_id)

  def testCrashesTabShowsNoErrorWhenCrashesAreMissing(self):
    """Checks the Crashes tab renders cleanly when there are no crashes."""
    hunt_id = self.SetupHuntDetailView()

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Crashes]")

    self.WaitUntilNot(self.IsTextPresent, "Loading...")
    self.WaitUntilNot(self.IsVisible, "css=button#show_backtrace")

  def testCrashesTabGetsAutoRefreshed(self):
    """Checks new crashes appear without a manual page reload."""
    client_ids = self.SetupClients(2)
    hunt_id = self.StartHunt()

    self.RunHuntWithClientCrashes([client_ids[0]])

    self.Open("/")
    # Ensure auto-refresh updates happen every second.
    self.GetJavaScriptValue(
        "grrUi.hunt.huntCrashesDirective.setAutoRefreshInterval(1000);")

    self.Click("css=a[grrtarget=hunts]")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Crashes]")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-crashes td:contains('%s')" % client_ids[0])
    self.WaitUntilNot(self.IsElementPresent,
                      "css=grr-hunt-crashes td:contains('%s')" % client_ids[1])

    self.RunHuntWithClientCrashes([client_ids[1]])

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-crashes td:contains('%s')" % client_ids[1])

  def testShowsResultsTabForIndividualFlowsOnClients(self):
    """Checks per-client hunt flows expose a working Results tab."""
    # Create and run the hunt.
    self.CreateSampleHunt(stopped=False)
    self.RunHunt(client_ids=self.client_ids, failrate=-1)

    self.RequestAndGrantClientApproval(self.client_ids[0])

    self.Open("/#c=" + self.client_ids[0])
    self.Click("css=a:contains('Manage launched flows')")
    self.Click("css=grr-client-flows-list tr:contains('GetFile')")
    self.Click("css=li[heading=Results]")
    # This is to check that no exceptions happened when we tried to display
    # results.
    self.WaitUntilNot(self.IsTextPresent, "Loading...")

  def testClientsTabShowsCompletedAndOutstandingClients(self):
    """Checks the Clients tab separates completed and outstanding clients."""
    # Create some clients and a hunt to view.
    hunt_id = self.CreateSampleHunt()

    # Run the hunt on half the clients.
    finished_client_ids = self.client_ids[5:]
    outstanding_client_ids = self.client_ids[:5]
    self.AssignTasksToClients(client_ids=outstanding_client_ids)
    self.RunHunt(failrate=2, client_ids=finished_client_ids)

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading=Clients]")

    self.Click("css=label[name=ShowCompletedClients]")
    for client_id in finished_client_ids:
      self.WaitUntilContains(client_id, self.GetText, "css=.tab-content")

    self.Click("css=label[name=ShowOutstandingClients]")
    for client_id in outstanding_client_ids:
      self.WaitUntilContains(client_id, self.GetText, "css=.tab-content")

  def testContextTabShowsHuntContext(self):
    """Checks the Context Details tab shows hunt context properties."""
    # Create some clients and a hunt to view.
    hunt_id = self.CreateSampleHunt()

    self.Open("/#main=ManageHunts")
    self.Click("css=td:contains('%s')" % hunt_id)
    self.Click("css=li[heading='Context Details']")

    # Check for different context properties.
    self.WaitUntilContains(
        hunt_id, self.GetText,
        "css=table > tbody td.proto_key:contains(\"Session id\") "
        "~ td.proto_value")
    self.WaitUntilContains(
        self.test_username, self.GetText,
        "css=table > tbody td.proto_key:contains(\"Creator\") "
        "~ td.proto_value")

  def testHuntCreatorIsNotifiedWhenHuntIsStoppedDueToCrashes(self):
    """Checks a crash-limit stop produces a clickable notification."""
    hunt_id = self.StartHunt(crash_limit=3, creator=self.test_username)

    # Run the hunt on 3 clients, one by one. Crash detection check happens
    # when client is scheduled, so it's important to schedule the clients
    # one by one in the test.
    for client_id in self.SetupClients(3):
      self.RunHuntWithClientCrashes([client_id])

    self.Open("/")

    # Wait until the notification is there and show the notifications list.
    self.WaitUntilEqual("1", self.GetText, "css=button[id=notification_button]")
    self.Click("css=button[id=notification_button]")

    # Click on the "hunt [id] reached the crashes limit" notification.
    self.Click("css=td:contains(Hunt %s reached the crashes limit)" % hunt_id)

    # Clicking on notification should shown the hunt's overview page.
    self.WaitUntil(self.IsTextPresent, "/tmp/evil.txt")

    # TODO(user): display hunt.hunt_state_comment in the UI.
# Script entry point: delegate to GRR's test runner via absl.
if __name__ == "__main__":
  app.run(test_lib.main)
| |
import logging
from typing import Dict, Any, List, Optional, Tuple, Union
from ray._raylet import (
Sum as CythonCount,
Histogram as CythonHistogram,
Gauge as CythonGauge,
) # noqa: E402
# Sum is used for CythonCount because it allows incrementing by positive
# values that are different from one.
logger = logging.getLogger(__name__)
class Metric:
"""The parent class of custom metrics.
Ray's custom metrics APIs are rooted from this class and share
the same public methods.
"""
    def __init__(self,
                 name: str,
                 description: str = "",
                 tag_keys: Optional[Tuple[str, ...]] = None):
        """Initialize the metric's name, description, and tag keys.

        Args:
            name: Name of the metric; must be non-empty.
            description: Free-form description of what the metric measures.
            tag_keys: Tuple of tag key names allowed for this metric.

        Raises:
            ValueError: If name is empty.
            TypeError: If tag_keys is not a tuple or contains non-str keys.
        """
        if len(name) == 0:
            raise ValueError("Empty name is not allowed. "
                             "Please provide a metric name.")
        self._name = name
        self._description = description
        # The default tags key-value pair.
        self._default_tags = {}
        # Keys of tags.
        self._tag_keys = tag_keys or tuple()
        # The Cython metric class. This should be set in the child class.
        self._metric = None

        if not isinstance(self._tag_keys, tuple):
            raise TypeError("tag_keys should be a tuple type, got: "
                            f"{type(self._tag_keys)}")
        for key in self._tag_keys:
            if not isinstance(key, str):
                raise TypeError(f"Tag keys must be str, got {type(key)}.")
    def set_default_tags(self, default_tags: Dict[str, str]) -> "Metric":
        """Set default tags of metrics.

        Example:

            >>> # Note that set_default_tags returns the instance itself.
            >>> counter = Counter("name")
            >>> counter2 = counter.set_default_tags({"a": "b"})
            >>> assert counter is counter2
            >>> # this means you can instantiate it in this way.
            >>> counter = Counter("name").set_default_tags({"a": "b"})

        Args:
            default_tags(dict): Default tags that are
                used for every record method.

        Returns:
            Metric: it returns the instance itself.

        Raises:
            ValueError: If a key is not one of this metric's tag keys.
            TypeError: If a tag value is not a str.
        """
        for key, val in default_tags.items():
            if key not in self._tag_keys:
                raise ValueError(f"Unrecognized tag key {key}.")
            if not isinstance(val, str):
                raise TypeError(f"Tag values must be str, got {type(val)}.")
        self._default_tags = default_tags
        return self
def record(self,
value: Union[int, float],
tags: Dict[str, str] = None,
_internal=False) -> None:
"""Record the metric point of the metric.
Tags passed in will take precedence over the metric's default tags.
Args:
value(float): The value to be recorded as a metric point.
"""
assert self._metric is not None
if isinstance(self._metric, CythonCount) and not _internal:
logger.warning("Counter.record() is deprecated in favor of "
"Counter.inc() and will be removed in a future "
"release. Please use Counter.inc() instead.")
if isinstance(self._metric, CythonGauge) and not _internal:
logger.warning("Gauge.record() is deprecated in favor of "
"Gauge.set() and will be removed in a future "
"release. Please use Gauge.set() instead.")
if isinstance(self._metric, CythonHistogram) and not _internal:
logger.warning("Histogram.record() is deprecated in favor of "
"Histogram.observe() and will be removed in a "
"future release. Please use Histogram.observe() "
"instead.")
if tags is not None:
for val in tags.values():
if not isinstance(val, str):
raise TypeError(
f"Tag values must be str, got {type(val)}.")
final_tags = {}
tags_copy = tags.copy() if tags else {}
for tag_key in self._tag_keys:
# Prefer passed tags over default tags.
if tags is not None and tag_key in tags:
final_tags[tag_key] = tags_copy.pop(tag_key)
elif tag_key in self._default_tags:
final_tags[tag_key] = self._default_tags[tag_key]
else:
raise ValueError(f"Missing value for tag key {tag_key}.")
if len(tags_copy) > 0:
raise ValueError(
f"Unrecognized tag keys: {list(tags_copy.keys())}.")
self._metric.record(value, tags=final_tags)
@property
def info(self) -> Dict[str, Any]:
"""Return the information of this metric.
Example:
>>> counter = Counter("name", description="desc")
print(counter.info)
\"""
{
"name": "name",
"description": "desc"
"tag_keys": ("ray.key")
"default_tags": {"ray.key": "abc"}
}
\"""
"""
return {
"name": self._name,
"description": self._description,
"tag_keys": self._tag_keys,
"default_tags": self._default_tags
}
class Counter(Metric):
    """A cumulative metric that is monotonically increasing.

    This corresponds to Prometheus' counter metric:
    https://prometheus.io/docs/concepts/metric_types/#counter

    Args:
        name(str): Name of the metric.
        description(str): Description of the metric.
        tag_keys(tuple): Tag keys of the metric.
    """

    def __init__(self,
                 name: str,
                 description: str = "",
                 tag_keys: Optional[Tuple[str]] = None):
        super().__init__(name, description, tag_keys)
        # Bind the underlying Cython counter implementation.
        self._metric = CythonCount(self._name, self._description,
                                   self._tag_keys)

    def __reduce__(self):
        # Pickle as (constructor, ctor-args) so the metric can be
        # reconstructed on another process.
        return self.__class__, (self._name, self._description,
                                self._tag_keys)

    def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None):
        """Increment the counter by `value` (defaults to 1).

        Tags passed in will take precedence over the metric's default tags.

        Args:
            value(int, float): Value to increment the counter by (default=1).
            tags(Dict[str, str]): Tags to set or override for this counter.
        """
        if not isinstance(value, (int, float)):
            raise TypeError(f"value must be int or float, got {type(value)}.")
        if value <= 0:
            raise ValueError(f"value must be >0, got {value}")
        self.record(value, tags=tags, _internal=True)
class Count(Counter):
    """The count of the number of metric points.

    This corresponds to Prometheus' 'Count' metric.

    This class is DEPRECATED, please use ray.util.metrics.Counter instead.

    Args:
        name(str): Name of the metric.
        description(str): Description of the metric.
        tag_keys(tuple): Tag keys of the metric.
    """

    def __init__(self,
                 name: str,
                 description: str = "",
                 tag_keys: Optional[Tuple[str]] = None):
        # Emit the rename warning at construction time, then behave
        # exactly like Counter.
        logger.warning(
            "`metrics.Count` has been renamed to `metrics.Counter`. "
            "`metrics.Count` will be removed in a future release.")
        super().__init__(name, description, tag_keys)
class Histogram(Metric):
    """Tracks the size and number of events in buckets.

    Histograms allow you to calculate aggregate quantiles
    such as 25, 50, 95, 99 percentile latency for an RPC.

    This corresponds to Prometheus' histogram metric:
    https://prometheus.io/docs/concepts/metric_types/#histogram

    Args:
        name(str): Name of the metric.
        description(str): Description of the metric.
        boundaries(list): Boundaries of histogram buckets.
        tag_keys(tuple): Tag keys of the metric.
    """

    def __init__(self,
                 name: str,
                 description: str = "",
                 boundaries: List[float] = None,
                 tag_keys: Optional[Tuple[str]] = None):
        super().__init__(name, description, tag_keys)
        # `boundaries` defaults to None only for signature symmetry with
        # the other metrics; it is effectively required.
        if boundaries is None or len(boundaries) == 0:
            raise ValueError(
                "boundaries argument should be provided when using "
                "the Histogram class. e.g., "
                "Histogram(\"name\", boundaries=[1.0, 2.0])")
        self.boundaries = boundaries
        self._metric = CythonHistogram(self._name, self._description,
                                       self.boundaries, self._tag_keys)

    def observe(self, value: Union[int, float], tags: Dict[str, str] = None):
        """Observe a given `value` and add it to the appropriate bucket.

        Tags passed in will take precedence over the metric's default tags.

        Args:
            value(int, float): Value to observe and add to a bucket.
            tags(Dict[str, str]): Tags to set or override for this histogram.
        """
        if not isinstance(value, (int, float)):
            raise TypeError(f"value must be int or float, got {type(value)}.")
        self.record(value, tags, _internal=True)

    def __reduce__(self):
        # Pickle as (class, ctor-args); boundaries must round-trip too.
        deserializer = Histogram
        serialized_data = (self._name, self._description, self.boundaries,
                           self._tag_keys)
        return deserializer, serialized_data

    @property
    def info(self):
        """Return information about histogram metric."""
        info = super().info
        info.update({"boundaries": self.boundaries})
        return info
class Gauge(Metric):
    """Gauges keep the last recorded value and drop everything before.

    Unlike counters, gauges can go up or down over time.

    This corresponds to Prometheus' gauge metric:
    https://prometheus.io/docs/concepts/metric_types/#gauge

    Args:
        name(str): Name of the metric.
        description(str): Description of the metric.
        tag_keys(tuple): Tag keys of the metric.
    """

    def __init__(self,
                 name: str,
                 description: str = "",
                 tag_keys: Optional[Tuple[str]] = None):
        super().__init__(name, description, tag_keys)
        # Bind the underlying Cython gauge implementation.
        self._metric = CythonGauge(self._name, self._description,
                                   self._tag_keys)

    def set(self, value: Union[int, float], tags: Dict[str, str] = None):
        """Set the gauge to the given `value`.

        Tags passed in will take precedence over the metric's default tags.

        Args:
            value(int, float): Value to set the gauge to.
            tags(Dict[str, str]): Tags to set or override for this gauge.
        """
        if not isinstance(value, (int, float)):
            raise TypeError(f"value must be int or float, got {type(value)}.")
        self.record(value, tags, _internal=True)

    def __reduce__(self):
        # Rebuild from constructor arguments when unpickled.
        return Gauge, (self._name, self._description, self._tag_keys)
# Public API of this module. The `Metric` base class and the deprecated
# `Count` alias are not exported here.
__all__ = [
    "Counter",
    "Histogram",
    "Gauge",
]
| |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/NodeOps.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
# This test is used to verify that the Buildability of a set of nodes
# is unaffected by various querying operations on those nodes:
#
# 1) Calling exists() on a Node (e.g. from find_file) in a VariantDir
# will cause that node to be duplicated into the builddir.
# However, this should *not* occur during a dryrun (-n). When not
# performed during a dryrun, this should not affect buildability.
# 2) Calling is_derived() should not affect buildability.
import sys
import TestSCons
import os
# Platform-specific file-name affixes (executable suffix, library
# prefix/suffix, object suffix, DLL prefix/suffix).
_exe = TestSCons._exe
lib_ = TestSCons.lib_
_lib = TestSCons._lib
_obj = TestSCons._obj
dll_ = TestSCons.dll_
_dll = TestSCons._dll

if os.name == 'posix':
    # Make the freshly-built shared libraries in '.' resolvable at run time.
    os.environ['LD_LIBRARY_PATH'] = '.'
if sys.platform.find('irix') > -1:
    os.environ['LD_LIBRARYN32_PATH'] = '.'

test = TestSCons.TestSCons()

test.subdir('bld', 'src', ['src', 'subsrcdir'])
sconstruct = r"""
foo = Environment(SHOBJPREFIX='', WINDOWS_INSERT_DEF=1)
foo.Append(SHCXXFLAGS = '-DFOO')
bar = Environment(SHOBJPREFIX='', WINDOWS_INSERT_DEF=1)
bar.Append(SHCXXFLAGS = '-DBAR')
src = Dir('src')
VariantDir('bld', src, duplicate=1)
Nodes=[]
Nodes.extend(foo.SharedObject(target = 'foo%(_obj)s', source = 'prog.cpp'))
Nodes.extend(bar.SharedObject(target = 'bar%(_obj)s', source = 'prog.cpp'))
SConscript('bld/SConscript', ['Nodes'])
if %(_E)s:
import os
derived = [N.is_derived() for N in Nodes]
real1 = [os.path.exists(str(N)) for N in Nodes]
exists = [N.exists() for N in Nodes]
real2 = [os.path.exists(str(N)) for N in Nodes]
for N,D,R,E,F in zip(Nodes, derived, real1, exists, real2):
print '%%s: %%s %%s %%s %%s'%%(N,D,R,E,F)
foo.SharedLibrary(target = 'foo', source = 'foo%(_obj)s')
bar.SharedLibrary(target = 'bar', source = 'bar%(_obj)s')
fooMain = foo.Clone(LIBS='foo', LIBPATH='.')
foo_obj = fooMain.Object(target='foomain', source='main.c')
fooMain.Program(target='fooprog', source=foo_obj)
barMain = bar.Clone(LIBS='bar', LIBPATH='.')
bar_obj = barMain.Object(target='barmain', source='main.c')
barMain.Program(target='barprog', source=bar_obj)
gooMain = foo.Clone(LIBS='goo', LIBPATH='bld')
goo_obj = gooMain.Object(target='goomain', source='main.c')
gooMain.Program(target='gooprog', source=goo_obj)
"""
# Windows .def files for the two shared libraries (used via
# WINDOWS_INSERT_DEF=1 above; harmless elsewhere).
test.write('foo.def', r"""
LIBRARY "foo"
EXPORTS
doIt
""")

test.write('bar.def', r"""
LIBRARY "bar"
EXPORTS
doIt
""")

# The one shared source; FOO/BAR is selected per-environment via SHCXXFLAGS.
test.write('prog.cpp', r"""
#include <stdio.h>
extern "C" void
doIt()
{
#ifdef FOO
printf("prog.cpp: FOO\n");
#endif
#ifdef BAR
printf("prog.cpp: BAR\n");
#endif
}
""")
sconscript = r"""
import os
Import('*')
def mycopy(env, source, target):
open(str(target[0]),'w').write(open(str(source[0]),'r').read())
def exists_test(node):
before = os.path.exists(str(node)) # doesn't exist yet in VariantDir
via_node = node.exists() # side effect causes copy from src
after = os.path.exists(str(node))
node.is_derived()
import SCons.Script
if GetOption('no_exec'):
if (before,via_node,after) != (False,False,False):
import sys
sys.stderr.write('VariantDir exists() populated during dryrun!\n')
sys.exit(-2)
else:
if (before,via_node,after) != (False,True,True):
import sys
sys.stderr.write('VariantDir exists() population did not occur! (%%s:%%s,%%s,%%s)\n'%%(str(node),before,via_node,after))
sys.exit(-2)
goo = Environment()
goo.Append(CFLAGS = '-DFOO')
goof_in = File('goof.in')
if %(_E)s:
exists_test(goof_in)
Nodes.append(goof_in)
Nodes.extend(goo.Command(target='goof.c', source='goof.in', action=mycopy))
boo_src = File('subsrcdir/boo.c')
if %(_E)s:
exists_test(boo_src)
boo_objs = goo.Object(target='subsrcdir/boo%(_obj)s', source = boo_src)
Nodes.extend(boo_objs)
Nodes.extend(goo.Object(target='goo%(_obj)s',source='goof.c'))
goo.Library(target = 'goo', source = ['goo%(_obj)s'] + boo_objs)
"""
# Source files that live under src/ and get duplicated into bld/.
test.write(['src', 'goof.in'], r"""
#include <stdio.h>
extern char *boo_sub();
void
doIt()
{
#ifdef FOO
printf("prog.cpp: %s\n", boo_sub());
#endif
}
""")

test.write(['src', 'subsrcdir', 'boo.c'], r"""
char *
boo_sub()
{
return "GOO";
}
""")

# Shared driver linked against each of the three libraries in turn.
test.write('main.c', r"""
void doIt();
int
main(int argc, char* argv[])
{
doIt();
return 0;
}
""")
# Sources that get duplicated from src/ into the variant dir.
builddir_srcnodes = [ os.path.join('bld', 'goof.in'),
                      os.path.join('bld', 'subsrcdir', 'boo.c'),
                    ]

# Targets built inside the variant dir.
sub_build_nodes = [ os.path.join('bld', 'subsrcdir','boo' + _obj),
                    os.path.join('bld', 'goo' + _obj),
                    os.path.join('bld', 'goof.c'),
                    os.path.join('bld', lib_ + 'goo' + _lib),
                  ]

# Everything a full top-level build is expected to produce.
build_nodes = ['fooprog' + _exe,
               dll_ + 'foo' + _dll,
               'foo' + _obj,
               'barprog' + _exe,
               dll_ + 'bar' + _dll,
               'bar' + _obj,
               'gooprog' + _exe,
               ] + builddir_srcnodes + sub_build_nodes
def cleanup_test():
    """Remove all generated files and verify the tree is clean again."""
    # The duplicated sources must be unlinked before the clean run; the
    # clean operation repopulates them, so unlink them once more after.
    for srcnode in builddir_srcnodes:
        test.unlink(srcnode)
    test.run(arguments='-c')
    for srcnode in builddir_srcnodes:
        test.unlink(srcnode)
    for target in build_nodes:
        test.must_not_exist(test.workpath(target))
### First pass, make sure everything goes quietly
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

_E=0
test.write('SConstruct', sconstruct % locals() )
test.write(['src', 'SConscript'], sconscript % locals() )

test.run(arguments = '.',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
test.run(program = test.workpath('fooprog'), stdout = "prog.cpp: FOO\n")
test.run(program = test.workpath('barprog'), stdout = "prog.cpp: BAR\n")
test.run(program = test.workpath('gooprog'), stdout = "prog.cpp: GOO\n")
for name in build_nodes:
    test.must_exist(test.workpath(name))

cleanup_test()

### Next pass: add internal Node ops that may have side effects to
### ensure that those side-effects don't interfere with building
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

_E=1
test.write('SConstruct', sconstruct % locals() )
test.write(['src', 'SConscript'], sconscript % locals() )

test.run(arguments = '.',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
test.run(program = test.workpath('fooprog'), stdout = "prog.cpp: FOO\n")
test.run(program = test.workpath('barprog'), stdout = "prog.cpp: BAR\n")
test.run(program = test.workpath('gooprog'), stdout = "prog.cpp: GOO\n")
for name in build_nodes:
    test.must_exist(test.workpath(name))

cleanup_test()

### Next pass: try a dry-run first and verify that it doesn't change
### the buildability.
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

_E=1
test.write('SConstruct', sconstruct % locals() )
test.write(['src', 'SConscript'], sconscript % locals() )

# The dry run must not create any targets (nor duplicate any sources)...
test.run(arguments = '-n .',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

# ...and a subsequent real build must still succeed.
test.run(arguments = '.',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
test.run(program = test.workpath('fooprog'), stdout = "prog.cpp: FOO\n")
test.run(program = test.workpath('barprog'), stdout = "prog.cpp: BAR\n")
test.run(program = test.workpath('gooprog'), stdout = "prog.cpp: GOO\n")
for name in build_nodes:
    test.must_exist(test.workpath(name))

cleanup_test()

### Next pass: do an up-build from a VariantDir src
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

_E=0
test.write('SConstruct', sconstruct % locals() )
test.write(['src', 'SConscript'], sconscript % locals() )

test.run(chdir='src', arguments = '-u',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
# An up-build from src/ should produce only the variant-dir targets.
for name in build_nodes:
    if name in sub_build_nodes or name in builddir_srcnodes:
        test.must_exist(test.workpath(name))
    else:
        test.must_not_exist(test.workpath(name))

cleanup_test()

### Next pass: do an up-build from a VariantDir src with Node Ops
### side-effects
for name in build_nodes:
    test.must_not_exist(test.workpath(name))

_E=1
test.write('SConstruct', sconstruct % locals() )
test.write(['src', 'SConscript'], sconscript % locals() )

test.run(chdir='src', arguments = '-u',
         stderr=TestSCons.noisy_ar,
         match=TestSCons.match_re_dotall)
for name in build_nodes:
    if name in sub_build_nodes or name in builddir_srcnodes:
        test.must_exist(test.workpath(name))
    else:
        test.must_not_exist(test.workpath(name))

cleanup_test()

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
import bs4
import re
import calendar
import datetime
import time
import json
import os.path
import parsedatetime
import bleach
import WebRequest
import common.util.urlFuncs
import WebMirror.OutputFilters.FilterBase
import WebMirror.OutputFilters.util.TitleParsers as titleParsers
import WebMirror.OutputFilters.util.MessageConstructors as msgpackers
from .. import SeriesPageCommon
class SHSeriesPageFilter(WebMirror.OutputFilters.FilterBase.FilterBase):
    """Output filter that scrapes ScribbleHub series pages into series-meta
    and chapter-release packets, and emits them onto the local AMQP queue."""

    wanted_mimetypes = [
        'text/html',
    ]
    want_priority = 55

    # NOTE(review): logger path still says "RoyalRoad" — looks like a
    # copy-paste from the RR filter; confirm whether it should name SH.
    loggerPath = "Main.Filter.RoyalRoad.Page"

    # Matches ScribbleHub series URLs; group(1) is the numeric series id.
    match_re = re.compile(r"^https?://(?:www\.)?scribblehub\.com/series/(\d+)(?:/[a-zA-Z0-9-]+/?|/?)?", flags=re.IGNORECASE)

    @classmethod
    def wantsUrl(cls, url):
        # Claim any URL that looks like a ScribbleHub series page.
        if cls.match_re.search(url):
            print("SHSeriesPageFilter Wants url: '%s'" % url)
            return True
        return False

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # Page URL and raw page content handed over by the crawl engine.
        self.pageUrl = kwargs['pageUrl']
        self.content = kwargs['pgContent']
        self.type = kwargs['type']
        self.log.info("Processing ScribbleHub Series Page")
        super().__init__(**kwargs)

    ##################################################################################################################################
    ##################################################################################################################################
    ##################################################################################################################################

    def extractSeriesReleases(self, seriesPageUrl, soup):
        """Scrape one series page soup into a list of message packets.

        Returns [] whenever the page fails a sanity check or one of the
        upload thresholds (rating, rating count, minimum chapter count).
        """
        match = self.match_re.search(seriesPageUrl)
        series_id = match.group(1)

        titletg = soup.find("div", class_='fic_title')
        authortg = soup.find("span", class_='auth_name_fic')

        if not titletg:
            self.log.error("Could not find title tag!")
            return []
        if not authortg:
            self.log.error("Could not find author tag!")
            return []

        # Merge every ld+json metadata blob on the page into one dict.
        metas = soup.find_all("script", type="application/ld+json")
        agg_meta = {}
        for meta in metas:
            loaded = json.loads(meta.string)
            for k, v in loaded.items():
                agg_meta[k] = v

        rating = float(agg_meta.get('ratingValue', "0"))
        rating_cnt = float(agg_meta.get('ratingCount', "0"))

        self.log.info("Rating value: %s, Rating cnt: %s", rating, rating_cnt)

        # Skip series below the configured quality thresholds.
        if rating < SeriesPageCommon.MIN_RATING_STARS:
            self.log.error("Item rating below upload threshold: %s", rating)
            return []
        if rating_cnt < SeriesPageCommon.MIN_RATE_CNT:
            self.log.error("Item has insufficent ratings: %s", rating_cnt)
            return []

        title = titletg.get_text().strip()
        author = authortg.get_text().strip()

        # Strip any markup that leaked into the title/author strings.
        title = bleach.clean(title, tags=[], attributes=[], styles=[], strip=True, strip_comments=True)
        author = bleach.clean(author, tags=[], attributes=[], styles=[], strip=True, strip_comments=True)

        descDiv = soup.find('div', class_='wi_fic_desc')
        if not descDiv or not descDiv.p:
            self.log.error("Incomplete or broken description?")
            return []

        # Collect description text from the div's direct children, then
        # wrap each non-empty chunk in a <p> tag.
        desc = []
        for segment in descDiv:
            if isinstance(segment, bs4.NavigableString):
                desc.append(str(segment).strip())
            else:
                if segment.get_text().strip():
                    desc.append(segment.get_text().strip())

        desc = ['<p>{}</p>'.format(line) for line in desc if line.strip()]

        tags = []
        tagdiv = soup.find('span', class_='wi_fic_showtags')
        for tag in tagdiv.find_all('a', class_='stag'):
            tagtxt = SeriesPageCommon.clean_tag(tag.get_text())
            tagtxt = SeriesPageCommon.fix_tag(tagtxt)
            tags.append(tagtxt)

        # These are separate on SH, but I'm just treating them as tags.
        for tag in soup.find_all('li', class_='mature_contains'):
            tagtxt = SeriesPageCommon.clean_tag(tag.get_text())
            tagtxt = SeriesPageCommon.fix_tag(tagtxt)
            tags.append(tagtxt)

        genres = []
        genrediv = soup.find('span', class_='wi_fic_genre')
        for genre in genrediv.find_all('a', class_='fic_genre'):
            genretxt = SeriesPageCommon.clean_tag(genre.get_text())
            genretxt = SeriesPageCommon.fix_genre(genretxt)
            genres.append(genretxt)

        # Series-level metadata packet.
        seriesmeta = {}
        seriesmeta['title'] = msgpackers.fix_string(title)
        seriesmeta['author'] = msgpackers.fix_string(author)
        seriesmeta['tags'] = tags
        seriesmeta['homepage'] = seriesPageUrl
        seriesmeta['desc'] = "\r\n".join(desc)
        seriesmeta['tl_type'] = 'oel'
        seriesmeta['sourcesite'] = 'ScribbleHub'
        seriesmeta['create_tags'] = True

        meta_pkt = msgpackers.createSeriesInfoPacket(seriesmeta, matchAuthor=True)

        # Extra data attached to every release message.
        extra = {}
        extra['tags'] = tags
        extra['genres'] = genres
        extra['homepage'] = seriesPageUrl
        extra['sourcesite'] = 'ScribbleHub'

        self.log.info("Found %s tags, %s genres", len(tags), len(genres))

        chapters = soup.find_all("li", class_='toc_w')

        raw_retval = []
        for chapter in chapters:
            # Each ToC row carries the chapter link (<a>) and a <span>
            # whose title attribute holds the release date.
            cname, cdate = chapter.a, chapter.span
            if not (cname and cdate):
                self.log.warning("Row with invalid number of entries?")
                continue
            if not cdate.get("title"):
                self.log.error("No time entry?")
                continue

            timestr = cdate.get("title").strip()
            itemDate, status = parsedatetime.Calendar().parse(timestr)
            if status < 1:
                self.log.warning("Failure processing date: %s", timestr)
                continue

            reldate = time.mktime(itemDate)
            relurl = common.util.urlFuncs.rebaseUrl(cname['href'], seriesPageUrl)
            chp_title = cname.get_text().strip()
            # Parse volume/chapter/fragment numbering from the title text.
            vol, chp, frag, _ = titleParsers.extractTitle(chp_title + " " + title)

            raw_item = {}
            raw_item['srcname'] = "ScribbleHub"
            raw_item['published'] = float(reldate)
            raw_item['linkUrl'] = relurl

            raw_msg = msgpackers._buildReleaseMessage(
                raw_item,
                title,
                vol,
                chp,
                frag,
                author = author,
                postfix = chp_title,
                tl_type = 'oel',
                extraData = extra,
                matchAuthor = True
            )
            raw_retval.append(raw_msg)

        raw_retval = SeriesPageCommon.check_fix_numbering(self.log, raw_retval, series_id, sh=True)

        # Do not add series without 3 chapters.
        if len(raw_retval) < 3:
            self.log.info("Less then three chapters!")
            return []
        if not raw_retval:
            self.log.info("Retval empty?!")
            return []

        retval = [msgpackers.createReleasePacket(raw_msg) for raw_msg in raw_retval] + [meta_pkt]

        self.log.info("Found %s chapter releases on series page!", len(retval))

        self.put_measurement(
            measurement_name = 'chapter_releases',
            measurement = len(retval),
            fields = {},
            extra_tags = {"site" : "ScribbleHub"},
        )
        return retval

    def sendReleases(self, releases):
        """Push every extracted packet onto the local AMQP queue."""
        self.log.info("Total releases found on page: %s. Emitting messages into AMQP local queue.", len(releases))
        self.amqp_put_many(releases)

    def processPage(self, url, content):
        # Ignore 404 chapters
        if "<title> | Scribble Hub</title>" in content:
            self.log.warning("No series?")
            return

        # NOTE(review): this parses self.content rather than the `content`
        # parameter; the only caller passes self.content, so the behavior
        # is the same — confirm before relying on the parameter.
        soup = WebRequest.as_soup(self.content)
        releases = self.extractSeriesReleases(self.pageUrl, soup)
        if releases:
            self.sendReleases(releases)
        else:
            self.log.info("No releases found on page?")

    ##################################################################################################################################
    ##################################################################################################################################
    ##################################################################################################################################

    def extractContent(self):
        # Entry point invoked by the filter framework.
        self.processPage(self.pageUrl, self.content)
def test():
    """Manually exercise the filter by re-fetching a fixed set of
    ScribbleHub series pages through the site archiver."""
    print("Test mode!")
    import logSetup
    from WebMirror.Engine import SiteArchiver
    import common.database as db
    logSetup.initLogging()

    def fetch(url):
        # Run one URL through the archiver, bypassing the page cache.
        with db.session_context() as sess:
            archiver = SiteArchiver(
                cookie_lock=None,
                db_interface=sess,
                new_job_queue=None,
            )
            archiver.synchronousJobRequest(url, ignore_cache=True, debug=True)

    series_urls = [
        'https://www.scribblehub.com/series/112220/the-dragonkin-and-the-succubus/',
        'https://www.scribblehub.com/series/107977/bookworld-online-marsh-man/',
        'https://www.scribblehub.com/series/100965/reincarnation-of-a-worthless-man/',
        'https://www.scribblehub.com/series/106548/i-am-an-eggplant-bl/',
        'https://www.scribblehub.com/series/81596/the-broken-system-what-bred-a-king/',
        'https://www.scribblehub.com/series/82656/the-th-demon-lord/',
        'https://www.scribblehub.com/series/66899/the-trials-path-toward-godhood-warning-mature-content/',
        'https://www.scribblehub.com/series/106712/lust-knight/',
        'https://www.scribblehub.com/series/111453/the-forgotten-character/',
        'https://www.scribblehub.com/series/69064/morbid/',
        'https://www.scribblehub.com/series/34196/the-legend-of-the-fake-hero/',
        'https://www.scribblehub.com/series/58245/a-reincarnated-demons-tales-of-wonder/',
        'https://www.scribblehub.com/series/86103/the-demon-lords-successor/',
        'https://www.scribblehub.com/series/93826/waking-up-as-a-spaceship-whats-a-ship-girl-supposed-to-do-now/',
        'https://www.scribblehub.com/series/94224/the-man-who-killed-the-first-monster/',
        'https://www.scribblehub.com/series/110849/monster-parade/',
        'https://www.scribblehub.com/series/40636/falling-over/',
        'https://www.scribblehub.com/series/94576/psionic-goddess-and-the-akashic-system/',
        'https://www.scribblehub.com/series/98089/the-creed-of-an-avenger-an-arifureta-fanfic/',
        'https://www.scribblehub.com/series/51635/eh-where-did-my-pen-pen-go/',
        'https://www.scribblehub.com/series/81242/summoned-again/',
        'https://www.scribblehub.com/series/62217/ultimate-fruit/',
        'https://www.scribblehub.com/series/108367/the-queen-of-darkness-does-not-want-to-be-the-villain/',
        'https://www.scribblehub.com/series/101250/reborn-as-batmans-little-brother/',
        'https://www.scribblehub.com/series/10442/world-keeper/',
        'https://www.scribblehub.com/series/83275/nero-my-existence-is-perfect/',
    ]
    for series_url in series_urls:
        fetch(series_url)

    # engine.dispatchRequest(testJobFromUrl('http://www.ScribbleHub.com/fiction/3021'))
    # engine.dispatchRequest(testJobFromUrl('http://www.ScribbleHub.com/fictions/latest-updates/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.ScribbleHub.com/fictions/best-rated/'))
    # fetch('https://www.scribblehub.com/series-ranking/')
    # fetch('https://www.scribblehub.com/series-ranking/?sort=3&order=1')
    # fetch('https://www.scribblehub.com/latest-series/')
# Running this module directly exercises the manual test harness above.
if __name__ == "__main__":
    test()
| |
from PyQt4 import QtCore, QtGui, uic
from wallet import wallet
from tablemodel import TableModel, ProxyModel
class AddAssetDialog(QtGui.QDialog):
    """Dialog for registering an existing asset with the wallet
    (moniker, color description and unit)."""

    def __init__(self, parent):
        QtGui.QDialog.__init__(self, parent)
        uic.loadUi(uic.getUiPath('addassetdialog.ui'), self)
        # Clear a field's error highlight as soon as it receives focus.
        # (name=wname binds the loop variable early; a plain closure
        # would capture only the last widget name.)
        for wname in ['edtMoniker', 'edtColorDesc', 'edtUnit']:
            getattr(self, wname).focusInEvent = \
                lambda e, name=wname: getattr(self, name).setStyleSheet('')

    def isValid(self):
        """Validate the form, highlighting invalid fields in red.

        Returns True only when the moniker is non-empty and not already
        taken, the color description is non-empty, and the unit is numeric.
        """
        moniker = self.edtMoniker.text()
        a = bool(moniker)
        if a and moniker in wallet.get_all_monikers():
            QtGui.QMessageBox.warning(
                self, 'Already exists!',
                "Moniker <b>%s</b> already exists!" % moniker,
                QtGui.QMessageBox.Ok)
            a = False
        if not a:
            self.edtMoniker.setStyleSheet('background:#FF8080')
        b = bool(self.edtColorDesc.text())
        if not b:
            self.edtColorDesc.setStyleSheet('background:#FF8080')
        c = str(self.edtUnit.text()).isdigit()
        if not c:
            self.edtUnit.setStyleSheet('background:#FF8080')
        return all([a, b, c])

    def accept(self):
        # Only close the dialog when the form validates.
        if self.isValid():
            QtGui.QDialog.accept(self)

    def get_data(self):
        """Return the entered values converted to plain Python types."""
        return {
            'moniker': str(self.edtMoniker.text()),
            'color_desc': str(self.edtColorDesc.text()),
            'unit': int(self.edtUnit.text()),
        }
class IssueCoinsDialog(QtGui.QDialog):
    """Dialog for issuing a brand-new colored-coin asset.

    The bitcoin needed is units * atoms; it is recomputed live as the
    user types and checked against the wallet's available balance.
    """

    def __init__(self, parent):
        QtGui.QDialog.__init__(self, parent)
        uic.loadUi(uic.getUiPath('issuedialog.ui'), self)
        self.cbScheme.addItem('obc')
        # Clear a field's error highlight on focus (name=wname binds the
        # loop variable early).
        for wname in ['edtMoniker', 'edtUnits', 'edtAtoms']:
            getattr(self, wname).focusInEvent = \
                lambda e, name=wname: getattr(self, name).setStyleSheet('')
        self.edtUnits.textChanged.connect(self.changeTotalBTC)
        self.edtAtoms.textChanged.connect(self.changeTotalBTC)
        self.availableBTC = wallet.get_balance('bitcoin')
        self.lblTotalBTC.setToolTip('Available: %s bitcoin' % \
            wallet.get_asset_definition('bitcoin').format_value(self.availableBTC))

    def changeTotalBTC(self):
        # PyQt4 QString.toInt() returns an (int value, bool ok) pair.
        amount = self.edtUnits.text().toInt()
        units = self.edtAtoms.text().toInt()
        if amount[1] and units[1]:
            need = amount[0] * units[0]
            text = '%s bitcoin' % \
                wallet.get_asset_definition('bitcoin').format_value(need)
            if need > self.availableBTC:
                # Highlight in red when the wallet cannot cover the issue.
                text = '<font color="#FF3838">%s</font>' % text
            self.lblTotalBTC.setText(text)

    def isValid(self):
        """Validate the form, highlighting invalid fields in red."""
        moniker = self.edtMoniker.text()
        a = bool(moniker)
        if a and moniker in wallet.get_all_monikers():
            QtGui.QMessageBox.warning(
                self, 'Already exists!',
                "Moniker <b>%s</b> already exists!" % moniker,
                QtGui.QMessageBox.Ok)
            a = False
        if not a:
            self.edtMoniker.setStyleSheet('background:#FF8080')
        b = self.edtUnits.text().toInt()
        if not b[1]:
            self.edtUnits.setStyleSheet('background:#FF8080')
        c = self.edtAtoms.text().toInt()
        if not c[1]:
            self.edtAtoms.setStyleSheet('background:#FF8080')
        d = False
        if b[1] and c[1] and b[0]*c[0] <= self.availableBTC:
            d = True
        # NOTE(review): b and c are (value, ok) tuples and therefore always
        # truthy in all(); the validity of those two fields is effectively
        # carried by d alone.
        return all([a, b, c, d])

    def accept(self):
        # Only close the dialog when the form validates.
        if self.isValid():
            QtGui.QDialog.accept(self)

    def get_data(self):
        """Return the entered values converted to plain Python types."""
        return {
            'moniker': str(self.edtMoniker.text()),
            'coloring_scheme': str(self.cbScheme.currentText()),
            'units': self.edtUnits.text().toInt()[0],
            'atoms': self.edtAtoms.text().toInt()[0],
        }
class AssetTableModel(TableModel):
    # Column headers and per-column alignment for the asset table.
    _columns = ['Moniker', 'Color set', 'Unit']
    _alignment = [
        QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter,
        QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter,
        QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter,
    ]
class AssetProxyModel(ProxyModel):
    """Sort/filter proxy for AssetTableModel; behavior inherited unchanged."""
    pass
class AssetPage(QtGui.QWidget):
    """Wallet page listing all assets, with add/issue buttons and a
    context menu for copying cells or jumping to the addresses page."""

    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        uic.loadUi(uic.getUiPath('assetpage.ui'), self)

        self.model = AssetTableModel(self)
        self.proxyModel = AssetProxyModel(self)
        self.proxyModel.setSourceModel(self.model)
        self.proxyModel.setDynamicSortFilter(True)
        self.proxyModel.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.proxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)

        self.tableView.setModel(self.proxyModel)
        self.tableView.sortByColumn(0, QtCore.Qt.AscendingOrder)
        # Moniker column stretches; the other two size to their contents.
        self.tableView.horizontalHeader().setResizeMode(
            0, QtGui.QHeaderView.Stretch)
        self.tableView.horizontalHeader().setResizeMode(
            1, QtGui.QHeaderView.ResizeToContents)
        self.tableView.horizontalHeader().setResizeMode(
            2, QtGui.QHeaderView.ResizeToContents)

        self.btnAddExistingAsset.clicked.connect(self.btnAddExistingAssetClicked)
        self.btnAddNewAsset.clicked.connect(self.btnAddNewAssetClicked)

    def update(self):
        # NOTE(review): shadows QtGui.QWidget.update(); it is only ever
        # called explicitly here to rebuild the table — confirm intent.
        self.model.removeRows(0, self.model.rowCount())
        for asset in wallet.get_all_asset():
            self.model.addRow(
                [asset['monikers'][0], asset['color_set'][0], asset['unit']])

    def contextMenuEvent(self, event):
        """Show copy/show-addresses actions for the selected row."""
        selected = self.tableView.selectedIndexes()
        if not selected:
            return
        actions = [
            self.actionCopyMoniker,
            self.actionCopyColorSet,
            self.actionCopyUnit,
            self.actionShowAddresses,
        ]
        menu = QtGui.QMenu()
        for action in actions:
            menu.addAction(action)
        result = menu.exec_(event.globalPos())
        if result is None or result not in actions:
            return
        if 0 <= actions.index(result) <= 2:
            # Copy actions 0..2: pick the selected index at the matching
            # position (presumably one index per column of the selected
            # row — verify with TableModel's selection behavior).
            index = selected[actions.index(result)]
            QtGui.QApplication.clipboard().setText(
                self.proxyModel.data(index).toString())
        elif actions.index(result) == 3:
            # Jump to the addresses page filtered by this row's moniker.
            window = self.parentWidget().parentWidget().parentWidget()
            window.gotoAddressesPage()
            window.addressespage.setMonikerFilter(
                self.proxyModel.data(selected[0]).toString())

    def selectRowByMoniker(self, moniker):
        """Select the first table row whose moniker column matches."""
        moniker = QtCore.QString(moniker)  # Python 2 / PyQt4 API
        for row in xrange(self.proxyModel.rowCount()):
            index = self.proxyModel.index(row, 0)
            if self.proxyModel.data(index).toString() == moniker:
                self.tableView.selectRow(row)
                break

    def btnAddExistingAssetClicked(self):
        # Add an asset the user already has a color description for.
        dialog = AddAssetDialog(self)
        if dialog.exec_():
            data = dialog.get_data()
            wallet.add_asset(data)
            self.update()
            self.selectRowByMoniker(data['moniker'])

    def btnAddNewAssetClicked(self):
        # Issue brand-new colored coins via the issue dialog.
        dialog = IssueCoinsDialog(self)
        if dialog.exec_():
            data = dialog.get_data()
            wallet.issue(data)
            self.update()
            self.selectRowByMoniker(data['moniker'])
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy implementation for DB.API
"""
import sys
from oslo.config import cfg
import sqlalchemy as sa
from rally.db.sqlalchemy import models
from rally import exceptions
from rally.openstack.common.db.sqlalchemy import session as db_session
CONF = cfg.CONF
# Register the database 'connection' option (the DB URL) so it is
# available when _create_facade_lazily() reads CONF.database.connection.
CONF.import_opt('connection',
                'rally.openstack.common.db.options',
                group='database')

# Lazily created EngineFacade singleton; see _create_facade_lazily().
_FACADE = None
def _create_facade_lazily():
    """Return the module-wide EngineFacade, creating it on first use."""
    global _FACADE

    if _FACADE is not None:
        return _FACADE

    _FACADE = db_session.EngineFacade.from_config(
        CONF.database.connection, CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the (lazily created) facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new session from the (lazily created) facade."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """The backend is this module itself."""
    # Returned to oslo.db's backend loader, which calls the module-level
    # functions defined here (task_*, deployment_*, ...).
    return sys.modules[__name__]
def db_cleanup():
    """Drop the cached engine facade so it is rebuilt on next use."""
    global _FACADE
    _FACADE = None
def db_create():
    """Create all database tables."""
    models.create_db()
def db_drop():
    """Drop all database tables."""
    models.drop_db()
def model_query(model, session=None):
    """The helper method to create query.

    :param model: The instance of
                  :class:`rally.db.sqlalchemy.models.RallyBase` to
                  request it.
    :param session: Reuse the session object or get new one if it is
                    None.
    :returns: The query object.
    :raises: :class:`Exception` when the model is not a sublcass of
             :class:`rally.db.sqlalchemy.models.RallyBase`.
    """
    def issubclassof_rally_base(obj):
        return isinstance(obj, type) and issubclass(obj, models.RallyBase)

    # Fix: validate the model BEFORE touching the session, so an invalid
    # model never causes a session/query to be constructed for nothing
    # (the original built the query first and validated afterwards).
    if not issubclassof_rally_base(model):
        raise Exception(_("The model should be a subclass of RallyBase"))

    session = session or get_session()
    return session.query(model)
def _task_get(uuid, session=None):
    """Return the Task with the given UUID or raise TaskNotFound."""
    query = model_query(models.Task, session=session)
    task = query.filter_by(uuid=uuid).first()
    if not task:
        raise exceptions.TaskNotFound(uuid=uuid)
    return task
def task_get(uuid):
    """Return the task with the given UUID (raises TaskNotFound)."""
    return _task_get(uuid)
def task_get_detailed(uuid):
    """Return the task with its results eagerly loaded, or None."""
    query = model_query(models.Task)
    query = query.options(sa.orm.joinedload('results'))
    return query.filter_by(uuid=uuid).first()
def task_get_detailed_last():
    """Return the newest task (highest id) with results eagerly loaded."""
    query = model_query(models.Task)
    query = query.options(sa.orm.joinedload('results'))
    return query.order_by(models.Task.id.desc()).first()
def task_create(values):
    """Create and persist a new task built from ``values``."""
    new_task = models.Task()
    new_task.update(values)
    new_task.save()
    return new_task
def task_update(uuid, values):
    """Update the task identified by ``uuid`` with ``values``."""
    values.pop('uuid', None)  # the UUID is immutable; never overwrite it
    session = get_session()
    with session.begin():
        task = _task_get(uuid, session=session)
        task.update(values)
    return task
def task_list(status=None):
    """List all tasks, optionally restricted to a given status."""
    query = model_query(models.Task)
    if status is None:
        return query.all()
    return query.filter_by(status=status).all()
def task_delete(uuid, status=None):
    """Delete a task (and its results) by UUID, optionally guarded by status.

    :param uuid: UUID of the task to delete.
    :param status: If given, only delete the task when it currently has
                   this status.
    :raises: TaskInvalidStatus when the task exists but has a different
             status; TaskNotFound when no task with ``uuid`` exists.
    """
    session = get_session()
    with session.begin():
        query = base_query = model_query(models.Task).filter_by(uuid=uuid)
        if status is not None:
            query = base_query.filter_by(status=status)
        # Remove dependent result rows first so no orphans are left behind.
        model_query(models.TaskResult).\
            filter_by(task_uuid=uuid).\
            delete(synchronize_session=False)
        count = query.delete(synchronize_session=False)
        if not count:
            # Nothing was deleted: distinguish "exists with wrong status"
            # from "does not exist" for a precise error.
            if status is not None:
                task = base_query.first()
                if task:
                    raise exceptions.TaskInvalidStatus(uuid=uuid,
                                                       require=status,
                                                       actual=task.status)
            raise exceptions.TaskNotFound(uuid=uuid)
def task_result_create(task_uuid, key, data):
    """Persist one result record for the given task."""
    row = models.TaskResult()
    row.update({"task_uuid": task_uuid, "key": key, "data": data})
    row.save()
    return row
def task_result_get_all_by_uuid(uuid):
    """Return every result record belonging to the given task UUID."""
    query = model_query(models.TaskResult)
    return query.filter_by(task_uuid=uuid).all()
def _deployment_get(uuid, session=None):
    """Return the Deployment with the given UUID or raise DeploymentNotFound."""
    query = model_query(models.Deployment, session=session)
    deploy = query.filter_by(uuid=uuid).first()
    if not deploy:
        raise exceptions.DeploymentNotFound(uuid=uuid)
    return deploy
def deployment_create(values):
    """Create and persist a new deployment built from ``values``."""
    new_deployment = models.Deployment()
    new_deployment.update(values)
    new_deployment.save()
    return new_deployment
def deployment_delete(uuid):
    """Delete a deployment by UUID.

    :raises: DeploymentIsBusy when resources still reference the
             deployment; DeploymentNotFound when it does not exist.
    """
    session = get_session()
    with session.begin():
        # Refuse to delete while resources still belong to the deployment.
        count = model_query(models.Resource, session=session).\
            filter_by(deployment_uuid=uuid).\
            count()
        if count:
            raise exceptions.DeploymentIsBusy(uuid=uuid)
        count = model_query(models.Deployment, session=session).\
            filter_by(uuid=uuid).\
            delete(synchronize_session=False)
        if not count:
            raise exceptions.DeploymentNotFound(uuid=uuid)
def deployment_get(uuid):
    """Return the deployment with the given UUID (raises DeploymentNotFound)."""
    return _deployment_get(uuid)
def deployment_update(uuid, values):
    """Update the deployment identified by ``uuid`` with ``values``."""
    values.pop('uuid', None)  # the UUID is immutable; never overwrite it
    session = get_session()
    with session.begin():
        deploy = _deployment_get(uuid, session=session)
        deploy.update(values)
    return deploy
def deployment_list(status=None, parent_uuid=None):
    """List deployments by parent UUID, optionally narrowed by status."""
    query = model_query(models.Deployment).filter_by(parent_uuid=parent_uuid)
    if status is None:
        return query.all()
    return query.filter_by(status=status).all()
def resource_create(values):
    """Create and persist a new resource built from ``values``."""
    new_resource = models.Resource()
    new_resource.update(values)
    new_resource.save()
    return new_resource
def resource_get_all(deployment_uuid, provider_name=None, type=None):
    """List a deployment's resources, optionally narrowed by provider/type."""
    query = model_query(models.Resource).filter_by(
        deployment_uuid=deployment_uuid)
    if provider_name is not None:
        query = query.filter_by(provider_name=provider_name)
    if type is not None:
        query = query.filter_by(type=type)
    return query.all()
def resource_delete(id):
    """Delete the resource with the given primary key.

    :raises: ResourceNotFound when nothing was deleted.
    """
    deleted = model_query(models.Resource).filter_by(id=id).delete(
        synchronize_session=False)
    if not deleted:
        raise exceptions.ResourceNotFound(id=id)
def verification_create(deployment_uuid):
    """Create and persist a verification bound to the given deployment."""
    row = models.Verification()
    row.update({"deployment_uuid": deployment_uuid})
    row.save()
    return row
def verification_get(verification_uuid, session=None):
    """Return the verification with the given UUID.

    :raises: NotFoundException when no such verification exists.
    """
    query = model_query(models.Verification, session=session)
    verification = query.filter_by(uuid=verification_uuid).first()
    if not verification:
        raise exceptions.NotFoundException(
            "Can't find any verification with following UUID '%s'." %
            verification_uuid)
    return verification
def verification_update(verification_uuid, values):
    """Update the verification identified by UUID with ``values``.

    :raises: NotFoundException when no such verification exists.
    """
    session = get_session()
    with session.begin():
        verification = verification_get(verification_uuid, session=session)
        verification.update(values)
    return verification
def verification_list(status=None):
    """List all verifications, optionally restricted to a given status."""
    query = model_query(models.Verification)
    if status is None:
        return query.all()
    return query.filter_by(status=status).all()
def verification_delete(verification_uuid):
    """Delete the verification with the given UUID.

    :raises: NotFoundException when no such verification exists.
    """
    # Bug fix: the row was filtered by the ``id`` column even though the
    # caller passes a UUID (and the error message talks about a UUID), so
    # the filter was inconsistent with verification_get(), which filters
    # by ``uuid``. Filter by ``uuid`` here as well.
    count = model_query(models.Verification).\
        filter_by(uuid=verification_uuid).\
        delete(synchronize_session=False)
    if not count:
        raise exceptions.NotFoundException(
            "Can't find any verification with following UUID '%s'." %
            verification_uuid)
def verification_result_create(verification_uuid, data):
    """Persist one result record for the given verification."""
    row = models.VerificationResult()
    row.update({"verification_uuid": verification_uuid,
                "data": data})
    row.save()
    return row
def verification_result_get(verification_uuid):
    """Return the result record for the given verification UUID.

    :raises: NotFoundException when no result exists for the UUID.
    """
    query = model_query(models.VerificationResult)
    result = query.filter_by(verification_uuid=verification_uuid).first()
    if not result:
        raise exceptions.NotFoundException(
            "No results for following UUID '%s'." % verification_uuid)
    return result
| |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2019 - NetCracker Technology Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_log import log as logging
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import action_execution
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import sub_execution
from mistral.api.controllers.v2 import types
from mistral import context
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.utils import filter_utils
from mistral.utils import rest_utils
from mistral.workflow import data_flow
from mistral.workflow import states
LOG = logging.getLogger(__name__)

# Task states accepted by the REST API (filters and updates).
STATE_TYPES = wtypes.Enum(
    str,
    states.IDLE,
    states.RUNNING,
    states.SUCCESS,
    states.ERROR,
    states.RUNNING_DELAYED
)
def _get_task_resource_with_result(task_ex):
    """Build a Task resource whose ``result`` field is JSON-serialized."""
    task_resource = resources.Task.from_db_model(task_ex)
    execution_result = data_flow.get_task_execution_result(task_ex)
    task_resource.result = json.dumps(execution_result)
    return task_resource
# Use retries to prevent possible failures.
@rest_utils.rest_retry_on_db_error
def _get_task_execution(id):
    """Load a task execution with its workflow context fully resolved.

    Runs inside a DB transaction and forces deferred (lazily loaded)
    fields so they remain usable after the session is closed.

    :param id: Task execution ID.
    :return: Tuple of (Task resource with serialized result,
             task execution DB object).
    """
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)

        rest_utils.load_deferred_fields(task_ex, ['workflow_execution'])
        rest_utils.load_deferred_fields(
            task_ex.workflow_execution,
            ['context', 'input', 'params', 'root_execution']
        )
        rest_utils.load_deferred_fields(
            task_ex.workflow_execution.root_execution,
            ['params']
        )

        return _get_task_resource_with_result(task_ex), task_ex
def get_published_global(task_ex, wf_ex=None):
    """Evaluate and return a task's globally-published variables.

    :param task_ex: Task execution DB object.
    :param wf_ex: Workflow execution; defaults to the task's own.
    :return: Evaluated global publish variables, or None when the task
             is not finished or publishes nothing globally.
    """
    # Only finished tasks (SUCCESS/ERROR) can have publish results.
    if task_ex.state not in [states.SUCCESS, states.ERROR]:
        return

    if wf_ex is None:
        wf_ex = task_ex.workflow_execution

    # Build the expression-evaluation context from the task and its
    # workflow execution (environment, workflow context and input).
    expr_ctx = data_flow.ContextView(
        data_flow.get_current_task_dict(task_ex),
        task_ex.in_context,
        data_flow.get_workflow_environment_dict(wf_ex),
        wf_ex.context,
        wf_ex.input
    )
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    # Publish spec can differ per terminal state (on-success/on-error).
    publish_spec = task_spec.get_publish(task_ex.state)

    if not publish_spec:
        return

    global_vars = publish_spec.get_global()
    return expr.evaluate_recursively(global_vars, expr_ctx)
def _task_with_published_global(task, task_ex):
    """Attach globally-published variables to *task* when present."""
    published = get_published_global(task_ex)
    if published:
        task.published_global = published
    return task
class TaskExecutionsController(rest.RestController):
    """REST controller for workflow executions spawned by a task."""

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Executions, types.uuid, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, types.uuid, wtypes.text,
                         types.uniquelist, types.jsontype, STATE_TYPES,
                         wtypes.text, types.jsontype, types.jsontype,
                         wtypes.text, wtypes.text)
    def get_all(self, task_execution_id, marker=None, limit=None,
                sort_keys='created_at', sort_dirs='asc', fields='',
                workflow_name=None, workflow_id=None, description=None,
                tags=None, params=None, state=None,
                state_info=None, input=None, output=None,
                created_at=None, updated_at=None):
        """Return all executions that belong to the given task execution.

        :param task_execution_id: Task execution ID.
        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: desc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param description: Optional. Keep only resources with a specific
                            description.
        :param tags: Optional. Keep only resources containing specific tags.
        :param params: Optional. Keep only resources with specific parameters.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param input: Optional. Keep only resources with a specific input.
        :param output: Optional. Keep only resources with a specific output.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('executions:list', context.ctx())

        filters = filter_utils.create_filters_from_request_params(
            task_execution_id=task_execution_id,
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            tags=tags,
            params=params,
            state=state,
            state_info=state_info,
            input=input,
            output=output,
            updated_at=updated_at,
            description=description
        )

        LOG.debug(
            "Fetch executions. marker=%s, limit=%s, sort_keys=%s, "
            "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs,
            filters
        )

        return rest_utils.get_all(
            resources.Executions,
            resources.Execution,
            db_api.get_workflow_executions,
            db_api.get_workflow_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )
class TasksController(rest.RestController):
    """REST controller for task executions (get/list/rerun)."""

    # Sub-controllers exposed below /tasks/<id>/.
    action_executions = action_execution.TasksActionExecutionController()
    workflow_executions = TaskExecutionsController()
    executions = sub_execution.SubExecutionsController()

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Task, wtypes.text)
    def get(self, id):
        """Return the specified task.

        :param id: UUID of task to retrieve
        """
        acl.enforce('tasks:get', context.ctx())

        LOG.debug("Fetch task [id=%s]", id)

        task, task_ex = _get_task_execution(id)

        return _task_with_published_global(task, task_ex)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Tasks, types.uuid, int, types.uniquelist,
                         types.list, types.uniquelist, wtypes.text,
                         wtypes.text, types.uuid,
                         types.uuid, types.uniquelist, STATE_TYPES,
                         wtypes.text, wtypes.text, types.jsontype,
                         bool, wtypes.text, wtypes.text,
                         bool, types.jsontype)
    def get_all(self, marker=None, limit=None, sort_keys='created_at',
                sort_dirs='asc', fields='', name=None,
                workflow_name=None, workflow_id=None,
                workflow_execution_id=None, tags=None, state=None,
                state_info=None, result=None, published=None,
                processed=None, created_at=None, updated_at=None,
                reset=None, env=None):
        """Return all tasks.

        Where project_id is the same as the requester or
        project_id is different but the scope is public.

        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: desc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param name: Optional. Keep only resources with a specific name.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param workflow_execution_id: Optional. Keep only resources with a
                                      specific workflow execution ID.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param result: Optional. Keep only resources with a specific result.
        :param published: Optional. Keep only resources with specific
                          published content.
        :param processed: Optional. Keep only resources which have been
                          processed or not.
        :param reset: Optional. Keep only resources which have been reset or
                      not.
        :param env: Optional. Keep only resources with a specific environment.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('tasks:list', context.ctx())

        filters = filter_utils.create_filters_from_request_params(
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            tags=tags,
            state=state,
            state_info=state_info,
            updated_at=updated_at,
            name=name,
            workflow_execution_id=workflow_execution_id,
            result=result,
            published=published,
            processed=processed,
            reset=reset,
            env=env
        )

        LOG.debug(
            "Fetch tasks. marker=%s, limit=%s, sort_keys=%s, sort_dirs=%s,"
            " filters=%s", marker, limit, sort_keys, sort_dirs, filters
        )

        return rest_utils.get_all(
            resources.Tasks,
            resources.Task,
            db_api.get_task_executions,
            db_api.get_task_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Task, wtypes.text, body=resources.Task)
    def put(self, id, task):
        """Update the specified task execution.

        Only supports rerunning an ERROR'ed task by setting its state to
        RUNNING.

        :param id: Task execution ID.
        :param task: Task execution object.
        """
        acl.enforce('tasks:update', context.ctx())

        LOG.debug("Update task execution [id=%s, task=%s]", id, task)

        # Read everything needed for validation in one retried transaction.
        @rest_utils.rest_retry_on_db_error
        def _read_task_params(id, task):
            with db_api.transaction():
                task_ex = db_api.get_task_execution(id)
                task_spec = spec_parser.get_task_spec(task_ex.spec)
                task_name = task.name or None
                reset = task.reset
                env = task.env or None

                if task_name and task_name != task_ex.name:
                    raise exc.WorkflowException('Task name does not match.')

                wf_ex = db_api.get_workflow_execution(
                    task_ex.workflow_execution_id
                )

                return env, reset, task_ex, task_spec, wf_ex

        env, reset, task_ex, task_spec, wf_ex = _read_task_params(id, task)

        wf_name = task.workflow_name or None

        if wf_name and wf_name != wf_ex.name:
            raise exc.WorkflowException('Workflow name does not match.')

        # Rerun is the only supported update: the request must ask for
        # RUNNING and the current state must be ERROR.
        if task.state != states.RUNNING:
            raise exc.WorkflowException(
                'Invalid task state. '
                'Only updating task to rerun is supported.'
            )

        if task_ex.state != states.ERROR:
            raise exc.WorkflowException(
                'The current task execution must be in ERROR for rerun.'
                ' Only updating task to rerun is supported.'
            )

        if not task_spec.get_with_items() and not reset:
            raise exc.WorkflowException(
                'Only with-items task has the option to not reset.'
            )

        rpc.get_engine_client().rerun_workflow(
            task_ex.id,
            reset=reset,
            env=env
        )

        # Re-read the task after the rerun request so the response
        # reflects the latest state.
        @rest_utils.rest_retry_on_db_error
        def _retrieve_task():
            with db_api.transaction():
                task_ex = db_api.get_task_execution(id)

                return _get_task_resource_with_result(task_ex)

        return _retrieve_task()
class ExecutionTasksController(rest.RestController):
    """REST controller listing the tasks of one workflow execution."""

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Tasks, types.uuid, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, wtypes.text, types.uuid,
                         types.uniquelist, STATE_TYPES, wtypes.text,
                         wtypes.text, types.jsontype, bool,
                         wtypes.text, wtypes.text, bool, types.jsontype)
    def get_all(self, workflow_execution_id, marker=None, limit=None,
                sort_keys='created_at', sort_dirs='asc', fields='',
                name=None, workflow_name=None, workflow_id=None,
                tags=None, state=None, state_info=None,
                result=None, published=None, processed=None,
                created_at=None, updated_at=None, reset=None, env=None):
        """Return all tasks within the execution.

        Where project_id is the same as the requester or
        project_id is different but the scope is public.

        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: desc. The length of sort_dirs can be equal
                          or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param name: Optional. Keep only resources with a specific name.
        :param workflow_name: Optional. Keep only resources with a specific
                              workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
                            workflow ID.
        :param workflow_execution_id: Optional. Keep only resources with a
                                      specific workflow execution ID.
        :param tags: Optional. Keep only resources containing specific tags.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
                           state information.
        :param result: Optional. Keep only resources with a specific result.
        :param published: Optional. Keep only resources with specific
                          published content.
        :param processed: Optional. Keep only resources which have been
                          processed or not.
        :param reset: Optional. Keep only resources which have been reset or
                      not.
        :param env: Optional. Keep only resources with a specific environment.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        """
        acl.enforce('tasks:list', context.ctx())

        filters = filter_utils.create_filters_from_request_params(
            workflow_execution_id=workflow_execution_id,
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            tags=tags,
            state=state,
            state_info=state_info,
            updated_at=updated_at,
            name=name,
            result=result,
            published=published,
            processed=processed,
            reset=reset,
            env=env
        )

        LOG.debug(
            "Fetch tasks. workflow_execution_id=%s, marker=%s, limit=%s, "
            "sort_keys=%s, sort_dirs=%s, filters=%s",
            workflow_execution_id, marker, limit, sort_keys, sort_dirs,
            filters
        )

        return rest_utils.get_all(
            resources.Tasks,
            resources.Task,
            db_api.get_task_executions,
            db_api.get_task_execution,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            **filters
        )
| |
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _ij_ik_il_to_jkl(a, b, c):
    """Compute einsum('ij,ik,il->jkl', a, b, c) via two matmuls."""
    ab = chainer.functions.matmul(a[:, :, None], b[:, None, :])  # ijk
    return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
        a.shape[1], b.shape[1], c.shape[1])
def _ij_ik_jkl_to_il(a, b, c):
    """Compute einsum('ij,ik,jkl->il', a, b, c) via two matmuls."""
    ab = chainer.functions.matmul(a[:, :, None], b[:, None, :])  # ijk
    c = c.reshape(-1, c.shape[-1])  # [jk]l
    return chainer.functions.matmul(_as_mat(ab), c)
def _ij_il_jkl_to_ik(a, b, c):
    """Compute einsum('ij,il,jkl->ik') by swapping c's k and l axes."""
    return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
def _ik_il_jkl_to_ij(a, b, c):
    """Compute einsum('ik,il,jkl->ij') by rolling c's j axis to the back."""
    return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
class BilinearFunction(function_node.FunctionNode):
    """Forward/backward for the bilinear transform y = e1 W e2 (+ linear)."""

    def check_type_forward(self, in_types):
        # Either 3 inputs (e1, e2, W) or 6 (plus linear V1, V2, b).
        n_in = type_check.eval(in_types.size())
        if n_in != 3 and n_in != 6:
            raise type_check.InvalidType(
                '{0} or {1}'.format(
                    in_types.size() == 3, in_types.size() == 6),
                '{0} == {1}'.format(in_types.size(), n_in))

        e1_type, e2_type, W_type = in_types[:3]
        type_check_prod = type_check.make_variable(numpy.prod, 'prod')
        type_check.expect(
            e1_type.dtype == numpy.float32,
            e1_type.ndim >= 2,
            e2_type.dtype == numpy.float32,
            e2_type.ndim >= 2,
            e1_type.shape[0] == e2_type.shape[0],
            W_type.dtype == numpy.float32,
            W_type.ndim == 3,
            # Non-batch axes of e1/e2 are flattened, so their product must
            # match W's first two axes.
            type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
            type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
        )

        if n_in == 6:
            out_size = W_type.shape[2]
            V1_type, V2_type, b_type = in_types[3:]
            type_check.expect(
                V1_type.dtype == numpy.float32,
                V1_type.ndim == 2,
                V1_type.shape[0] == W_type.shape[0],
                V1_type.shape[1] == out_size,
                V2_type.dtype == numpy.float32,
                V2_type.ndim == 2,
                V2_type.shape[0] == W_type.shape[1],
                V2_type.shape[1] == out_size,
                b_type.dtype == numpy.float32,
                b_type.ndim == 1,
                b_type.shape[0] == out_size,
            )

    def forward(self, inputs):
        # All inputs are retained for the backward pass.
        self.retain_inputs(tuple(range(len(inputs))))

        e1 = _as_mat(inputs[0])
        e2 = _as_mat(inputs[1])
        W = inputs[2]

        xp = backend.get_array_module(*inputs)
        # optimize: y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
        y = xp.tensordot(xp.einsum('ij,ik->ijk', e1, e2), W, axes=2)

        if len(inputs) == 6:
            # Optional linear terms: e1 V1 + e2 V2 + b.
            V1, V2, b = inputs[3:]
            y += e1.dot(V1)
            y += e2.dot(V2)
            y += b
        return y,

    def backward(self, indexes, grad_outputs):
        inputs = self.get_retained_inputs()
        e1, e2, W = inputs[:3]
        gy, = grad_outputs

        if len(inputs) == 6:
            V1, V2 = inputs[3], inputs[4]
            return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
        return BilinearFunctionGrad().apply((e1, e2, W, gy))
class BilinearFunctionGrad(function_node.FunctionNode):
    """Gradient of BilinearFunction, itself differentiable (double-backprop)."""

    def forward(self, inputs):
        self.retain_inputs(tuple(range(len(inputs))))

        e1 = _as_mat(inputs[0])
        e2 = _as_mat(inputs[1])
        W, gy = inputs[2], inputs[-1]

        xp = backend.get_array_module(*inputs)
        # optimize: gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
        gW = xp.einsum('ij,ik->jki', e1, e2).dot(gy)

        # Shared intermediate reused for both input gradients.
        gy_W = xp.tensordot(gy, W, axes=(1, 2))  # 'il,jkl->ijk'
        # optimize: ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
        ge1 = xp.einsum('ik,ijk->ij', e2, gy_W)
        # optimize: ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
        ge2 = xp.einsum('ij,ijk->ik', e1, gy_W)

        # Restore the original (possibly >2-D) input shapes.
        ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW

        if len(inputs) == 6:
            V1, V2 = inputs[3], inputs[4]
            gV1 = e1.T.dot(gy)
            gV2 = e2.T.dot(gy)
            gb = gy.sum(0)
            ge1 += gy.dot(V1.T)
            ge2 += gy.dot(V2.T)
            ret += gV1, gV2, gb

        return ret

    def backward(self, indexes, grad_outputs):
        inputs = self.get_retained_inputs()

        e1 = _as_mat(inputs[0])
        e2 = _as_mat(inputs[1])
        W, gy = inputs[2], inputs[-1]

        gge1 = _as_mat(grad_outputs[0])
        gge2 = _as_mat(grad_outputs[1])
        ggW = grad_outputs[2]

        # Second-order terms, each an einsum over three of the inputs;
        # see the _ij_* helpers for the contraction patterns.
        dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
        dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
        dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)

        dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
        dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
        dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)

        dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
        dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
        dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)

        ge1 = dgW_de1 + dge2_de1
        ge2 = dgW_de2 + dge1_de2
        gW = dge1_dW + dge2_dW
        ggy = dgW_dgy + dge1_dgy + dge2_dgy

        if len(inputs) == 6:
            V1, V2 = inputs[3], inputs[4]
            ggV1, ggV2, ggb = grad_outputs[3:]

            gV1 = chainer.functions.matmul(gge1, gy, transa=True)
            gV2 = chainer.functions.matmul(gge2, gy, transa=True)

            ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
            ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
            ggy += chainer.functions.matmul(gge1, V1)
            ggy += chainer.functions.matmul(gge2, V2)
            ggy += chainer.functions.matmul(e1, ggV1)
            ggy += chainer.functions.matmul(e2, ggV2)
            ggy += chainer.functions.broadcast_to(ggb, ggy.shape)

        ge1 = ge1.reshape(inputs[0].shape)
        ge2 = ge2.reshape(inputs[1].shape)

        if len(inputs) == 6:
            return ge1, ge2, gW, gV1, gV2, ggy
        return ge1, ge2, gW, ggy
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
    """Applies a bilinear function based on given parameters.

    This is a building block of Neural Tensor Network (see the reference paper
    below). It takes two input variables and one or four parameters, and
    outputs one variable.

    To be precise, denote six input arrays mathematically by
    :math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
    :math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
    :math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
    :math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
    :math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
    :math:`b\\in \\mathbb{R}^{L}`,
    where :math:`I` is mini-batch size.
    In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
    parameters.

    The output of forward propagation is calculated as

    .. math::

      y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
        \\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.

    Note that V1, V2, b are optional. If these are not given, then this
    function omits the last three terms in the above equation.

    .. note::

       This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
       array. In this case, the leading dimension is treated as the batch
       dimension, and the other dimensions are reduced to one dimension.

    .. note::

       In the original paper, :math:`J` and :math:`K`
       must be equal and the author denotes :math:`[V^1 V^2]`
       (concatenation of matrices) by :math:`V`.

    Args:
        e1 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Left input variable.
        e2 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Right input variable.
        W (:class:`~chainer.Variable` or :ref:`ndarray`):
            Quadratic weight variable.
        V1 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Left coefficient variable.
        V2 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Right coefficient variable.
        b (:class:`~chainer.Variable` or :ref:`ndarray`):
            Bias variable.

    Returns:
        ~chainer.Variable: Output variable.

    See:
        `Reasoning With Neural Tensor Networks for Knowledge Base Completion
        <https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
        networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
    """
    # The linear parameters must be given all-or-nothing: the FunctionNode
    # only supports 3 or 6 inputs.
    flags = [V1 is None, V2 is None, b is None]
    if any(flags):
        if not all(flags):
            raise ValueError('All coefficients and bias for bilinear() must '
                             'be None, if at least one of them is None.')
        return BilinearFunction().apply((e1, e2, W))[0]
    return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]
| |
import atexit
import threading
from collections import defaultdict
from dataclasses import dataclass
from multiprocessing.pool import ThreadPool
import ray
from dask.core import istask, ishashable, _execute_task
from dask.system import CPU_COUNT
from dask.threaded import pack_exception, _thread_get_id
from .callbacks import local_ray_callbacks, unpack_ray_callbacks
from .common import unpack_object_refs
from .scheduler_utils import get_async, apply_sync
main_thread = threading.current_thread()
# Lazily created global pool, shared by main-thread callers that do not
# specify num_workers; per-thread pools keyed by (thread, num_workers)
# live in `pools`, guarded by `pools_lock`.
default_pool = None
pools = defaultdict(dict)
pools_lock = threading.Lock()
def ray_dask_get(dsk, keys, **kwargs):
    """
    A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask
    tasks to a Ray cluster for execution. The scheduler will wait for the
    tasks to finish executing, fetch the results, and repackage them into the
    appropriate Dask collections. This particular scheduler uses a threadpool
    to submit Ray tasks.

    This can be passed directly to `dask.compute()`, as the scheduler:

    >>> dask.compute(obj, scheduler=ray_dask_get)

    You can override the currently active global Dask-Ray callbacks (e.g.
    supplied via a context manager), the number of threads to use when
    submitting the Ray tasks, or the threadpool used to submit Ray tasks:

    >>> dask.compute(
            obj,
            scheduler=ray_dask_get,
            ray_callbacks=some_ray_dask_callbacks,
            num_workers=8,
            pool=some_cool_pool,
        )

    Args:
        dsk (Dict): Dask graph, represented as a task DAG dictionary.
        keys (List[str]): List of Dask graph keys whose values we wish to
            compute and return.
        ray_callbacks (Optional[list[callable]]): Dask-Ray callbacks.
        num_workers (Optional[int]): The number of worker threads to use in
            the Ray task submission traversal of the Dask graph.
        pool (Optional[ThreadPool]): A multiprocessing threadpool to use to
            submit Ray tasks.

    Returns:
        Computed values corresponding to the provided keys.
    """
    num_workers = kwargs.pop("num_workers", None)
    pool = kwargs.pop("pool", None)
    # We attempt to reuse any other thread pools that have been created within
    # this thread and with the given number of workers. We reuse a global
    # thread pool if num_workers is not given and we're in the main thread.
    global default_pool
    thread = threading.current_thread()
    if pool is None:
        with pools_lock:
            if num_workers is None and thread is main_thread:
                if default_pool is None:
                    default_pool = ThreadPool(CPU_COUNT)
                    atexit.register(default_pool.close)
                pool = default_pool
            elif thread in pools and num_workers in pools[thread]:
                pool = pools[thread][num_workers]
            else:
                pool = ThreadPool(num_workers)
                atexit.register(pool.close)
                pools[thread][num_workers] = pool

    ray_callbacks = kwargs.pop("ray_callbacks", None)
    # ray_persist=True returns raw ObjectRefs instead of fetched values.
    persist = kwargs.pop("ray_persist", False)

    with local_ray_callbacks(ray_callbacks) as ray_callbacks:
        # Unpack the Ray-specific callbacks.
        (
            ray_presubmit_cbs,
            ray_postsubmit_cbs,
            ray_pretask_cbs,
            ray_posttask_cbs,
            ray_postsubmit_all_cbs,
            ray_finish_cbs,
        ) = unpack_ray_callbacks(ray_callbacks)
        # NOTE: We hijack Dask's `get_async` function, injecting a different
        # task executor.
        object_refs = get_async(
            _apply_async_wrapper(
                pool.apply_async,
                _rayify_task_wrapper,
                ray_presubmit_cbs,
                ray_postsubmit_cbs,
                ray_pretask_cbs,
                ray_posttask_cbs,
            ),
            len(pool._pool),
            dsk,
            keys,
            get_id=_thread_get_id,
            pack_exception=pack_exception,
            **kwargs,
        )
        if ray_postsubmit_all_cbs is not None:
            for cb in ray_postsubmit_all_cbs:
                cb(object_refs, dsk)
        # NOTE: We explicitly delete the Dask graph here so object references
        # are garbage-collected before this function returns, i.e. before all
        # Ray tasks are done. Otherwise, no intermediate objects will be
        # cleaned up until all Ray tasks are done.
        del dsk
        if persist:
            result = object_refs
        else:
            result = ray_get_unpack(object_refs)
        if ray_finish_cbs is not None:
            for cb in ray_finish_cbs:
                cb(result)

    # cleanup pools associated with dead threads.
    with pools_lock:
        active_threads = set(threading.enumerate())
        if thread is not main_thread:
            for t in list(pools):
                if t not in active_threads:
                    for p in pools.pop(t).values():
                        p.close()
    return result
def _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):
"""
Wraps the given pool `apply_async` function, hotswapping `real_func` in as
the function to be applied and adding `extra_args` and `extra_kwargs` to
`real_func`'s call.
Args:
apply_async (callable): The pool function to be wrapped.
real_func (callable): The real function that we wish the pool apply
function to execute.
*extra_args: Extra positional arguments to pass to the `real_func`.
**extra_kwargs: Extra keyword arguments to pass to the `real_func`.
Returns:
A wrapper function that will ignore it's first `func` argument and
pass `real_func` in its place. To be passed to `dask.local.get_async`.
"""
def wrapper(func, args=(), kwds=None, callback=None): # noqa: M511
if not kwds:
kwds = {}
return apply_async(
real_func,
args=args + extra_args,
kwds=dict(kwds, **extra_kwargs),
callback=callback,
)
return wrapper
def _rayify_task_wrapper(
key,
task_info,
dumps,
loads,
get_id,
pack_exception,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
The core Ray-Dask task execution wrapper, to be given to the thread pool's
`apply_async` function. Exactly the same as `execute_task`, except that it
calls `_rayify_task` on the task instead of `_execute_task`.
Args:
key (str): The Dask graph key whose corresponding task we wish to
execute.
task_info: The task to execute and its dependencies.
dumps (callable): A result serializing function.
loads (callable): A task_info deserializing function.
get_id (callable): An ID generating function.
pack_exception (callable): An exception serializing function.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A 3-tuple of the task's key, a literal or a Ray object reference for a
Ray task's result, and whether the Ray task submission failed.
"""
try:
task, deps = loads(task_info)
result = _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def _rayify_task(
    task,
    key,
    deps,
    ray_presubmit_cbs,
    ray_postsubmit_cbs,
    ray_pretask_cbs,
    ray_posttask_cbs,
):
    """
    Rayifies the given task, submitting it as a Ray task to the Ray cluster.

    Args:
        task (tuple): A Dask graph value, being either a literal, dependency
            key, Dask task, or a list thereof.
        key (str): The Dask graph key for the given task.
        deps (dict): The dependencies of this task.
        ray_presubmit_cbs (callable): Pre-task submission callbacks.
        ray_postsubmit_cbs (callable): Post-task submission callbacks.
        ray_pretask_cbs (callable): Pre-task execution callbacks.
        ray_posttask_cbs (callable): Post-task execution callbacks.

    Returns:
        A literal, a Ray object reference representing a submitted task, or a
        list thereof.
    """
    if isinstance(task, list):
        # Recursively rayify this list. This will still bottom out at the first
        # actual task encountered, inlining any tasks in that task's arguments.
        return [
            _rayify_task(
                t,
                key,
                deps,
                ray_presubmit_cbs,
                ray_postsubmit_cbs,
                ray_pretask_cbs,
                ray_posttask_cbs,
            ) for t in task
        ]
    elif istask(task):
        # Unpacks and repacks Ray object references and submits the task to the
        # Ray cluster for execution.
        if ray_presubmit_cbs is not None:
            alternate_returns = [
                cb(task, key, deps) for cb in ray_presubmit_cbs
            ]
            for alternate_return in alternate_returns:
                # We don't submit a Ray task if a presubmit callback returns
                # a non-`None` value, instead we return said value.
                # NOTE: This returns the first non-None presubmit callback
                # return value.
                if alternate_return is not None:
                    return alternate_return
        func, args = task[0], task[1:]
        if func is multiple_return_get:
            # Index extraction out of a multiple-return result is executed
            # locally, not submitted as its own Ray task.
            return _execute_task(task, deps)
        # If the function's arguments contain nested object references, we must
        # unpack said object references into a flat set of arguments so that
        # Ray properly tracks the object dependencies between Ray tasks.
        arg_object_refs, repack = unpack_object_refs(args, deps)
        # Submit the task using a wrapper function.
        # `num_returns` is widened for MultipleReturnFunc wrappers so Ray
        # creates one ObjectRef per declared return value.
        object_refs = dask_task_wrapper.options(
            name=f"dask:{key!s}",
            num_returns=(1 if not isinstance(func, MultipleReturnFunc) else
                         func.num_returns),
        ).remote(
            func,
            repack,
            key,
            ray_pretask_cbs,
            ray_posttask_cbs,
            *arg_object_refs,
        )
        if ray_postsubmit_cbs is not None:
            for cb in ray_postsubmit_cbs:
                cb(task, key, deps, object_refs)
        return object_refs
    elif not ishashable(task):
        # Unhashable values can't be dependency keys; treat as a literal.
        return task
    elif task in deps:
        # The value is a dependency key; substitute the dependency.
        return deps[task]
    else:
        return task
@ray.remote
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs,
                      *args):
    """
    A Ray remote function acting as a Dask task wrapper. This function will
    repackage the given flat `args` into its original data structures using
    `repack`, execute any Dask subtasks within the repackaged arguments
    (inlined by Dask's optimization pass), and then pass the concrete task
    arguments to the provided Dask task function, `func`.

    Args:
        func (callable): The Dask task function to execute.
        repack (callable): A function that repackages the provided args into
            the original (possibly nested) Python objects.
        key (str): The Dask key for this task.
        ray_pretask_cbs (callable): Pre-task execution callbacks.
        ray_posttask_cbs (callable): Post-task execution callback.
        *args (ObjectRef): Ray object references representing the Dask task's
            arguments.

    Returns:
        The output of the Dask task. In the context of Ray, a
        dask_task_wrapper.remote() invocation will return a Ray object
        reference representing the Ray task's result.
    """
    pre_states = None
    if ray_pretask_cbs is not None:
        pre_states = [
            cb(key, args) if cb is not None else None for cb in ray_pretask_cbs
        ]
    repacked_args, repacked_deps = repack(args)
    # Recursively execute Dask-inlined tasks.
    actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]
    # Execute the actual underlying Dask task.
    result = func(*actual_args)
    if ray_posttask_cbs is not None:
        # BUGFIX: `pre_states` was previously only bound when pre-task
        # callbacks were given, raising NameError if only post-task callbacks
        # were supplied. Fall back to None states in that case.
        if pre_states is None:
            pre_states = [None] * len(ray_posttask_cbs)
        for cb, pre_state in zip(ray_posttask_cbs, pre_states):
            if cb is not None:
                cb(key, result, pre_state)
    return result
def ray_get_unpack(object_refs):
    """
    Resolve every Ray object reference contained in `object_refs` to its
    concrete value, preserving the surrounding (possibly nested) structure.

    Args:
        object_refs: A (potentially nested) Python object containing Ray
            object references.

    Returns:
        The input Python object with all contained Ray object references
        resolved with their concrete values.
    """
    refs = list(object_refs) if isinstance(object_refs, tuple) else object_refs
    if isinstance(refs, list) and not all(
            isinstance(item, ray.ObjectRef) for item in refs):
        # Dask loves to nest collections in nested tuples, while Ray expects
        # a flat list of object references: flatten before ray.get() and
        # repack the fetched values afterwards.
        flat_refs, repack = unpack_object_refs(*refs)
        return repack(ray.get(flat_refs))
    return ray.get(refs)
def ray_dask_get_sync(dsk, keys, **kwargs):
    """
    A synchronous Dask-Ray scheduler. This scheduler will send top-level
    (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
    wait for the tasks to finish executing, fetch the results, and repackage
    them into the appropriate Dask collections. This particular scheduler
    submits Ray tasks synchronously, which can be useful for debugging.
    This can be passed directly to `dask.compute()`, as the scheduler:
    >>> dask.compute(obj, scheduler=ray_dask_get_sync)
    You can override the currently active global Dask-Ray callbacks (e.g.
    supplied via a context manager):
    >>> dask.compute(
            obj,
            scheduler=ray_dask_get_sync,
            ray_callbacks=some_ray_dask_callbacks,
        )
    Args:
        dsk (Dict): Dask graph, represented as a task DAG dictionary.
        keys (List[str]): List of Dask graph keys whose values we wish to
            compute and return.
    Returns:
        Computed values corresponding to the provided keys.
    """
    # Scheduler-only keyword arguments are stripped before delegating the
    # remaining kwargs to Dask's `get_async`.
    ray_callbacks = kwargs.pop("ray_callbacks", None)
    persist = kwargs.pop("ray_persist", False)
    with local_ray_callbacks(ray_callbacks) as ray_callbacks:
        # Unpack the Ray-specific callbacks.
        (
            ray_presubmit_cbs,
            ray_postsubmit_cbs,
            ray_pretask_cbs,
            ray_posttask_cbs,
            ray_postsubmit_all_cbs,
            ray_finish_cbs,
        ) = unpack_ray_callbacks(ray_callbacks)
        # NOTE: We hijack Dask's `get_async` function, injecting a different
        # task executor.
        object_refs = get_async(
            _apply_async_wrapper(
                apply_sync,  # synchronous executor: submit tasks inline
                _rayify_task_wrapper,
                ray_presubmit_cbs,
                ray_postsubmit_cbs,
                ray_pretask_cbs,
                ray_posttask_cbs,
            ),
            1,  # a single "worker", since submission is serialized
            dsk,
            keys,
            **kwargs,
        )
        if ray_postsubmit_all_cbs is not None:
            for cb in ray_postsubmit_all_cbs:
                cb(object_refs, dsk)
        # NOTE: We explicitly delete the Dask graph here so object references
        # are garbage-collected before this function returns, i.e. before all
        # Ray tasks are done. Otherwise, no intermediate objects will be
        # cleaned up until all Ray tasks are done.
        del dsk
        if persist:
            # Caller asked for the raw ObjectRefs instead of fetched values.
            result = object_refs
        else:
            result = ray_get_unpack(object_refs)
        if ray_finish_cbs is not None:
            for cb in ray_finish_cbs:
                cb(result)
        return result
@dataclass
class MultipleReturnFunc:
    # Marks a callable as producing `num_returns` separate values, so the
    # task submission path can request that many ObjectRefs from Ray
    # (see the `num_returns` option set in `_rayify_task`).
    func: callable
    num_returns: int

    def __call__(self, *args, **kwargs):
        """Delegate the call directly to the wrapped function."""
        return self.func(*args, **kwargs)
def multiple_return_get(multiple_returns, idx):
    """Pick the `idx`-th value out of a multiple-return task result.

    This function is special-cased by `_rayify_task`, which executes it
    locally instead of submitting it to the Ray cluster.
    """
    return multiple_returns[idx]
| |
import os
import unittest
from functools import partial
from os.path import expanduser
from time import sleep
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.gridlayout import GridLayout
from cobiv.modules.core.entity import Entity
from cobiv.modules.core.session.session import Session
from cobiv.modules.database.sqlitedb.sqlitedb import SqliteDb
from cobiv.modules.views.browser.browser import Browser
from cobiv.modules.views.browser.eolitem import EOLItem
from cobiv.modules.hud_components.sidebar.sidebar import Sidebar
from cobiv.modules.database.datasources.sqlite.sqliteds import Sqliteds
from cobiv.modules.database.sqlitedb.sqlitesetmanager import SqliteSetManager
from cobiv.modules.views.browser.thumbnail_image import ThumbnailImage
from test.AbstractApp import AbstractApp
Window.size = (360, 360)
class TestMainWidget(GridLayout):
    """Minimal stand-in for cobiv's main widget.

    Hosts a Browser instance and exposes the command/progress-bar API the
    components under test invoke; everything except the two browser commands
    is a no-op.
    """

    def execute_cmd(self, action, *args, **kwargs):
        # Only the two commands exercised by the tests are dispatched; all
        # other actions are silently ignored.
        if action == "load-set":
            self.browser.load_set()
            return
        if action == "refresh-marked":
            self.browser.refresh_mark()

    def execute_cmds(self, *args, **kwargs):
        return self.execute_cmd(*args, **kwargs)

    def show_progressbar(self, *args, **kwargs):
        pass

    def set_progressbar_value(self, *args, **kwargs):
        pass

    def close_progressbar(self, *args, **kwargs):
        pass

    def set_browser(self, instance):
        # Keep a handle for command dispatch and show the browser on screen.
        self.browser = instance
        self.add_widget(instance)
class MockThumbloader(Entity):
    """Thumbnail-loader stub: all loading operations are no-ops.

    Only `get_filename_caption` carries real logic (caption shortening), so
    browser tests can run without generating any thumbnails.
    """

    def __init__(self):
        super(MockThumbloader, self).__init__()
        self.thumb_path = os.path.join(expanduser('~'), '.cobiv', 'thumbnails')
        self.cell_size = 120

    def stop(self):
        pass

    def restart(self):
        pass

    def get_fullpath_from_file_id(self, file_id):
        return None

    def append(self, *items):
        pass

    def clear_cache(self):
        pass

    def delete_thumbnail(self, *items):
        pass

    def get_filename_caption(self, filename):
        """Shorten long basenames to `aaaaa...bbbbbbb` (first 5 + last 7)."""
        caption = os.path.basename(filename)
        if len(caption) > 12:
            caption = "%s...%s" % (caption[:5], caption[-7:])
        return caption
class TestApp(AbstractApp):
    """Minimal cobiv App for the browser tests.

    Provides the configuration dict, a SQLite datasource, and the service
    lookup that the components under test resolve their collaborators
    through.
    """

    # Session is injected by the test (see prepare_browser) before lookup()
    # is ever asked for it.
    session = None

    def __init__(self, **kwargs):
        super(TestApp, self).__init__(**kwargs)
        # BUGFIX: 'thumbnails.path' was listed twice; the first (empty-string)
        # entry was silently overwritten by the second, so only the effective
        # value is kept.
        self.configuration = {
            'repositories': ['osfs://images'],
            'thumbnails.path': self.get_user_path('thumbs')
        }
        self.ds = Sqliteds()

    def build(self):
        return TestMainWidget(size_hint=(1, 1), cols=1)

    def lookup(self, object_name, category):
        # Service locator used by cobiv components; anything the tests do not
        # stub out resolves to None.
        if category == "Entity" and object_name == "session":
            return self.session
        elif category == "Entity" and object_name == "thumbloader":
            return MockThumbloader()
        elif object_name == "sqlite_ds":
            return self.ds
        elif object_name == 'sqliteSetManager':
            return SqliteSetManager()
        else:
            return None
class BrowserTest(unittest.TestCase):
def setUp(self):
Clock._events = [[] for i in range(256)]
def get_user_path(self, *args):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
    def prepare_browser(self, app):
        """Wire a fresh Browser into the running test app.

        Creates a session and an in-memory test database, attaches a Browser
        to the app's root widget, switches it on without the background
        loader thread, and ticks the clock once so pending UI work runs.

        Returns:
            Tuple (browser, db) for the test to drive.
        """
        app.session = Session()
        db = SqliteDb()
        db.init_test_db(app.session)
        b = Browser()
        app.root.set_browser(b)
        b.ready()
        # loader_thread=False keeps loading synchronous and deterministic.
        b.on_switch(loader_thread=False)
        sleep(0.1)
        Clock.tick()
        return b, db
def proceed_search(self, db, query=None):
if query is not None:
db.search_tag(query)
else:
db.search_tag()
sleep(0.1)
for i in range(2):
Clock.tick()
    def _test_initialization(self, app, *args):
        """Browser starts empty: no queued images, a single grid child, and
        the widget fills the 360x360 window."""
        b, db = self.prepare_browser(app)
        self.assertEqual(0, len(b.image_queue))
        self.assertEqual(len(b.grid.children), 1)
        self.assertCountEqual((360, 360), Window.size)
        self.assertCountEqual((1, 1), b.size_hint)
        self.assertCountEqual((360, 360), b.size)
        app.stop()
    def _test_load_set(self, app, *args):
        """A search queues the first page (27 files, /0001.jpg onwards) and a
        tick materializes them as grid children, cursor at page start."""
        b, db = self.prepare_browser(app)
        self.assertEqual(0, len(b.image_queue))
        db.search_tag()
        sleep(0.1)
        Clock.tick()
        self.assertEqual(27, len(b.image_queue))
        for i in range(27):
            # Queue entries carry the filename at index 3.
            self.assertEqual("/%04d.jpg" % (i + 1,), b.image_queue[i][3])
        Clock.tick()
        self.assertEqual(len(b.grid.children), 27)
        self.assertFalse(b.cursor.is_eol())
        self.assertEqual(b.page_cursor.pos, b.cursor.pos)
        app.stop()
    def _test_basic_moves(self, app, *args):
        """Single-step navigation: next/previous move the cursor by one,
        up/down move it by one grid row (3 columns), clamping at position 0."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        for i in range(10):
            b.select_next(0)
            Clock.tick()
            self.assertEqual(i + 1, cursor.pos)
        self.assertFalse(b.cursor.is_eol())
        self.assertNotEqual(b.page_cursor.pos, b.cursor.pos)
        for i in range(10):
            b.select_previous(0)
            Clock.tick()
            self.assertEqual(10 - i - 1, cursor.pos)
        self.assertFalse(b.cursor.is_eol())
        self.assertEqual(b.page_cursor.pos, b.cursor.pos)
        # Row moves: the grid is 3 columns wide, so up/down shift pos by 3.
        b.select_down(0)
        Clock.tick()
        self.assertEqual(3, cursor.pos)
        b.select_next(0)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(7, cursor.pos)
        b.select_next(0)
        b.select_up(0)
        Clock.tick()
        self.assertEqual(5, cursor.pos)
        b.select_up(0)
        Clock.tick()
        self.assertEqual(2, cursor.pos)
        # Moving up from the first row clamps to position 0.
        b.select_up(0)
        Clock.tick()
        self.assertEqual(0, cursor.pos)
        app.stop()
    def _test_load_more(self, app, *args):
        """Scrolling keeps a sliding window of at most 27 thumbnails: the
        page cursor advances when the selection leaves the loaded rows, in
        both directions, down to EOL (pos 100) and back to the start."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        for i in range(7):
            b.select_down(0)
            Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(21, cursor.pos)
        # The 8th row-down pushes the page window forward by one row.
        b.select_down(0)
        Clock.tick()
        self.assertEqual(3, b.page_cursor.pos)
        self.assertEqual(24, cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        for i in range(23):
            b.select_down(0)
            Clock.tick()
        self.assertEqual(93, cursor.pos)
        self.assertEqual(72, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # Near the end of the 100-item set the window shrinks to 26.
        b.select_down(0)
        Clock.tick()
        self.assertEqual(96, cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        self.assertEqual(75, b.page_cursor.pos)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(99, cursor.pos)
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        self.assertEqual(len(b.grid.children), 26)
        self.assertEqual(75, b.page_cursor.pos)
        # moving up
        b.select_up(0)
        Clock.tick()
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(97, cursor.pos)
        self.assertFalse(cursor.is_eol())
        for i in range(8):
            b.select_up(0)
            Clock.tick()
        self.assertEqual(69, b.page_cursor.pos)
        self.assertEqual(73, cursor.pos)
        for i in range(24):
            b.select_up(0)
            Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(1, cursor.pos)
        b.select_up(0)
        Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(0, cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        app.stop()
    def _test_go(self, app, *args):
        """Jumping to an arbitrary position (select_custom) lands the cursor
        on the right file and recenters the page window when needed."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        # go same pos
        b.select_next(0)
        b.select_custom(cursor.pos)
        self.assertEqual(1, cursor.pos)
        self.assertEqual("/0002.jpg", cursor.filename)
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # go same page
        b.select_custom(8)
        Clock.tick()
        self.assertEqual(8, cursor.pos)
        self.assertEqual("/0009.jpg", cursor.filename)
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # go end of current page
        b.select_custom(25)
        Clock.tick()
        self.assertEqual(25, cursor.pos)
        self.assertEqual("/0026.jpg", cursor.filename)
        self.assertEqual(3, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # go another page
        b.select_custom(70)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(70, cursor.pos)
        self.assertEqual("/0071.jpg", cursor.filename)
        self.assertEqual(len(b.grid.children), 27)
        self.assertEqual(54, b.page_cursor.pos)
        b.select_custom(69)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(69, cursor.pos)
        self.assertEqual("/0070.jpg", cursor.filename)
        self.assertEqual(54, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # go multiple times
        b.select_custom(5)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        b.select_custom(40)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        b.select_custom(80)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(80, cursor.pos)
        self.assertEqual("/0081.jpg", cursor.filename)
        self.assertEqual(66, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        app.stop()
    def _test_first_last(self, app, *args):
        """select_first/select_last jump to positions 0 and 99 regardless of
        the starting position, repaging when the target is off-page."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        # test first / same page
        for i in range(5):
            b.select_first()
            Clock.tick()
            self.assertEqual(0, cursor.pos)
            self.assertEqual("/0001.jpg", cursor.filename)
            self.assertEqual(0, b.page_cursor.pos)
            self.assertEqual(len(b.grid.children), 27)
        for i in range(5):
            # Drift a few positions forward, then jump back to the start.
            for j in range(i + 1):
                b.select_next(0)
            b.select_first()
            Clock.tick()
            self.assertEqual(0, cursor.pos)
            self.assertEqual("/0001.jpg", cursor.filename)
            self.assertEqual(0, b.page_cursor.pos)
            self.assertEqual(len(b.grid.children), 27)
        # test first / other page
        b.select_custom(60)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(len(b.grid.children), 27)
        b.select_first()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(0, cursor.pos)
        self.assertEqual("/0001.jpg", cursor.filename)
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # test last
        b.select_last()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(99, cursor.pos)
        self.assertEqual("/0100.jpg", cursor.filename)
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        b.select_previous(0)
        b.select_previous(0)
        b.select_previous(0)
        self.assertEqual(96, cursor.pos)
        self.assertEqual("/0097.jpg", cursor.filename)
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        Clock.tick()
        b.select_last()
        Clock.tick()
        self.assertEqual(99, cursor.pos)
        self.assertEqual("/0100.jpg", cursor.filename)
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        app.stop()
    def _test_eol_basic(self, app, *args):
        """The end-of-list position (pos 100) is sticky for next/down moves
        and is left again by previous/up/first/custom navigation."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        # test previous & next
        b.select_last()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        b.select_next(0)
        Clock.tick()
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        # Further next moves stay pinned at EOL.
        for i in range(5):
            b.select_next(0)
            Clock.tick()
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        b.select_previous(0)
        Clock.tick()
        self.assertEqual(99, cursor.pos)
        self.assertEqual("/0100.jpg", cursor.filename)
        self.assertFalse(cursor.is_eol())
        self.assertEqual(75, b.page_cursor.pos)
        b.select_next(0)
        Clock.tick()
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        # test up & bottom
        b.select_up(0)
        Clock.tick()
        self.assertEqual(97, cursor.pos)
        self.assertEqual("/0098.jpg", cursor.filename)
        self.assertFalse(cursor.is_eol())
        self.assertEqual(75, b.page_cursor.pos)
        for i in range(3):
            b.select_down(0)
            Clock.tick()
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        # test first & last
        b.select_last()
        Clock.tick()
        self.assertEqual(99, cursor.pos)
        self.assertEqual("/0100.jpg", cursor.filename)
        self.assertEqual(75, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 26)
        b.select_next(0)
        self.assertEqual(100, cursor.pos)
        self.assertTrue(cursor.is_eol())
        b.select_first()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(0, cursor.pos)
        self.assertEqual("/0001.jpg", cursor.filename)
        self.assertEqual(0, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        # test custom go
        b.select_last()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        b.select_next(0)
        Clock.tick()
        self.assertEqual(100, cursor.pos)
        b.select_custom(50)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(50, cursor.pos)
        self.assertEqual("/0051.jpg", cursor.filename)
        self.assertEqual(36, b.page_cursor.pos)
        self.assertEqual(len(b.grid.children), 27)
        app.stop()
    def _test_mark(self, app, *args):
        """Marks follow the files, not the widgets: they survive paging away
        and back, can be toggled, and mark_all covers the visible page."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(0, len(marked))
        b.select_custom(3)
        b.mark_current(True)
        # Mark every second file: positions 5, 7 and 9.
        for i in range(3):
            b.select_next(0)
            b.select_next(0)
            # Clock.tick()
            b.mark_current(True)
        self.assertEqual(4, cursor.get_marked_count())
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertCountEqual([3, 5, 7, 9], marked)
        # Paging to the last page shows no marked widgets ...
        b.select_last()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertCountEqual([], marked)
        # ... and returning to the first page restores them.
        b.select_first()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertCountEqual([3, 5, 7, 9], marked)
        # Toggling an already-marked position (5) unmarks it.
        b.select_custom(5)
        b.mark_current()
        # test load more
        for i in range(11):
            b.select_down(0)
            Clock.tick()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertCountEqual([], marked)
        # test load more
        for i in range(10):
            b.select_up(0)
            Clock.tick()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertCountEqual([3, 7, 9], marked)
        cursor.mark_all()
        b.refresh_mark()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(27, len(marked))
        b.select_last()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(25, len(marked))
        # test eol
        b.select_last()
        sleep(0.1)
        Clock.tick()
        b.select_next(0)
        Clock.tick()
        cursor.mark_all()
        b.refresh_mark()
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(0, len(marked))
        app.stop()
    def _test_cut_1(self, app, *args):
        """cut_marked removes marked files from the set: checks page contents
        after cutting one file, a row, or a whole page, from the current
        page, after paging, and after reloading the set."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor

        # Helper: fresh search, then mark position 4.
        def cut_one():
            self.proceed_search(db)
            b.select_custom(4)
            b.mark_current()
            Clock.tick()

        # Helper: fresh search, then mark a full row (positions 6-8).
        def cut_row():
            self.proceed_search(db)
            for i in range(6, 9):
                b.select_custom(i)
                b.mark_current()
            Clock.tick()

        # Helper: fresh search, then mark a whole page (positions 0-26).
        def cut_page():
            self.proceed_search(db)
            for i in range(27):
                b.select_custom(i)
                b.mark_current()
            Clock.tick()

        # Helper: fresh search, then mark everything.
        def cut_all():
            self.proceed_search(db)
            cursor.mark_all()
            Clock.tick()

        # Helper: collect the next `qty` filenames from a cloned cursor.
        def get_filenames(c, qty):
            pc = c.clone()
            filenames = []
            for i in range(qty):
                filenames.append(pc.filename)
                pc.go_next()
            return filenames

        # Helper: run a marking scenario, optionally move, then cut.
        def cut(cut_method, position=None):
            cut_method()
            if position is not None:
                b.select_custom(position)
                Clock.tick()
                sleep(0.1)
                Clock.tick()
                Clock.tick()
            b.cut_marked()
            sleep(0.1)
            Clock.tick()
            Clock.tick()

        # Helper: assert the visible page shows exactly `expected` filenames
        # and carries no leftover marks.
        def test_page(expected):
            self.assertEqual(len(expected), len(b.grid.children))
            marked = [e.position for e in b.grid.children if e.is_marked()]
            self.assertEqual(0, len(marked))
            filenames = get_filenames(b.page_cursor, len(expected))
            self.assertCountEqual(expected, filenames)

        # test direct cut
        # # one
        cut(cut_one, 2)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(28) if i != 4])
        self.assertEqual("/0001.jpg", b.page_cursor.filename)
        cut(cut_one)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(28) if i != 4])
        self.assertEqual("/0001.jpg", b.page_cursor.filename)
        # # row
        cut(cut_row, 0)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(30) if i not in range(6, 9)])
        self.assertEqual("/0001.jpg", b.page_cursor.filename)
        cut(cut_row)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(30) if i not in range(6, 9)])
        self.assertEqual("/0001.jpg", b.page_cursor.filename)
        # # page
        cut(cut_page, 0)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(27, 27 * 2)])
        self.assertEqual("/0028.jpg", b.page_cursor.filename)
        # test load more
        # # one
        cut(cut_one, 35)
        self.assertNotEqual(0, b.page_cursor.pos)
        self.assertEqual(35, cursor.pos)
        for i in range(10):
            b.select_up(0)
            Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(28) if i != 4])
        # # row
        cut(cut_row, 38)
        self.assertNotEqual(0, b.page_cursor.pos)
        for i in range(12):
            b.select_up(0)
            Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(30) if i not in range(6, 9)])
        # # page
        cut(cut_page, 69)
        self.assertEqual(69, b.cursor.pos)
        self.assertNotEqual(0, b.page_cursor.pos)
        self.assertEqual(48, b.page_cursor.pos)
        for i in range(21):
            b.select_up(0)
            Clock.tick()
        self.assertEqual(0, b.page_cursor.pos)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(27, 27 * 2)])
        # test load set
        # # one
        cut(cut_one, 35)
        b.select_custom(position=0)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        test_page(["/%04d.jpg" % (i + 1,) for i in range(28) if i != 4])
        # # row
        cut(cut_row, 38)
        b.select_custom(position=0)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        test_page(["/%04d.jpg" % (i + 1,) for i in range(30) if i not in range(6, 9)])
        # # page
        cut(cut_page, 69)
        b.select_custom(position=0)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        test_page(["/%04d.jpg" % (i + 1,) for i in range(27, 27 * 2)])
        app.stop()
    def _test_cut_2(self, app, *args):
        """Cutting at the set boundaries (first items, last items, at EOL),
        repeated cuts accumulating in the clipboard, cut-all, and cuts
        applied while the cursor is displaced elsewhere."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor

        # Helper: collect the next `qty` filenames from a cloned cursor.
        def get_filenames(c, qty):
            pc = c.clone()
            filenames = []
            for i in range(qty):
                filenames.append(pc.filename)
                pc.go_next()
            return filenames

        # Helper: page cursor matches the last grid child and the visible
        # page shows exactly `expected` filenames (EOL widget excluded).
        def test_page(expected):
            self.assertEqual(b.grid.children[-1].position, b.page_cursor.pos)
            self.assertEqual(b.grid.children[-1].file_id, b.page_cursor.file_id)
            self.assertEqual(len(expected),
                             len(b.grid.children) - (1 if isinstance(b.grid.children[0], EOLItem) else 0))
            marked = [e.position for e in b.grid.children if e.is_marked()]
            self.assertEqual(0, len(marked))
            filenames = get_filenames(b.page_cursor, len(expected))
            self.assertCountEqual(expected, filenames)

        # Helper: fresh search, mark the first `qty` files, cut them.
        def cut_first(qty):
            self.proceed_search(db)
            self.assertEqual('/0001.jpg', cursor.filename)
            for i in range(qty):
                b.mark_current()
                b.select_next(0)
            b.select_first()
            b.cut_marked()
            Clock.tick()

        # Helper: fresh search, mark the last `qty` files, cut them.
        def cut_last(qty):
            self.proceed_search(db)
            b.select_last()
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            self.assertEqual('/0100.jpg', cursor.filename)
            self.assertEqual(26, len(b.grid.children))
            for i in range(qty):
                b.mark_current()
                b.select_previous(0)
            b.select_last()
            b.cut_marked()
            sleep(0.1)
            Clock.tick()
            Clock.tick()

        # Helper: like cut_last, but performs the cut from the EOL position.
        def cut_eol(qty):
            self.proceed_search(db)
            b.select_last()
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            self.assertEqual('/0100.jpg', cursor.filename)
            self.assertEqual(26, len(b.grid.children))
            for i in range(qty):
                b.mark_current()
                b.select_previous(0)
            b.select_last()
            b.select_next(0)
            self.assertTrue(cursor.is_eol())
            b.cut_marked()
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            self.assertTrue(cursor.is_eol())

        cut_first(1)
        c1 = cursor.clone()
        c1.go_first()
        self.assertEqual(c1.file_id, b.page_cursor.file_id)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(1, 1 + 27)])
        cut_first(3)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(3, 3 + 27)])
        # test last
        cut_last(1)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(75, 99)])
        cut_last(3)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(72, 97)])
        # test eol
        cut_eol(1)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(75, 99)])
        cut_eol(3)
        test_page(["/%04d.jpg" % (i + 1,) for i in range(72, 97)])
        # # test some navigation after cut
        self.assertEqual(97, cursor.pos)
        b.select_previous(0)
        Clock.tick()
        self.assertFalse(cursor.is_eol())
        self.assertEqual(96, cursor.pos)
        b.select_up(0)
        Clock.tick()
        self.assertFalse(cursor.is_eol())
        self.assertEqual(93, cursor.pos)
        b.select_up(0)
        Clock.tick()
        self.assertEqual(90, cursor.pos)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(93, cursor.pos)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(96, cursor.pos)
        b.select_down(0)
        Clock.tick()
        self.assertEqual(97, cursor.pos)
        b.select_up(0)
        Clock.tick()
        b.select_previous(0)
        Clock.tick()
        b.select_previous(0)
        Clock.tick()
        b.select_down(0)
        Clock.tick()
        b.select_next(0)
        Clock.tick()
        self.assertEqual(96, cursor.pos)
        # test multiple cut
        self.proceed_search(db)
        b.select_custom(3)
        b.mark_current(True)
        b.cut_marked()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(1, cursor.get_clipboard_size())
        b.select_custom(5)
        b.mark_current(True)
        b.select_custom(2)
        b.mark_current(True)
        b.cut_marked()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(2, cursor.get_clipboard_size())
        # Cutting with nothing marked leaves the clipboard unchanged.
        b.cut_marked()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(2, cursor.get_clipboard_size())
        # test cut all
        self.proceed_search(db)
        cursor.mark_all(True)
        b.refresh_mark()
        b.cut_marked()
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(1, len(b.grid.children))
        self.assertEqual(100, cursor.get_clipboard_size())
        self.assertTrue(cursor.is_eol())
        self.assertEqual(0, cursor.pos)
        # test outside displacement
        self.proceed_search(db)
        b.mark_current()
        b.select_custom(70)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        self.assertEqual(27, len(b.grid.children))
        self.assertEqual(70, cursor.pos)
        self.assertEqual(54, b.page_cursor.pos)
        self.assertEqual("/%04d.jpg" % (71,), cursor.filename)
        self.assertEqual("/%04d.jpg" % (55,), b.page_cursor.filename)
        self.assertEqual(cursor.file_id, b.grid.children[10].file_id)
        self.assertEqual(b.page_cursor.file_id, b.grid.children[-1].file_id)
        b.cut_marked()
        Clock.tick()
        self.assertEqual(27, len(b.grid.children))
        self.assertEqual(70, cursor.pos)
        self.assertEqual(54, b.page_cursor.pos)
        self.assertEqual("/%04d.jpg" % (72,), cursor.filename)
        self.assertEqual("/%04d.jpg" % (56,), b.page_cursor.filename)
        self.assertEqual(cursor.file_id, b.grid.children[10].file_id)
        self.assertEqual(b.page_cursor.file_id, b.grid.children[-1].file_id)
        self.proceed_search(db)
        b.mark_current()
        b.select_custom(70)
        sleep(0.1)
        Clock.tick()
        Clock.tick()
        b.mark_current()
        b.cut_marked()
        Clock.tick()
        self.assertEqual(27, len(b.grid.children))
        self.assertEqual(70, cursor.pos)
        self.assertEqual(54, b.page_cursor.pos)
        self.assertEqual("/%04d.jpg" % (73,), cursor.filename)
        self.assertEqual("/%04d.jpg" % (56,), b.page_cursor.filename)
        self.assertEqual(cursor.file_id, b.grid.children[10].file_id)
        self.assertEqual(b.page_cursor.file_id, b.grid.children[-1].file_id)
        app.stop()
    def _test_cut_3(self, app, *args):
        """After cutting the first `qty` files and jumping to `pos`, the page
        cursor must always match the last grid child's position."""
        b, db = self.prepare_browser(app)
        self.proceed_search(db)
        cursor = b.cursor

        # Helper: mark positions [0, qty), jump to `pos`, cut, then verify
        # the page cursor / grid alignment and the cursor position.
        def test_pos(qty, pos):
            self.proceed_search(db)
            for i in range(qty):
                b.select_custom(i)
                b.mark_current()
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            b.select_custom(pos)
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            b.cut_marked()
            sleep(0.1)
            Clock.tick()
            Clock.tick()
            self.assertEqual(b.page_cursor.pos, b.grid.children[-1].position)
            self.assertEqual(pos, cursor.pos)

        test_pos(27, 28)
        test_pos(1, 1)
        test_pos(1, 2)
        test_pos(1, 15)
        test_pos(1, 27)
        test_pos(1, 65)
        test_pos(3, 3)
        test_pos(3, 4)
        test_pos(3, 15)
        test_pos(3, 27)
        test_pos(3, 65)
        test_pos(10, 27)
        test_pos(12, 65)
        test_pos(27, 65)
        test_pos(20, 64)
        app.stop()
def _test_paste(self, app, *args):
    """End-to-end cut/paste scenarios: paste back in place, paste at a
    different position, paste on another page, multiple cuts, and
    cut-and-paste of the entire result set."""
    b, db = self.prepare_browser(app)
    self.proceed_search(db)
    cursor = b.cursor

    def get_filenames(c, qty):
        # Collect *qty* filenames starting at cursor *c*; work on a clone
        # so the real cursor is not moved.
        pc = c.clone()
        filenames = []
        for i in range(qty):
            filenames.append(pc.filename)
            pc.go_next()
        return filenames

    def test_page(expected, debug=False):
        # Grid tail must match the page cursor, nothing may stay marked,
        # and the visible filenames must equal *expected*.
        self.assertEqual(b.grid.children[-1].position, b.page_cursor.pos)
        self.assertEqual(b.grid.children[-1].file_id, b.page_cursor.file_id)
        self.assertEqual(len(expected),
                         len(b.grid.children) - (1 if isinstance(b.grid.children[0], EOLItem) else 0))
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(0, len(marked))
        filenames = get_filenames(b.page_cursor, len(expected))
        self.assertCountEqual(expected, filenames)

    def mark_one(init=True):
        # Mark a single item (position 4); init=False reuses current state.
        if init:
            self.proceed_search(db)
        b.select_custom(4)
        b.mark_current()
        Clock.tick()

    def mark_row():
        # Mark one grid row worth of items (positions 6-8).
        self.proceed_search(db)
        for i in range(6, 9):
            b.select_custom(i)
            b.mark_current()
            Clock.tick()

    def mark_page():
        # Mark a whole page (27 items).
        self.proceed_search(db)
        for i in range(27):
            b.select_custom(i)
            b.mark_current()
            Clock.tick()

    # test same cut and paste
    mark_one()
    b.cut_marked()
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(4, cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in range(27)])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children if not isinstance(i, EOLItem)])
    mark_row()
    self.assertEqual(8, cursor.pos)
    b.cut_marked()
    self.assertEqual(8, cursor.pos)
    b.select_custom(6)
    self.assertEqual(6, cursor.pos)
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(6, cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in range(27)])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children if not isinstance(i, EOLItem)])
    mark_page()
    self.assertEqual(26, cursor.pos)
    b.cut_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(27, len(b.grid.children))
    self.assertEqual(26, cursor.pos)
    self.assertEqual(12, b.page_cursor.pos)
    b.select_first()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(0, b.page_cursor.pos)
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(0, cursor.pos)
    self.assertEqual(0, b.page_cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in range(27)])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children if not isinstance(i, EOLItem)])
    # test different cut and paste
    mark_one()
    b.select_custom(6)
    b.cut_marked()
    b.select_custom(2)
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(2, cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in [0, 1, 3, 2] + list(range(4, 27))])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children])
    mark_row()
    b.cut_marked()
    b.select_custom(50)
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual("/%04d.jpg" % (39 + 1,), b.page_cursor.filename)
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(50, cursor.pos)
    self.assertEqual(36, b.page_cursor.pos)
    self.assertEqual(27, len(b.grid.children))
    self.assertEqual(b.page_cursor.file_id, b.grid.children[-1].file_id)
    self.assertEqual("/%04d.jpg" % (39 + 1,), b.page_cursor.filename)
    test_page(["/%04d.jpg" % (i + 1,) for i in list(range(39, 53)) + list(range(6, 9)) + list(range(53, 63))])
    mark_page()
    b.cut_marked()
    b.select_last()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(26, len(b.grid.children))
    self.assertEqual(72, cursor.pos)
    self.assertEqual(48, b.page_cursor.pos)
    self.assertEqual(b.page_cursor.file_id, b.grid.children[-1].file_id)
    self.assertEqual("/%04d.jpg" % (75 + 1,), b.page_cursor.filename)
    self.assertEqual("/%04d.jpg" % (99 + 1,), cursor.filename)
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(27, len(b.grid.children))
    self.assertEqual(57, b.page_cursor.pos)
    self.assertEqual(72, cursor.pos)
    self.assertEqual("/%04d.jpg" % (84 + 1,), b.page_cursor.filename)
    self.assertEqual("/%04d.jpg" % (0 + 1,), b.cursor.filename)
    # test multiple cut
    mark_one()
    b.cut_marked()
    mark_one(init=False)
    b.cut_marked()
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(4, cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in list(range(4)) + list(range(5, 28))])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children if not isinstance(i, EOLItem)])
    # test cut and paste all
    self.proceed_search(db)
    cursor.mark_all(True)
    b.refresh_mark()
    b.cut_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertEqual(0, cursor.pos)
    test_page(["/%04d.jpg" % (i + 1,) for i in range(27)])
    self.assertCountEqual(range(27), [i.position for i in b.grid.children])
    app.stop()
def _test_eol_yank(self, app, *args):
    """Cut and paste while the cursor sits on the end-of-list (EOL) item.

    Cutting with the cursor on EOL must keep it on EOL; pasting at EOL
    appends the cut items and moves the cursor onto real content.
    """
    b, db = self.prepare_browser(app)
    self.proceed_search(db)
    cursor = b.cursor

    def get_filenames(c, qty):
        # Collect *qty* filenames starting at cursor *c*; use a clone so
        # the real cursor is not moved.
        pc = c.clone()
        filenames = []
        for i in range(qty):
            filenames.append(pc.filename)
            pc.go_next()
        return filenames

    def test_page(expected, debug=False):
        # Grid tail must match the page cursor, nothing may stay marked,
        # and visible filenames must equal *expected*.
        self.assertEqual(b.grid.children[-1].position, b.page_cursor.pos)
        self.assertEqual(b.grid.children[-1].file_id, b.page_cursor.file_id)
        self.assertEqual(len(expected),
                         len(b.grid.children) - (1 if isinstance(b.grid.children[0], EOLItem) else 0))
        marked = [e.position for e in b.grid.children if e.is_marked()]
        self.assertEqual(0, len(marked))
        filenames = get_filenames(b.page_cursor, len(expected))
        self.assertCountEqual(expected, filenames)

    # NOTE: the unused mark_one/mark_row/mark_page helpers copied from
    # _test_paste were removed; marking is done inline below.
    # test cut
    self.proceed_search(db)
    for i in range(6, 9):
        b.select_custom(i)
        b.mark_current()
        Clock.tick()
    b.select_last()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    b.select_next(0)
    self.assertTrue(cursor.is_eol())
    b.cut_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    self.assertTrue(cursor.is_eol())
    test_page(["/%04d.jpg" % (i + 1,) for i in range(75, 100)])
    self.assertEqual(26, len(b.grid.children))
    self.assertCountEqual(range(72, 97), [c.position for c in b.grid.children if not isinstance(c, EOLItem)])
    # test paste
    b.paste_marked()
    sleep(0.1)
    Clock.tick()
    Clock.tick()
    # BUG FIX: was assertTrue(96, cursor.pos), which always passes because
    # the non-zero first argument is truthy and the second is just the
    # failure message. assertEqual checks the intended position.
    self.assertEqual(96, cursor.pos)
    self.assertFalse(cursor.is_eol())
    test_page(["/%04d.jpg" % (i + 1,) for i in list(range(78, 100)) + list(range(6, 9))])
    self.assertEqual(26, len(b.grid.children))
    self.assertCountEqual(range(75, 100), [c.position for c in b.grid.children if not isinstance(c, EOLItem)])
    app.stop()
def _test_calculate_to_remove(self, app, *args):
    """Check _calculate_lines_to_remove for pages at and above capacity."""
    b, db = self.prepare_browser(app)
    # A page exactly at capacity (27) never needs trimming, wherever the
    # local cursor sits.
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=0, page_size=27, actual_size=27))
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=13, page_size=27, actual_size=27))
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=26, page_size=27, actual_size=27))
    # Over-full pages only trim when the cursor is near the end; the last
    # case removes one row's worth of overflow (9 items).
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=0, page_size=27, actual_size=30))
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=5, page_size=27, actual_size=30))
    self.assertEqual(0, b._calculate_lines_to_remove(local_pos=13, page_size=27, actual_size=36))
    self.assertEqual(9, b._calculate_lines_to_remove(local_pos=26, page_size=27, actual_size=36))
    app.stop()
def call_test(self, func):
    """Run *func* inside a freshly started Kivy TestApp.

    The test body is deferred via the Clock so it executes once the
    app's event loop is actually running; app.run() blocks until the
    test calls app.stop().
    """
    application = TestApp()
    deferred = partial(func, application)
    Clock.schedule_once(deferred, 0.0001)
    application.run()
# Thin wrappers: unittest discovers these, and each simply schedules the
# matching _test_* implementation inside a running app via call_test.
# NOTE(review): numbering jumps from 08 to 17 — presumably tests 09-16 were
# removed or live elsewhere; confirm the intended ordering.
def test_01_initialization(self):
    self.call_test(self._test_initialization)

def test_02_load_set(self):
    self.call_test(self._test_load_set)

def test_03_basic_moves(self):
    self.call_test(self._test_basic_moves)

def test_04_load_more(self):
    self.call_test(self._test_load_more)

def test_05_go(self):
    self.call_test(self._test_go)

def test_06_first_last(self):
    self.call_test(self._test_first_last)

def test_07_eol(self):
    self.call_test(self._test_eol_basic)

def test_08_mark(self):
    self.call_test(self._test_mark)

def test_17_cut_1(self):
    self.call_test(self._test_cut_1)

def test_17_cut_2(self):
    self.call_test(self._test_cut_2)

def test_17_cut_3(self):
    self.call_test(self._test_cut_3)

def test_18_paste(self):
    self.call_test(self._test_paste)

def test_19_eol_yank(self):
    self.call_test(self._test_eol_yank)

def test_20_calc_to_remove(self):
    self.call_test(self._test_calculate_to_remove)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
"""
Sanders ik_comparison test python script
This script is written to be run with the Gazebo-simulated Baxter.
It will probably also run on the live robot, but it has not been tested there.
"""
#import various libraries
import argparse
import struct
import sys
import rospy
#import library to use time.sleep function
import time
#import necessary rosmessage parts
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
#import necessary service messagestuff
#to build the request message for the IK service
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
#def ik_test(beginpose, endpose):
def ik_test():
rospy.init_node("Sander_ik_test_node")
##preparing to call the IK service
#store the name of the service in a variable for easier use
servicename = "ExternalTools/right/PositionKinematicsNode/IKService"
#create a rospy.serviceproxy to be able to call this service
ikservice = rospy.ServiceProxy(servicename, SolvePositionIK)
#create a blank requestmessage
ikrequestmessage = SolvePositionIKRequest()
#every request should have the correct timestamp:
#the while loop is necessary because in simulator time rospy.time.now has
#to be called in a short timespace after timepublication on /clock
now = rospy.Time.now()
count = 0
while(now.secs == 0):
now = rospy.Time.now()
count += 1
print('amount of rospy.Time.now() requests until non-zero output: ', count)
hdr = Header(stamp=now, frame_id='base')
print(hdr)
#oke the header is created
#declaring all poses:
poses = {
'ik_example_pose': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.656982770038,
y=-0.852598021641,
z=0.0388609422173,
),
orientation=Quaternion(
x=0.367048116303,
y=0.885911751787,
z=-0.108908281936,
w=0.261868353356,
),
),
),
'neutralpose': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.573,
y=-0.181,
z=0.246,
),
orientation=Quaternion(
x=-0.141,
y=0.990,
z=-0.012,
w=0.026,
),
),
),
'poseA': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.1,
y=0.51,
z=0.723,
),
orientation=Quaternion(
x=0,
y=1,
z=0,
w=0,
),
),
),
#'triangledepositpose'
#'squaredepositpose'
#'circledepositpose'
}
# ikrequestmessage.pose_stamp.append(poses['poseA'])
ikrequestmessage.pose_stamp.append(poses['ik_example_pose'])
# print(ikrequestmessage)
print(ikrequestmessage)
#actually call the IK_service for motorpositions on the provided pose
try:
rospy.wait_for_service(servicename, 5.0)
resp = ikservice(ikrequestmessage)
# print('resp is')
# print(resp)
#when a failure occurs, print the cause
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
return 1
"""
revision the rest of this function
"""
# Check if result valid, and type of seed ultimately used to get solution
# convert rospy's string representation of uint8[]'s to int's
resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
resp.result_type)
if (resp_seeds[0] != resp.RESULT_INVALID):
seed_str = {
ikrequestmessage.SEED_USER: 'User Provided Seed',
ikrequestmessage.SEED_CURRENT: 'Current Joint Angles',
ikrequestmessage.SEED_NS_MAP: 'Nullspace Setpoints',
}.get(resp_seeds[0], 'None')
print("SUCCESS - Valid Joint Solution Found from Seed Type: %s" %
(seed_str,))
# Format solution into Limb API-compatible dictionary
limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
print "\nIK Joint Solution:\n", limb_joints
print "------------------"
print "Response Message:\n", resp
else:
print("INVALID POSE - No Valid Joint Solution Found.")
return 0
def main():
    """Entry point for Sander's IK test.

    The ROS node is initialised inside ik_test(), so there is nothing to
    set up here; just run the routine and report back.
    """
    ik_test()
    return "\n IK test executed succesfully"
# NOTE(review): main() returns a string, so sys.exit() will print it to
# stderr and exit with status 1 even on success — confirm this is intended.
if __name__ == '__main__':
    sys.exit(main())

# Reference copy of the IK service request layout, kept for documentation.
"""
THIS IS THE MESSAGE WE ARE FILLING IN TO DO AN IK REQUEST
baxter_core_msgs/SolvePositionIK
uint8 SEED_AUTO=0
uint8 SEED_USER=1
uint8 SEED_CURRENT=2
uint8 SEED_NS_MAP=3
geometry_msgs/PoseStamped[] pose_stamp
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
sensor_msgs/JointState[] seed_angles
std_msgs/Header header
uint32 seq
time stamp
string frame_id
string[] name
float64[] position
float64[] velocity
float64[] effort
uint8 seed_mode
"""
| |
import random
from tests.checks.common import AgentCheckTest, load_check
# Minimal single-instance configuration with catalog checks enabled.
MOCK_CONFIG = {
    'init_config': {},
    'instances' : [{
        'url': 'http://localhost:8500',
        'catalog_checks': True,
    }]
}

# Same as MOCK_CONFIG but with a 70-entry service whitelist, used to
# exercise the service-culling logic.
MOCK_CONFIG_SERVICE_WHITELIST = {
    'init_config': {},
    'instances' : [{
        'url': 'http://localhost:8500',
        'catalog_checks': True,
        'service_whitelist': ['service_{0}'.format(k) for k in range(70)]
    }]
}

# Configuration with leader-change event detection enabled.
MOCK_CONFIG_LEADER_CHECK = {
    'init_config': {},
    'instances' : [{
        'url': 'http://localhost:8500',
        'catalog_checks': True,
        'new_leader_checks': True
    }]
}

MOCK_BAD_CONFIG = {
    'init_config': {},
    'instances' : [{ # Multiple instances should cause it to fail
        'url': 'http://localhost:8500',
        'catalog_checks': True,
        'new_leader_checks': True
    }, {
        'url': 'http://localhost:8501',
        'catalog_checks': True,
        'new_leader_checks': True
    }]
}
def _get_random_ip():
rand_int = int(15 * random.random()) + 10
return "10.0.2.{0}".format(rand_int)
class TestCheckConsul(AgentCheckTest):
    """Unit tests for the consul agent check, driven entirely by mocks
    substituted for the check's HTTP-backed helper methods."""

    CHECK_NAME = 'consul'

    def mock_get_peers_in_cluster(self, instance):
        # Three raft peers -> consul.peers should report 3.
        return [
            "10.0.2.14:8300",
            "10.0.2.15:8300",
            "10.0.2.16:8300"
        ]

    def mock_get_services_in_cluster(self, instance):
        # Six services, all tagged with the same availability zone.
        return {
            "service-1": [
                "az-us-east-1a"
            ],
            "service-2": [
                "az-us-east-1a"
            ],
            "service-3": [
                "az-us-east-1a"
            ],
            "service-4": [
                "az-us-east-1a"
            ],
            "service-5": [
                "az-us-east-1a"
            ],
            "service-6": [
                "az-us-east-1a"
            ]
        }

    def mock_get_n_services_in_cluster(self, n):
        # Generate *n* untagged services, for whitelist/culling tests.
        dct = {}
        for i in range(n):
            k = "service_{0}".format(i)
            dct[k] = []
        return dct

    def mock_get_local_config(self, instance):
        # Minimal agent self-config; AdvertiseAddr matching a peer address
        # makes this node the leader under mock_get_cluster_leader_A.
        return {
            "Config": {
                "AdvertiseAddr": "10.0.2.15",
                "Datacenter": "dc1",
                "Ports": {
                    "DNS": 8600,
                    "HTTP": 8500,
                    "HTTPS": -1,
                    "RPC": 8400,
                    "SerfLan": 8301,
                    "SerfWan": 8302,
                    "Server": 8300
                },
            }
        }

    def mock_get_nodes_in_cluster(self, instance):
        # NOTE(review): two entries share the name "node-2" with different
        # addresses — confirm whether the duplicate is intentional.
        return [
            {
                "Address": "10.0.2.15",
                "Node": "node-1"
            },
            {
                "Address": "10.0.2.25",
                "Node": "node-2"
            },
            {
                "Address": "10.0.2.35",
                "Node": "node-2"
            },
        ]

    def mock_get_nodes_with_service(self, instance, service):
        # One healthy node: serf check plus a passing service check.
        return [
            {
                "Checks": [
                    {
                        "CheckID": "serfHealth",
                        "Name": "Serf Health Status",
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Agent alive and reachable",
                        "ServiceID": "",
                        "ServiceName": "",
                        "Status": "passing"
                    },
                    {
                        "CheckID": "service:{0}".format(service),
                        "Name": "service check {0}".format(service),
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Service {0} alive".format(service),
                        "ServiceID": service,
                        "ServiceName": "",
                        "Status": "passing"
                    }
                ],
                "Node": {
                    "Address": _get_random_ip(),
                    "Node": "node-1"
                },
                "Service": {
                    "Address": "",
                    "ID": service,
                    "Port": 80,
                    "Service": service,
                    "Tags": [
                        "az-us-east-1a"
                    ]
                }
            }
        ]

    def mock_get_nodes_with_service_warning(self, instance, service):
        # Same shape as above, but the service check reports "warning".
        return [
            {
                "Checks": [
                    {
                        "CheckID": "serfHealth",
                        "Name": "Serf Health Status",
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Agent alive and reachable",
                        "ServiceID": "",
                        "ServiceName": "",
                        "Status": "passing"
                    },
                    {
                        "CheckID": "service:{0}".format(service),
                        "Name": "service check {0}".format(service),
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Service {0} alive".format(service),
                        "ServiceID": service,
                        "ServiceName": "",
                        "Status": "warning"
                    }
                ],
                "Node": {
                    "Address": _get_random_ip(),
                    "Node": "node-1"
                },
                "Service": {
                    "Address": "",
                    "ID": service,
                    "Port": 80,
                    "Service": service,
                    "Tags": [
                        "az-us-east-1a"
                    ]
                }
            }
        ]

    def mock_get_nodes_with_service_critical(self, instance, service):
        # Two service checks (warning + critical): the worst status wins,
        # so the node should be counted as critical.
        return [
            {
                "Checks": [
                    {
                        "CheckID": "serfHealth",
                        "Name": "Serf Health Status",
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Agent alive and reachable",
                        "ServiceID": "",
                        "ServiceName": "",
                        "Status": "passing"
                    },
                    {
                        "CheckID": "service:{0}".format(service),
                        "Name": "service check {0}".format(service),
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Service {0} alive".format(service),
                        "ServiceID": service,
                        "ServiceName": "",
                        "Status": "warning"
                    },
                    {
                        "CheckID": "service:{0}".format(service),
                        "Name": "service check {0}".format(service),
                        "Node": "node-1",
                        "Notes": "",
                        "Output": "Service {0} alive".format(service),
                        "ServiceID": service,
                        "ServiceName": "",
                        "Status": "critical"
                    }
                ],
                "Node": {
                    "Address": _get_random_ip(),
                    "Node": "node-1"
                },
                "Service": {
                    "Address": "",
                    "ID": service,
                    "Port": 80,
                    "Service": service,
                    "Tags": [
                        "az-us-east-1a"
                    ]
                }
            }
        ]

    def mock_get_cluster_leader_A(self, instance):
        # Leader matches this agent's AdvertiseAddr -> mode:leader.
        return '10.0.2.15:8300'

    def mock_get_cluster_leader_B(self, instance):
        # Leader does not match this agent -> mode:follower.
        return 'My New Leader'

    def _get_consul_mocks(self):
        # Default mock set for run_check; individual tests override entries.
        return {
            'get_services_in_cluster': self.mock_get_services_in_cluster,
            'get_nodes_with_service': self.mock_get_nodes_with_service,
            'get_peers_in_cluster': self.mock_get_peers_in_cluster,
            '_get_local_config': self.mock_get_local_config,
            '_get_cluster_leader': self.mock_get_cluster_leader_A
        }

    def test_bad_config(self):
        # Multiple instances are not supported and must raise.
        self.assertRaises(Exception, self.run_check, MOCK_BAD_CONFIG)

    def test_get_nodes_with_service(self):
        self.run_check(MOCK_CONFIG, mocks=self._get_consul_mocks())
        self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_passing', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_passing', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])

    def test_get_nodes_with_service_warning(self):
        my_mocks = self._get_consul_mocks()
        my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_warning
        self.run_check(MOCK_CONFIG, mocks=my_mocks)
        self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_warning', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_warning', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])

    def test_get_nodes_with_service_critical(self):
        my_mocks = self._get_consul_mocks()
        my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_critical
        self.run_check(MOCK_CONFIG, mocks=my_mocks)
        self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.nodes_critical', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
        self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
        self.assertMetric('consul.catalog.services_critical', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])

    def test_get_peers_in_cluster(self):
        mocks = self._get_consul_mocks()
        # When node is leader
        self.run_check(MOCK_CONFIG, mocks=mocks)
        self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:leader'])
        mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
        # When node is follower
        self.run_check(MOCK_CONFIG, mocks=mocks)
        self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:follower'])

    def test_cull_services_list(self):
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
        # Pad num_services to kick in truncation logic
        num_services = self.check.MAX_SERVICES + 20
        # Big whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(num_services)]
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
        # Whitelist < MAX_SERVICES should spit out the whitelist
        services = self.mock_get_n_services_in_cluster(num_services)
        whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
        # No whitelist, still triggers truncation
        whitelist = []
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
        # Num. services < MAX_SERVICES should be no-op in absence of whitelist
        num_services = self.check.MAX_SERVICES - 1
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)
        # Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
        num_services = self.check.MAX_SERVICES - 1
        whitelist = ['service_1', 'service_2', 'service_3']
        services = self.mock_get_n_services_in_cluster(num_services)
        self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))

    def test_new_leader_event(self):
        # A change of leader must emit exactly one consul.new_leader event
        # tagged with both the previous and the current leader.
        self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
        self.check._last_known_leader = 'My Old Leader'
        mocks = self._get_consul_mocks()
        mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
        self.run_check(MOCK_CONFIG_LEADER_CHECK, mocks=mocks)
        self.assertEqual(len(self.events), 1)
        event = self.events[0]
        self.assertEqual(event['event_type'], 'consul.new_leader')
        self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
        self.assertIn('curr_consul_leader:My New Leader', event['tags'])
| |
from bolt.discord.models.base import Enum, Model, Field, ListField, Snowflake, Timestamp, SearchableList
from bolt.discord.models.channel import Channel
from bolt.discord.models.user import User
from bolt.discord.permissions import Permission
class MessageNotificationLevel(Enum):
    """Guild default notification setting."""
    ALL_MESSAGES = 0
    ONLY_MENTIONS = 1
class VerificationLevel(Enum):
    """Guild member verification requirement, from none to strictest."""
    NONE = 0
    LOW = 1
    MEDIUM = 2
    HIGH = 3
    VERY_HIGH = 4
class ExplicitContentFilterLevel(Enum):
    """Whose messages are scanned for explicit media."""
    DISABLED = 0
    MEMBERS_WITHOUT_ROLES = 1
    ALL_MEMBERS = 2
class MFALevel(Enum):
    """Multi-factor auth requirement for moderation actions."""
    NONE = 0
    ELEVATED = 1
class PremiumTier(Enum):
    """Server boost tier."""
    NONE = 0
    TIER_1 = 1
    TIER_2 = 2
    TIER_3 = 3
class GuildMember(Model):
    """A user's membership record within a specific guild, plus the
    moderation actions that can be applied to it through the API."""

    __repr_keys__ = ['id', 'name']

    user = Field(User)
    guild_id = Field(Snowflake)
    # Raw nickname; exposed through the nick property below.
    _nick = Field(str, json_key="nick")
    # NOTE(review): declared as ints, yet has_role() queries with
    # roles.find(id=...) — confirm ListField(int) yields a searchable list.
    roles = ListField(int)
    joined_at = Field(Timestamp)
    deaf = Field(bool)
    mute = Field(bool)

    @property
    def mention(self):
        # Delegate to the underlying user's mention string.
        return self.user.mention

    def squelch(self):
        """Server-mute this member."""
        return self.api.modify_guild_member(self.guild_id, self.id, mute=True)

    def unsquelch(self):
        """Remove the server mute."""
        return self.api.modify_guild_member(self.guild_id, self.id, mute=False)

    def deafen(self):
        """Server-deafen this member."""
        return self.api.modify_guild_member(self.guild_id, self.id, deaf=True)

    def undeafen(self):
        """Remove the server deafen."""
        return self.api.modify_guild_member(self.guild_id, self.id, deaf=False)

    def move(self, channel):
        """Move the member to another voice channel."""
        return self.api.modify_guild_member(self.guild_id, self.id, channel_id=channel.id)

    def whisper(self, *args, **kwargs):
        """Open (or reuse) a DM channel with this member and send to it."""
        channel = Channel.marshal(self.api.create_dm(self.id))
        channel.api = self.api
        return channel.say(*args, **kwargs)

    def kick(self):
        """Remove the member from the guild."""
        return self.api.remove_guild_member(self.guild_id, self.id)

    def ban(self, reason):
        """Ban the member, recording *reason*."""
        return self.api.create_guild_ban(self.guild_id, self.id, reason=reason)

    def unban(self):
        """Lift an existing ban for this member."""
        return self.api.remove_guild_ban(self.guild_id, self.id)

    def nickname(self, nickname):
        # Not implemented yet; use the nick setter for the local value.
        raise NotImplementedError

    def add_role(self, role):
        """Grant *role* to this member."""
        self.api.add_guild_member_role(self.guild_id, self.id, role.id)

    def remove_role(self, role):
        """Revoke *role* from this member."""
        self.api.remove_guild_member_role(self.guild_id, self.id, role.id)

    def has_role(self, role):
        """Return True if the member holds *role*."""
        return bool(self.roles.find(id=role.id))

    @property
    def nick(self):
        # Guild-specific nickname (may be None).
        return self._nick

    @nick.setter
    def nick(self, value):
        self._nick = str(value)

    @property
    def id(self):
        # A member's id is its underlying user's id.
        return self.user.id

    @property
    def name(self):
        # Prefer the guild nickname, falling back to the account name.
        if self.nick is not None:
            return self.nick
        else:
            return self.user.name
class Role(Model):
    """A guild role: a named permission set with display ordering."""

    __repr_keys__ = ['id', 'name']

    id = Field(Snowflake, required=True)
    name = Field(str, required=True)
    color = Field(int)
    # hoist: whether members with this role are listed separately.
    hoist = Field(bool)
    position = Field(int)
    permissions = Field(Permission)
    # managed: role controlled by an integration, not editable by users.
    managed = Field(bool)
    mentionable = Field(bool)

    # TODO: Implement update ability
    def rename(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError
class VoiceState(Model):
    """A user's live voice connection state within a guild channel."""

    guild_id = Field(Snowflake)
    channel_id = Field(Snowflake)
    user_id = Field(Snowflake)
    session_id = Field(str)
    # Server-side mute/deafen vs the user's own (self_*) settings.
    deaf = Field(bool)
    mute = Field(bool)
    self_deaf = Field(bool)
    self_mute = Field(bool)
    suppress = Field(bool)
class VoiceRegion(Model):
    """A voice server region descriptor."""

    id = Field(str)
    name = Field(str)
    vip = Field(bool)
    # optimal: closest region to the requesting client.
    optimal = Field(bool)
    deprecated = Field(bool)
    custom = Field(bool)
class ActivityType(Enum):
    """Kind of presence activity being displayed."""
    GAME = 0
    STREAMING = 1
    LISTENING = 2
    WATCHING = 3
    CUSTOM_STATUS = 4
class Activity(Model):
    """An activity shown in a user's presence (game, stream, ...)."""

    name = Field(str)
    type = Field(ActivityType)
    url = Field(str)
    application_id = Field(int)
    details = Field(str)
    state = Field(str)
    # Not yet modeled:
    # timestamps:
    # party:
    # assets:
class Presence(Model):
    """A user's presence (status and current activity) in a guild."""

    __repr_keys__ = ['user']

    user = Field(User)
    game = Field(Activity)
    guild_id = Field(Snowflake)
    status = Field(str)
class Ban(Model):
    """A guild ban entry: the banned user and the recorded reason."""
    reason = Field(str)
    user = Field(User)
class Emoji(Model):
    """A custom guild emoji."""

    __repr_keys__ = ['id', 'name']

    id = Field(Snowflake, required=True)
    name = Field(str, required=True)
    roles = ListField(Role)
    # NOTE(review): the Discord emoji payload has a single creating user;
    # confirm ListField(User) rather than Field(User) is intended here.
    user = ListField(User)
    require_colons = Field(bool, default=False)
    managed = Field(bool, default=False)
    animated = Field(bool, default=False)

    # TODO: Implement Update ability
    def rename(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError
class Guild(Model):
    """A Discord guild (server), its settings, and its member/channel/
    role collections, plus convenience accessors backed by the API and
    the local cache."""

    __repr_keys__ = ['id', 'name']

    id = Field(Snowflake, required=True)
    name = Field(str)
    icon = Field(str)
    splash = Field(str)
    owner_id = Field(Snowflake)
    permissions = Field(Permission)
    region = Field(str)
    afk_channel_id = Field(Snowflake)
    afk_timeout = Field(int)
    embed_enabled = Field(bool, default=False)
    embed_channel_id = Field(Snowflake)
    verification_level = Field(VerificationLevel)
    default_message_notifications = Field(MessageNotificationLevel)
    explicit_content_filter = Field(ExplicitContentFilterLevel)
    roles = ListField(Role)
    emojis = ListField(Emoji)
    features = ListField(str)
    mfa_level = Field(MFALevel)
    application_id = Field(Snowflake)
    widget_enabled = Field(bool)
    widget_channel_id = Field(Snowflake)
    system_channel_id = Field(Snowflake)
    joined_at = Field(Timestamp)
    large = Field(bool)
    unavailable = Field(bool)
    member_count = Field(int)
    voice_states = ListField(VoiceState)
    members = ListField(GuildMember)
    channels = ListField(Channel)
    presences = ListField(Presence)
    max_presences = Field(int)
    max_members = Field(int)
    vanity_url_code = Field(str)
    banner = Field(str)
    premium_tier = Field(PremiumTier)
    premium_subscription_count = Field(int)

    def leave(self):
        """Make the current user leave this guild."""
        return self.api.leave_guild(self.id)

    def prune(self, days, compute_prune_count=False):
        """Begin pruning members inactive for *days* days."""
        # NOTE(review): unlike every other API call here, no guild id is
        # passed — confirm begin_guild_prune's expected signature.
        return self.api.begin_guild_prune(days, compute_prune_count=compute_prune_count)

    def create_voice_channel(self):
        """Not yet implemented."""
        pass

    def create_text_channel(self):
        """Not yet implemented."""
        pass

    def create_category(self):
        """Not yet implemented."""
        pass

    @property
    def invites(self):
        """Active invites for this guild, fetched from the API."""
        return self.api.get_guild_invites(self.id)

    @property
    def prune_count(self):
        """Number of members a prune would remove."""
        return self.api.get_guild_prune_count(self.id)

    @property
    def bans(self):
        """Return this guild's bans as a SearchableList of Ban models.

        BUG FIX: the original built the list but never returned it, so
        the property always evaluated to None. (Also renamed the local
        from ``all``, which shadowed the builtin.)
        """
        ban_list = SearchableList()
        guild_bans = self.api.get_guild_bans(self.id)
        for ban in guild_bans:
            ban_list.append(Ban.marshal(ban))
        return ban_list

    @property
    def embed_channel(self):
        return self.cache.channels[self.embed_channel_id]

    @property
    def widget_channel(self):
        return self.cache.channels[self.widget_channel_id]

    @property
    def afk_channel(self):
        return self.cache.channels[self.afk_channel_id]

    @property
    def system_channel(self):
        return self.cache.channels[self.system_channel_id]

    @property
    def owner(self):
        """The guild owner's User object from the local cache."""
        return self.cache.users[self.owner_id]
| |
"""
ESQuery
=======
ESQuery is a library for building elasticsearch queries in a friendly,
more readable manner.
Basic usage
-----------
There should be a file and subclass of ESQuery for each index we have.
Each method returns a new object, so you can chain calls together like
SQLAlchemy. Here's an example usage:
.. code-block:: python
q = (FormsES()
.domain(self.domain)
.xmlns(self.xmlns)
.submitted(gte=self.datespan.startdate_param,
lt=self.datespan.enddateparam)
.fields(['xmlns', 'domain', 'app_id'])
.sort('received_on', desc=False)
.size(self.pagination.count)
.start(self.pagination.start)
.terms_aggregation('babies.count', 'babies_saved'))
result = q.run()
total_docs = result.total
hits = result.hits
Generally useful filters and queries should be abstracted away for re-use,
but you can always add your own like so:
.. code-block:: python
q.filter({"some_arbitrary_filter": {...}})
q.set_query({"fancy_query": {...}})
For debugging or more helpful error messages, you can use ``query.dumps()``
and ``query.pprint()``, both of which use ``json.dumps()`` and are suitable for
pasting into ES Head or Marvel or whatever
Filtering
---------
Filters are implemented as standalone functions, so they can be composed and
nested ``q.OR(web_users(), mobile_users())``.
Filters can be passed to the ``query.filter`` method: ``q.filter(web_users())``
There is some syntactic sugar that lets you skip this boilerplate and just
call the filter as if it were a method on the query class: ``q.web_users()``
In order to be available for this shorthand, filters are added to the
``builtin_filters`` property of the main query class.
I know that's a bit confusing, but it seemed like the best way to make filters
available in both contexts.
Generic filters applicable to all indices are available in
``corehq.apps.es.filters``. (But most/all can also be accessed as a query
method, if appropriate)
Filtering Specific Indices
--------------------------
There is a file for each elasticsearch index (if not, feel free to add one).
This file provides filters specific to that index, as well as an
appropriately-directed ESQuery subclass with references to these filters.
These index-specific query classes also have default filters to exclude things
like inactive users or deleted docs.
These things should nearly always be excluded, but if necessary, you can remove
these with ``remove_default_filters``.
Running against production
--------------------------
Since the ESQuery library is read-only, it's mostly safe to run against
production. You can define alternate elasticsearch hosts in your localsettings
file in the ``ELASTICSEARCH_DEBUG_HOSTS`` dictionary and pass in this host name
as the ``debug_host`` to the constructor:
.. code-block:: python
>>> CaseES(debug_host='prod').domain('dimagi').count()
120
Language
--------
* es_query - the entire query, filters, query, pagination, facets
* filters - a list of the individual filters
* query - the query, used for searching, not filtering
* field - a field on the document. User docs have a 'domain' field.
* lt/gt - less/greater than
* lte/gte - less/greater than or equal to
.. TODOs:
sorting
Add esquery.iter() method
"""
from collections import namedtuple
from copy import deepcopy
import json
from dimagi.utils.decorators.memoized import memoized
from corehq.elastic import ES_META, ESError, run_query, scroll_query, SIZE_LIMIT, \
ScanResult
from . import aggregations
from . import filters
from . import queries
from .utils import values_list, flatten_field_dict
class ESQuery(object):
    """
    This query builder only outputs the following query structure::
        {
            "query": {
                "filtered": {
                    "filter": {
                        "and": [
                            <filters>
                        ]
                    },
                    "query": <query>
                }
            },
            <size, sort, other params>
        }
    """
    # Name of the ES index to query. Subclasses set a default; it may be
    # overridden per-instance via the ``index`` constructor argument.
    index = None
    # When True, ``_source`` retrieval is disabled and only doc ids come back.
    _exclude_source = None
    # Set by the deprecated ``fields()`` method; makes results run through
    # ``flatten_field_dict`` on the way out.
    _legacy_fields = False
    # Pagination offset / limit; None means "not explicitly set".
    _start = None
    _size = None
    _aggregations = None
    _source = None
    # Filters applied to every query unless removed via
    # ``remove_default_filters`` / ``remove_default_filter``.
    default_filters = {
        "match_all": filters.match_all()
    }
    def __init__(self, index=None, debug_host=None):
        # Imported locally to avoid a circular import at module load time.
        from corehq.apps.userreports.util import is_ucr_table
        self.index = index if index is not None else self.index
        if self.index not in ES_META and not is_ucr_table(self.index):
            # NOTE(review): the message interpolates the ``index`` argument,
            # not ``self.index``, so it prints "None" when the invalid value
            # came from the class default -- confirm whether that is intended.
            msg = "%s is not a valid ES index. Available options are: %s" % (
                index, ', '.join(ES_META.keys()))
            raise IndexError(msg)
        self.debug_host = debug_host
        # deepcopy so per-instance mutation can never leak back into the
        # class-level ``default_filters`` dict.
        self._default_filters = deepcopy(self.default_filters)
        self._facets = []
        self._aggregations = []
        self._source = None
        self.es_query = {"query": {
            "filtered": {
                "filter": {"and": []},
                "query": queries.match_all()
            }
        }}
    @property
    def builtin_filters(self):
        """
        A list of callables that return filters. These will all be available as
        instance methods, so you can do ``self.term(field, value)`` instead of
        ``self.filter(filters.term(field, value))``
        """
        return [
            filters.term,
            filters.OR,
            filters.AND,
            filters.NOT,
            filters.range_filter,
            filters.date_range,
            filters.missing,
            filters.exists,
            filters.empty,
            filters.non_null,
            filters.doc_id,
            filters.nested,
            filters.regexp,
        ]
    def __getattr__(self, attr):
        # This is syntactic sugar
        # If you do query.<attr> and attr isn't found as a classmethod,
        # this will look for it in self.builtin_filters.
        for fn in self.builtin_filters:
            if fn.__name__ == attr:
                def add_filter(*args, **kwargs):
                    return self.filter(fn(*args, **kwargs))
                return add_filter
        raise AttributeError("There is no builtin filter named %s" % attr)
    def __getitem__(self, sliced_or_int):
        # Pagination sugar: query[i] returns a one-hit page, query[a:b] a
        # slice. (``long`` check -- this module targets Python 2.)
        if isinstance(sliced_or_int, (int, long)):
            start = sliced_or_int
            size = 1
        else:
            # Slice step is ignored; a missing start defaults to 0, and a
            # missing stop would raise TypeError here.
            start = sliced_or_int.start or 0
            size = sliced_or_int.stop - start
        return self.start(start).size(size).run().hits
    def run(self, include_hits=False):
        """Actually run the query. Returns an ESQuerySet object."""
        query = self._clean_before_run(include_hits)
        raw = run_query(query.index, query.raw_query, debug_host=query.debug_host)
        return ESQuerySet(raw, deepcopy(query))
    def _clean_before_run(self, include_hits=False):
        # Aggregation-only queries don't need hits; ask ES for zero of them
        # unless the caller explicitly wants hits too.
        query = deepcopy(self)
        if not include_hits and query.uses_aggregations():
            query = query.size(0)
        return query
    def scroll(self):
        """
        Run the query against the scroll api. Returns an iterator yielding each
        document that matches the query.
        """
        result = scroll_query(self.index, self.raw_query)
        return ScanResult(
            result.count,
            (ESQuerySet.normalize_result(deepcopy(self), r) for r in result)
        )
    @property
    def _filters(self):
        # The mutable "and" list inside the assembled query structure.
        return self.es_query['query']['filtered']['filter']['and']
    def exclude_source(self):
        """
        Turn off _source retrieval. Mostly useful if you just want the doc_ids
        """
        # NOTE(review): unlike the other builders, this mutates ``self``
        # in place rather than returning a deepcopy -- confirm intended.
        self._exclude_source = True
        return self
    def filter(self, filter):
        """
        Add the passed-in filter to the query. All filtering goes through
        this class.
        """
        query = deepcopy(self)
        query._filters.append(filter)
        return query
    @property
    def filters(self):
        """
        Return a list of the filters used in this query, suitable if you
        want to reproduce a query with additional filtering.
        """
        # Python 2 dict.values() returns a list, so ``+`` concatenates.
        return self._default_filters.values() + self._filters
    def uses_aggregations(self):
        # True when at least one aggregation has been added.
        return len(self._aggregations) > 0
    def aggregation(self, aggregation):
        """
        Add the passed-in aggregation to the query
        """
        query = deepcopy(self)
        query._aggregations.append(aggregation)
        return query
    def aggregations(self, aggregations):
        # Bulk version of ``aggregation``; adds several at once.
        query = deepcopy(self)
        query._aggregations.extend(aggregations)
        return query
    def terms_aggregation(self, term, name, size=None):
        # Shorthand for the common terms aggregation.
        return self.aggregation(aggregations.TermsAggregation(name, term, size=size))
    def date_histogram(self, name, datefield, interval, timezone=None):
        # Shorthand for the common date-histogram aggregation.
        return self.aggregation(aggregations.DateHistogram(name, datefield, interval, timezone=timezone))
    @property
    def _query(self):
        # The "query" part of the filtered query (as opposed to the filters).
        return self.es_query['query']['filtered']['query']
    def set_query(self, query):
        """
        Set the query. Most stuff we want is better done with filters, but
        if you actually want Levenshtein distance or prefix querying...
        """
        es = deepcopy(self)
        es.es_query['query']['filtered']['query'] = query
        return es
    def search_string_query(self, search_string, default_fields=None):
        """Accepts a user-defined search string"""
        return self.set_query(
            queries.search_string_query(search_string, default_fields)
        )
    def _assemble(self):
        """Build out the es_query dict"""
        # Mutates self; callers (``raw_query``) deepcopy first.
        self._filters.extend(self._default_filters.values())
        if self._start is not None:
            self.es_query['from'] = self._start
        self.es_query['size'] = self._size if self._size is not None else SIZE_LIMIT
        if self._exclude_source:
            self.es_query['_source'] = False
        elif self._source is not None:
            self.es_query['_source'] = self._source
        if self._aggregations:
            self.es_query['aggs'] = {
                agg.name: agg.assemble()
                for agg in self._aggregations
            }
    def fields(self, fields):
        """
        Restrict the fields returned from elasticsearch
        Deprecated. Use `source` instead.
        """
        # NOTE(review): sets the flag on ``self`` before deepcopying in
        # ``source`` -- the original instance is mutated too.
        self._legacy_fields = True
        return self.source(fields)
    def source(self, include, exclude=None):
        """
        Restrict the output of _source in the queryset. This can be used to return an object in a queryset
        """
        self._exclude_source = False
        source = include
        if exclude:
            source = {
                'include': include,
                'exclude': exclude
            }
        query = deepcopy(self)
        query._source = source
        return query
    def start(self, start):
        """Pagination. Analogous to SQL offset."""
        query = deepcopy(self)
        query._start = start
        return query
    def size(self, size):
        """Restrict number of results returned. Analogous to SQL limit."""
        query = deepcopy(self)
        query._size = size
        return query
    @property
    def raw_query(self):
        # Assemble on a copy so the builder itself stays reusable.
        query = deepcopy(self)
        query._assemble()
        return query.es_query
    def dumps(self, pretty=False):
        """Returns the JSON query that will be sent to elasticsearch."""
        indent = 4 if pretty else None
        return json.dumps(self.raw_query, indent=indent)
    def pprint(self):
        """pretty prints the JSON query that will be sent to elasticsearch."""
        print self.dumps(pretty=True)
    def sort(self, field, desc=False, reset_sort=True):
        """Order the results by field."""
        query = deepcopy(self)
        sort_field = {
            field: {'order': 'desc' if desc else 'asc'}
        }
        if reset_sort:
            # Replace any existing sort clauses.
            query.es_query['sort'] = [sort_field]
        else:
            # Append as a secondary sort key.
            if not query.es_query.get('sort'):
                query.es_query['sort'] = []
            query.es_query['sort'].append(sort_field)
        return query
    def set_sorting_block(self, sorting_block):
        """To be used with `get_sorting_block`, which interprets datatables sorting"""
        query = deepcopy(self)
        query.es_query['sort'] = sorting_block
        return query
    def remove_default_filters(self):
        """Sensible defaults are provided. Use this if you don't want 'em"""
        # A match_all placeholder is kept so the "and" clause is never empty.
        query = deepcopy(self)
        query._default_filters = {"match_all": filters.match_all()}
        return query
    def remove_default_filter(self, default):
        """Remove a specific default filter by passing in its name."""
        query = deepcopy(self)
        if default in query._default_filters:
            query._default_filters.pop(default)
        if len(query._default_filters) == 0:
            # Never leave the default-filter dict empty (see above).
            query._default_filters = {"match_all": filters.match_all()}
        return query
    def values(self, *fields):
        """modeled after django's QuerySet.values"""
        if fields:
            return self.fields(fields).run().hits
        else:
            return self.run().hits
    def values_list(self, *fields, **kwargs):
        # Like django's values_list; ``flat=True`` is handled by the helper.
        return values_list(self.fields(fields).run().hits, *fields, **kwargs)
    def count(self):
        """Performs a minimal query to get the count of matching documents"""
        return self.size(0).run().total
    def get_ids(self):
        """Performs a minimal query to get the ids of the matching documents"""
        return self.exclude_source().run().doc_ids
class ESQuerySet(object):
    """
    The object returned from ``ESQuery.run``
    * ESQuerySet.raw is the raw response from elasticsearch
    * ESQuerySet.query is the ESQuery object
    """
    def __init__(self, raw, query):
        # Surface elasticsearch-reported errors immediately, with enough
        # context (index + full query) to reproduce the failure by hand.
        if 'error' in raw:
            raise ESError(
                "ElasticSearch Error\n{error}\nIndex: {index}"
                "\nQuery: {query}".format(
                    error=raw['error'],
                    index=query.index,
                    query=query.dumps(pretty=True),
                )
            )
        self.raw = raw
        self.query = query
    @staticmethod
    def normalize_result(query, result):
        """Return the doc from an item in the query response."""
        if query._exclude_source:
            # Source retrieval was disabled; only the id is meaningful.
            return result['_id']
        if not query._legacy_fields:
            return result['_source']
        # Legacy ``.fields()`` callers expect a flattened dict.
        return flatten_field_dict(result, fields_property='_source')
    @property
    def raw_hits(self):
        # The unprocessed hit dicts straight from the ES response.
        return self.raw['hits']['hits']
    @property
    def doc_ids(self):
        """Return just the docs ids from the response."""
        return [hit['_id'] for hit in self.raw_hits]
    @property
    def hits(self):
        """Return the docs from the response."""
        hit_dicts = self.raw_hits
        aggs_with_no_hits = (self.query.uses_aggregations()
                             and self.query._size == 0)
        if not hit_dicts and aggs_with_no_hits:
            # An aggregation query was trimmed to size 0, so asking for
            # hits is almost certainly a caller mistake.
            raise ESError("no hits, did you forget about no_hits_with_aggs?")
        return [self.normalize_result(self.query, hit) for hit in hit_dicts]
    @property
    def total(self):
        """Return the total number of docs matching the query."""
        return self.raw['hits']['total']
    def aggregation(self, name):
        # Raw result for a single named aggregation.
        return self.raw['aggregations'][name]
    @property
    @memoized
    def aggregations(self):
        # Parse every requested aggregation and expose them as named
        # attributes on a namedtuple, keyed by each aggregation's name.
        requested = self.query._aggregations
        raw_aggs = self.raw.get('aggregations', {})
        results_cls = namedtuple('aggregation_results',
                                 [agg.name for agg in requested])
        parsed = {agg.name: agg.parse_result(raw_aggs) for agg in requested}
        return results_cls(**parsed)
    def __repr__(self):
        return '{}({!r}, {!r})'.format(
            self.__class__.__name__, self.raw, self.query)
class HQESQuery(ESQuery):
    """
    Query logic specific to CommCareHQ
    """
    @property
    def builtin_filters(self):
        # HQ-specific filters come first, then everything ESQuery provides.
        hq_filters = [
            filters.doc_id,
            filters.doc_type,
            filters.domain,
        ]
        return hq_filters + super(HQESQuery, self).builtin_filters
| |
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as neutron_client_exc
from oslo_utils import uuidutils
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import pxe_utils
from ironic.conductor import task_manager
from ironic.dhcp import neutron
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
class TestNeutron(db_base.DbTestCase):
    """Unit tests for the Neutron DHCP provider (ironic.dhcp.neutron)."""
    def setUp(self):
        super(TestNeutron, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake')
        self.config(
            cleaning_network='00000000-0000-0000-0000-000000000000',
            group='neutron')
        self.config(enabled_drivers=['fake'])
        self.config(dhcp_provider='neutron',
                    group='dhcp')
        self.node = object_utils.create_test_node(self.context)
        self.ports = [
            object_utils.create_test_port(
                self.context, node_id=self.node.id, id=2,
                uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782',
                address='52:54:00:cf:2d:32')]
        # Very simple neutron port representation
        self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00',
                             'mac_address': '52:54:00:cf:2d:32'}
        # Reset the factory's cached provider so each test builds it fresh.
        dhcp_factory.DHCPFactory._dhcp_provider = None
    @mock.patch('ironic.common.neutron.get_client', autospec=True)
    def test_update_port_dhcp_opts(self, client_mock):
        # Options should be forwarded verbatim under 'extra_dhcp_opts'.
        opts = [{'opt_name': 'bootfile-name',
                 'opt_value': 'pxelinux.0'},
                {'opt_name': 'tftp-server',
                 'opt_value': '1.1.1.1'},
                {'opt_name': 'server-ip-address',
                 'opt_value': '1.1.1.1'}]
        port_id = 'fake-port-id'
        expected = {'port': {'extra_dhcp_opts': opts}}
        api = dhcp_factory.DHCPFactory()
        api.provider.update_port_dhcp_opts(port_id, opts)
        client_mock.return_value.update_port.assert_called_once_with(
            port_id, expected)
    @mock.patch('ironic.common.neutron.get_client', autospec=True)
    def test_update_port_dhcp_opts_with_exception(self, client_mock):
        # A neutron client failure is translated into the ironic exception.
        opts = [{}]
        port_id = 'fake-port-id'
        client_mock.return_value.update_port.side_effect = (
            neutron_client_exc.NeutronClientException())
        api = dhcp_factory.DHCPFactory()
        self.assertRaises(
            exception.FailedToUpdateDHCPOptOnPort,
            api.provider.update_port_dhcp_opts,
            port_id, opts)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
    @mock.patch('ironic.common.network.get_node_vif_ids')
    def test_update_dhcp(self, mock_gnvi, mock_updo):
        # Happy path: one port with a VIF gets its options updated.
        mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
                                  'portgroups': {}}
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            opts = pxe_utils.dhcp_options_for_instance(task)
            api = dhcp_factory.DHCPFactory()
            api.update_dhcp(task, opts)
        mock_updo.assert_called_once_with('vif-uuid', opts)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
    @mock.patch('ironic.common.network.get_node_vif_ids')
    def test_update_dhcp_no_vif_data(self, mock_gnvi, mock_updo):
        # With no VIFs at all, update_dhcp must fail and never call neutron.
        mock_gnvi.return_value = {'portgroups': {}, 'ports': {}}
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory()
            self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
                              api.update_dhcp, task, self.node)
        self.assertFalse(mock_updo.called)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
    @mock.patch('ironic.common.network.get_node_vif_ids')
    def test_update_dhcp_some_failures(self, mock_gnvi, mock_updo):
        # confirm update is called twice, one fails, but no exception raised
        mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
                                  'portgroups': {}}
        exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
        mock_updo.side_effect = [None, exc]
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory()
            api.update_dhcp(task, self.node)
        mock_gnvi.assert_called_once_with(task)
        self.assertEqual(2, mock_updo.call_count)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
    @mock.patch('ironic.common.network.get_node_vif_ids')
    def test_update_dhcp_fails(self, mock_gnvi, mock_updo):
        # confirm update is called twice, both fail, and exception is raised
        mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
                                  'portgroups': {}}
        exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
        mock_updo.side_effect = [exc, exc]
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory()
            self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
                              api.update_dhcp,
                              task, self.node)
        mock_gnvi.assert_called_once_with(task)
        self.assertEqual(2, mock_updo.call_count)
    @mock.patch.object(neutron, 'LOG', autospec=True)
    @mock.patch('time.sleep', autospec=True)
    @mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
                       autospec=True)
    @mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
    def test_update_dhcp_set_sleep_and_fake(self, mock_gnvi, mock_updo,
                                            mock_ts, mock_log):
        # With port_setup_delay configured, update_dhcp logs and sleeps.
        mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
                                  'portgroups': {}}
        self.config(port_setup_delay=30, group='neutron')
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            opts = pxe_utils.dhcp_options_for_instance(task)
            api = dhcp_factory.DHCPFactory()
            api.update_dhcp(task, opts)
            mock_log.debug.assert_called_once_with(
                "Waiting %d seconds for Neutron.", 30)
            mock_ts.assert_called_with(30)
        mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts)
    @mock.patch.object(neutron, 'LOG', autospec=True)
    @mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
                       autospec=True)
    @mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
    def test_update_dhcp_unset_sleep_and_fake(self, mock_gnvi, mock_updo,
                                              mock_log):
        # Default (zero) port_setup_delay: no debug log, no sleep.
        mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
                                  'portgroups': {}}
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            opts = pxe_utils.dhcp_options_for_instance(task)
            api = dhcp_factory.DHCPFactory()
            api.update_dhcp(task, opts)
            mock_log.debug.assert_not_called()
        mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts)
    def test__get_fixed_ip_address(self):
        # A port with a valid IPv4 fixed_ip returns that address.
        port_id = 'fake-port-id'
        expected = "192.168.1.3"
        api = dhcp_factory.DHCPFactory().provider
        port_data = {
            "id": port_id,
            "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
            "admin_state_up": True,
            "status": "ACTIVE",
            "mac_address": "fa:16:3e:4c:2c:30",
            "fixed_ips": [
                {
                    "ip_address": "192.168.1.3",
                    "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
                }
            ],
            "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
        }
        fake_client = mock.Mock()
        fake_client.show_port.return_value = {'port': port_data}
        result = api._get_fixed_ip_address(port_id, fake_client)
        self.assertEqual(expected, result)
        fake_client.show_port.assert_called_once_with(port_id)
    def test__get_fixed_ip_address_invalid_ip(self):
        # A malformed fixed_ip address raises InvalidIPv4Address.
        port_id = 'fake-port-id'
        api = dhcp_factory.DHCPFactory().provider
        port_data = {
            "id": port_id,
            "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
            "admin_state_up": True,
            "status": "ACTIVE",
            "mac_address": "fa:16:3e:4c:2c:30",
            "fixed_ips": [
                {
                    "ip_address": "invalid.ip",
                    "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
                }
            ],
            "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
        }
        fake_client = mock.Mock()
        fake_client.show_port.return_value = {'port': port_data}
        self.assertRaises(exception.InvalidIPv4Address,
                          api._get_fixed_ip_address,
                          port_id, fake_client)
        fake_client.show_port.assert_called_once_with(port_id)
    def test__get_fixed_ip_address_with_exception(self):
        # A neutron client failure is translated into the ironic exception.
        port_id = 'fake-port-id'
        api = dhcp_factory.DHCPFactory().provider
        fake_client = mock.Mock()
        fake_client.show_port.side_effect = (
            neutron_client_exc.NeutronClientException())
        self.assertRaises(exception.FailedToGetIPAddressOnPort,
                          api._get_fixed_ip_address, port_id, fake_client)
        fake_client.show_port.assert_called_once_with(port_id)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def _test__get_port_ip_address(self, mock_gfia, network):
        # Helper: the VIF is stored under a different internal_info key per
        # network type; the right one must be passed on to
        # _get_fixed_ip_address.
        expected = "192.168.1.3"
        fake_vif = 'test-vif-%s' % network
        port = object_utils.create_test_port(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={
                'cleaning_vif_port_id': (fake_vif if network == 'cleaning'
                                         else None),
                'provisioning_vif_port_id': (fake_vif
                                             if network == 'provisioning'
                                             else None),
                'tenant_vif_port_id': (fake_vif if network == 'tenant'
                                       else None),
            }
        )
        mock_gfia.return_value = expected
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            result = api._get_port_ip_address(task, port,
                                              mock.sentinel.client)
        self.assertEqual(expected, result)
        mock_gfia.assert_called_once_with(fake_vif, mock.sentinel.client)
    def test__get_port_ip_address_tenant(self):
        self._test__get_port_ip_address(network='tenant')
    def test__get_port_ip_address_cleaning(self):
        self._test__get_port_ip_address(network='cleaning')
    def test__get_port_ip_address_provisioning(self):
        self._test__get_port_ip_address(network='provisioning')
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def test__get_port_ip_address_for_portgroup(self, mock_gfia):
        # Portgroups resolve their IP the same way ports do.
        expected = "192.168.1.3"
        pg = object_utils.create_test_portgroup(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={'tenant_vif_port_id': 'test-vif-A'})
        mock_gfia.return_value = expected
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            result = api._get_port_ip_address(task, pg,
                                              mock.sentinel.client)
        self.assertEqual(expected, result)
        mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def test__get_port_ip_address_with_exception(self, mock_gfia):
        # A port without any VIF info cannot yield an address.
        expected = "192.168.1.3"
        port = object_utils.create_test_port(self.context,
                                             node_id=self.node.id,
                                             address='aa:bb:cc:dd:ee:ff',
                                             uuid=uuidutils.generate_uuid())
        mock_gfia.return_value = expected
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            self.assertRaises(exception.FailedToGetIPAddressOnPort,
                              api._get_port_ip_address, task, port,
                              mock.sentinel.client)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def test__get_port_ip_address_for_portgroup_with_exception(
            self, mock_gfia):
        # Same failure mode for a VIF-less portgroup.
        expected = "192.168.1.3"
        pg = object_utils.create_test_portgroup(self.context,
                                                node_id=self.node.id,
                                                address='aa:bb:cc:dd:ee:ff',
                                                uuid=uuidutils.generate_uuid())
        mock_gfia.return_value = expected
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            self.assertRaises(exception.FailedToGetIPAddressOnPort,
                              api._get_port_ip_address, task, pg,
                              mock.sentinel.client)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def _test__get_ip_addresses_ports(self, key, mock_gfia):
        # Helper: the VIF may live in 'extra' (legacy) or 'internal_info'.
        if key == "extra":
            kwargs1 = {key: {'vif_port_id': 'test-vif-A'}}
        else:
            kwargs1 = {key: {'tenant_vif_port_id': 'test-vif-A'}}
        ip_address = '10.10.0.1'
        expected = [ip_address]
        port = object_utils.create_test_port(self.context,
                                             node_id=self.node.id,
                                             address='aa:bb:cc:dd:ee:ff',
                                             uuid=uuidutils.generate_uuid(),
                                             **kwargs1)
        mock_gfia.return_value = ip_address
        with task_manager.acquire(self.context, self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            result = api._get_ip_addresses(task, [port],
                                           mock.sentinel.client)
        self.assertEqual(expected, result)
    def test__get_ip_addresses_ports_extra(self):
        self._test__get_ip_addresses_ports('extra')
    def test__get_ip_addresses_ports_int_info(self):
        self._test__get_ip_addresses_ports('internal_info')
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
    def _test__get_ip_addresses_portgroup(self, key, mock_gfia):
        # Same extra/internal_info duality for portgroups.
        if key == "extra":
            kwargs1 = {key: {'vif_port_id': 'test-vif-A'}}
        else:
            kwargs1 = {key: {'tenant_vif_port_id': 'test-vif-A'}}
        ip_address = '10.10.0.1'
        expected = [ip_address]
        pg = object_utils.create_test_portgroup(
            self.context, node_id=self.node.id,
            address='aa:bb:cc:dd:ee:ff', uuid=uuidutils.generate_uuid(),
            **kwargs1)
        mock_gfia.return_value = ip_address
        with task_manager.acquire(self.context, self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            result = api._get_ip_addresses(task, [pg], mock.sentinel.client)
        self.assertEqual(expected, result)
    def test__get_ip_addresses_portgroup_extra(self):
        self._test__get_ip_addresses_portgroup('extra')
    def test__get_ip_addresses_portgroup_int_info(self):
        self._test__get_ip_addresses_portgroup('internal_info')
    @mock.patch('ironic.common.neutron.get_client', autospec=True)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address')
    def test_get_ip_addresses(self, get_ip_mock, client_mock):
        # get_ip_addresses resolves the IP for each of the task's ports.
        ip_address = '10.10.0.1'
        expected = [ip_address]
        get_ip_mock.return_value = ip_address
        with task_manager.acquire(self.context, self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            result = api.get_ip_addresses(task)
            get_ip_mock.assert_called_once_with(task, task.ports[0],
                                                client_mock.return_value)
        self.assertEqual(expected, result)
    @mock.patch('ironic.common.neutron.get_client', autospec=True)
    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address')
    def test_get_ip_addresses_for_port_and_portgroup(self, get_ip_mock,
                                                     client_mock):
        # Both ports and portgroups are covered by a single call.
        object_utils.create_test_portgroup(
            self.context, node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
            uuid=uuidutils.generate_uuid(),
            internal_info={'tenant_vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid) as task:
            api = dhcp_factory.DHCPFactory().provider
            api.get_ip_addresses(task)
            get_ip_mock.assert_has_calls(
                [mock.call(task, task.ports[0], client_mock.return_value),
                 mock.call(task, task.portgroups[0], client_mock.return_value)]
            )
| |
"""The tests for sensor recorder platform."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
import importlib
import json
import sys
from unittest.mock import patch, sentinel
import pytest
from pytest import approx
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from homeassistant.components import recorder
from homeassistant.components.recorder import SQLITE_URL_PREFIX, history, statistics
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import (
StatisticsShortTerm,
process_timestamp_to_utc_isoformat,
)
from homeassistant.components.recorder.statistics import (
async_add_external_statistics,
delete_duplicates,
get_last_short_term_statistics,
get_last_statistics,
get_metadata,
list_statistic_ids,
statistics_during_period,
)
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant, mock_registry
from tests.components.recorder.common import wait_recording_done
def test_compile_hourly_statistics(hass_recorder):
    """Test compiling hourly statistics."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    # record_states (defined elsewhere in this module) writes a known series
    # of sensor states between `zero` and `four`.
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)
    # Nothing compiled yet: every statistics query must come back empty.
    for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
        stats = statistics_during_period(hass, zero, period="5minute", **kwargs)
        assert stats == {}
    stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}
    # Compile two 5-minute periods, then wait for the recorder to flush.
    recorder.do_adhoc_statistics(start=zero)
    recorder.do_adhoc_statistics(start=four)
    wait_recording_done(hass)
    expected_1 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(zero),
        "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
        "mean": approx(14.915254237288135),
        "min": approx(10.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_2 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(four),
        "end": process_timestamp_to_utc_isoformat(four + timedelta(minutes=5)),
        "mean": approx(20.0),
        "min": approx(20.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_stats1 = [
        {**expected_1, "statistic_id": "sensor.test1"},
        {**expected_2, "statistic_id": "sensor.test1"},
    ]
    expected_stats2 = [
        {**expected_1, "statistic_id": "sensor.test2"},
        {**expected_2, "statistic_id": "sensor.test2"},
    ]
    # Test statistics_during_period
    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2}
    stats = statistics_during_period(
        hass, zero, statistic_ids=["sensor.test2"], period="5minute"
    )
    assert stats == {"sensor.test2": expected_stats2}
    stats = statistics_during_period(
        hass, zero, statistic_ids=["sensor.test3"], period="5minute"
    )
    assert stats == {}
    # Test get_last_short_term_statistics
    stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}
    stats = get_last_short_term_statistics(hass, 1, "sensor.test1", True)
    assert stats == {"sensor.test1": [{**expected_2, "statistic_id": "sensor.test1"}]}
    stats = get_last_short_term_statistics(hass, 2, "sensor.test1", True)
    assert stats == {"sensor.test1": expected_stats1[::-1]}
    # Asking for more periods than exist returns everything (newest first).
    stats = get_last_short_term_statistics(hass, 3, "sensor.test1", True)
    assert stats == {"sensor.test1": expected_stats1[::-1]}
    stats = get_last_short_term_statistics(hass, 1, "sensor.test3", True)
    assert stats == {}
@pytest.fixture
def mock_sensor_statistics():
    """Generate some fake statistics."""

    def _stats_for(entity_id, start):
        """Build one fake statistics entry for *entity_id*."""
        meta = {
            "statistic_id": entity_id,
            "unit_of_measurement": "dogs",
            "has_mean": True,
            "has_sum": False,
        }
        return {"meta": meta, "stat": {"start": start}}

    def _fake_compile(_hass, start, _end):
        # Same three entities, in the same order, for every period.
        entity_ids = ("sensor.test1", "sensor.test2", "sensor.test3")
        return [_stats_for(entity_id, start) for entity_id in entity_ids]

    with patch(
        "homeassistant.components.sensor.recorder.compile_statistics",
        side_effect=_fake_compile,
    ):
        yield
@pytest.fixture
def mock_from_stats():
    """Mock out Statistics.from_stats."""
    state = {"failed_once": False}
    real_from_stats = StatisticsShortTerm.from_stats

    def _flaky_from_stats(metadata_id, stats):
        # Return None exactly once, for metadata_id 2, then pass through
        # to the real implementation.
        if not state["failed_once"] and metadata_id == 2:
            state["failed_once"] = True
            return None
        return real_from_stats(metadata_id, stats)

    with patch(
        "homeassistant.components.recorder.statistics.StatisticsShortTerm.from_stats",
        side_effect=_flaky_from_stats,
        autospec=True,
    ):
        yield
def test_compile_periodic_statistics_exception(
    hass_recorder, mock_sensor_statistics, mock_from_stats
):
    """Test exception handling when compiling periodic statistics."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    now = dt_util.utcnow()
    # mock_from_stats makes the very first insert for metadata_id 2
    # (sensor.test2) return None, simulating a failure for that period.
    recorder.do_adhoc_statistics(start=now)
    recorder.do_adhoc_statistics(start=now + timedelta(minutes=5))
    wait_recording_done(hass)
    expected_1 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(now),
        "end": process_timestamp_to_utc_isoformat(now + timedelta(minutes=5)),
        "mean": None,
        "min": None,
        "max": None,
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_2 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(now + timedelta(minutes=5)),
        "end": process_timestamp_to_utc_isoformat(now + timedelta(minutes=10)),
        "mean": None,
        "min": None,
        "max": None,
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_stats1 = [
        {**expected_1, "statistic_id": "sensor.test1"},
        {**expected_2, "statistic_id": "sensor.test1"},
    ]
    # sensor.test2 is missing its first period because of the mocked failure.
    expected_stats2 = [
        {**expected_2, "statistic_id": "sensor.test2"},
    ]
    expected_stats3 = [
        {**expected_1, "statistic_id": "sensor.test3"},
        {**expected_2, "statistic_id": "sensor.test3"},
    ]
    stats = statistics_during_period(hass, now, period="5minute")
    assert stats == {
        "sensor.test1": expected_stats1,
        "sensor.test2": expected_stats2,
        "sensor.test3": expected_stats3,
    }
def test_rename_entity(hass_recorder):
    """Test statistics is migrated when entity_id is changed."""
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    entity_reg = mock_registry(hass)
    # Registry mutations must happen on the event loop, hence the callbacks.
    @callback
    def add_entry():
        reg_entry = entity_reg.async_get_or_create(
            "sensor",
            "test",
            "unique_0000",
            suggested_object_id="test1",
        )
        assert reg_entry.entity_id == "sensor.test1"
    hass.add_job(add_entry)
    hass.block_till_done()
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)
    # Nothing compiled yet: statistics queries come back empty.
    for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
        stats = statistics_during_period(hass, zero, period="5minute", **kwargs)
        assert stats == {}
    stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
    assert stats == {}
    recorder.do_adhoc_statistics(start=zero)
    wait_recording_done(hass)
    expected_1 = {
        "statistic_id": "sensor.test1",
        "start": process_timestamp_to_utc_isoformat(zero),
        "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
        "mean": approx(14.915254237288135),
        "min": approx(10.0),
        "max": approx(20.0),
        "last_reset": None,
        "state": None,
        "sum": None,
    }
    expected_stats1 = [
        {**expected_1, "statistic_id": "sensor.test1"},
    ]
    expected_stats2 = [
        {**expected_1, "statistic_id": "sensor.test2"},
    ]
    expected_stats99 = [
        {**expected_1, "statistic_id": "sensor.test99"},
    ]
    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2}
    @callback
    def rename_entry():
        entity_reg.async_update_entity("sensor.test1", new_entity_id="sensor.test99")
    hass.add_job(rename_entry)
    hass.block_till_done()
    # Existing statistics rows must follow the entity to its new id.
    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {"sensor.test99": expected_stats99, "sensor.test2": expected_stats2}
def test_statistics_duplicated(hass_recorder, caplog):
    """Test statistics with same start time is not compiled.

    Requesting adhoc statistics twice for the same start time must compile
    them only once; the second request is skipped with a log message.
    """
    hass = hass_recorder()
    recorder = hass.data[DATA_INSTANCE]
    setup_component(hass, "sensor", {})
    zero, four, states = record_states(hass)
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    with patch(
        "homeassistant.components.sensor.recorder.compile_statistics"
    ) as compile_statistics:
        # First run for this start time: statistics are compiled.
        recorder.do_adhoc_statistics(start=zero)
        wait_recording_done(hass)
        assert compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" in caplog.text
        assert "Statistics already compiled" not in caplog.text
        caplog.clear()
        # Second run for the same start time: compilation is skipped.
        recorder.do_adhoc_statistics(start=zero)
        wait_recording_done(hass)
        assert not compile_statistics.called
        compile_statistics.reset_mock()
        assert "Compiling statistics for" not in caplog.text
        assert "Statistics already compiled" in caplog.text
        caplog.clear()
def test_external_statistics(hass_recorder, caplog):
    """Test inserting external statistics.

    Inserts two hourly rows for an external statistic and verifies they can
    be queried, then re-inserts rows for an existing start time and verifies
    the stored row is updated in place (a partial row only overwrites the
    columns it provides).
    """
    hass = hass_recorder()
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    zero = dt_util.utcnow()
    period1 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
    period2 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=2)
    external_statistics1 = {
        "start": period1,
        "last_reset": None,
        "state": 0,
        "sum": 2,
    }
    external_statistics2 = {
        "start": period2,
        "last_reset": None,
        "state": 1,
        "sum": 3,
    }
    external_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    async_add_external_statistics(
        hass, external_metadata, (external_statistics1, external_statistics2)
    )
    wait_recording_done(hass)
    stats = statistics_during_period(hass, zero, period="hour")
    assert stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": period1.isoformat(),
                "end": (period1 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(0.0),
                "sum": approx(2.0),
            },
            {
                "statistic_id": "test:total_energy_import",
                "start": period2.isoformat(),
                "end": (period2 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
        ]
    }
    statistic_ids = list_statistic_ids(hass)
    assert statistic_ids == [
        {
            "statistic_id": "test:total_energy_import",
            "name": "Total imported energy",
            "source": "test",
            "unit_of_measurement": "kWh",
        }
    ]
    metadata = get_metadata(hass, statistic_ids=("test:total_energy_import",))
    assert metadata == {
        "test:total_energy_import": (
            1,
            {
                "has_mean": False,
                "has_sum": True,
                "name": "Total imported energy",
                "source": "test",
                "statistic_id": "test:total_energy_import",
                "unit_of_measurement": "kWh",
            },
        )
    }
    last_stats = get_last_statistics(hass, 1, "test:total_energy_import", True)
    assert last_stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": period2.isoformat(),
                "end": (period2 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
        ]
    }
    # Update the previously inserted statistics
    external_statistics = {
        "start": period1,
        "last_reset": None,
        "state": 5,
        "sum": 6,
    }
    async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    # The period1 row is updated; the period2 row is untouched.
    stats = statistics_during_period(hass, zero, period="hour")
    assert stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": period1.isoformat(),
                "end": (period1 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(5.0),
                "sum": approx(6.0),
            },
            {
                "statistic_id": "test:total_energy_import",
                "start": period2.isoformat(),
                "end": (period2 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
        ]
    }
    # Update the previously inserted statistics
    external_statistics = {
        "start": period1,
        "max": 1,
        "mean": 2,
        "min": 3,
        "last_reset": None,
        "state": 4,
        "sum": 5,
    }
    async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    # This update also supplies max/mean/min, which were previously None.
    stats = statistics_during_period(hass, zero, period="hour")
    assert stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": period1.isoformat(),
                "end": (period1 + timedelta(hours=1)).isoformat(),
                "max": approx(1.0),
                "mean": approx(2.0),
                "min": approx(3.0),
                "last_reset": None,
                "state": approx(4.0),
                "sum": approx(5.0),
            },
            {
                "statistic_id": "test:total_energy_import",
                "start": period2.isoformat(),
                "end": (period2 + timedelta(hours=1)).isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
        ]
    }
def test_external_statistics_errors(hass_recorder, caplog):
    """Test validation of external statistics.

    Each invalid insert must raise HomeAssistantError and leave the
    database completely untouched (no statistics, no metadata).
    """
    hass = hass_recorder()
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    zero = dt_util.utcnow()
    period1 = zero.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
    # Valid baseline row and metadata; each case below perturbs one aspect.
    _external_statistics = {
        "start": period1,
        "last_reset": None,
        "state": 0,
        "sum": 2,
    }
    _external_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    # Attempt to insert statistics for an entity
    external_metadata = {
        **_external_metadata,
        "statistic_id": "sensor.total_energy_import",
    }
    external_statistics = {**_external_statistics}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("sensor.total_energy_import",)) == {}
    # Attempt to insert statistics for the wrong domain
    external_metadata = {**_external_metadata, "source": "other"}
    external_statistics = {**_external_statistics}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
    # Attempt to insert statistics for a naive starting time
    external_metadata = {**_external_metadata}
    external_statistics = {
        **_external_statistics,
        "start": period1.replace(tzinfo=None),
    }
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
    # Attempt to insert statistics for an invalid starting time
    external_metadata = {**_external_metadata}
    external_statistics = {**_external_statistics, "start": period1.replace(minute=1)}
    with pytest.raises(HomeAssistantError):
        async_add_external_statistics(hass, external_metadata, (external_statistics,))
    wait_recording_done(hass)
    assert statistics_during_period(hass, zero, period="hour") == {}
    assert list_statistic_ids(hass) == []
    assert get_metadata(hass, statistic_ids=("test:total_energy_import",)) == {}
@pytest.mark.parametrize("timezone", ["America/Regina", "Europe/Vienna", "UTC"])
@pytest.mark.freeze_time("2021-08-01 00:00:00+00:00")
def test_monthly_statistics(hass_recorder, caplog, timezone):
    """Test monthly aggregation of inserted external statistics.

    Inserts hourly rows spanning September and October and verifies the
    "month" period returns one aggregated row per month, with month
    boundaries computed in the configured default time zone.
    """
    dt_util.set_default_time_zone(dt_util.get_time_zone(timezone))
    hass = hass_recorder()
    wait_recording_done(hass)
    assert "Compiling statistics for" not in caplog.text
    assert "Statistics already compiled" not in caplog.text
    zero = dt_util.utcnow()
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    external_statistics = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 2,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 3,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 4,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    )
    external_metadata = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import",
        "unit_of_measurement": "kWh",
    }
    async_add_external_statistics(hass, external_metadata, external_statistics)
    wait_recording_done(hass)
    stats = statistics_during_period(hass, zero, period="month")
    sep_start = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    sep_end = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    oct_start = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    oct_end = dt_util.as_utc(dt_util.parse_datetime("2021-11-01 00:00:00"))
    # Each month reports the last state/sum recorded within that month.
    assert stats == {
        "test:total_energy_import": [
            {
                "statistic_id": "test:total_energy_import",
                "start": sep_start.isoformat(),
                "end": sep_end.isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(1.0),
                "sum": approx(3.0),
            },
            {
                "statistic_id": "test:total_energy_import",
                "start": oct_start.isoformat(),
                "end": oct_end.isoformat(),
                "max": None,
                "mean": None,
                "min": None,
                "last_reset": None,
                "state": approx(3.0),
                "sum": approx(5.0),
            },
        ]
    }
    # Restore the default time zone so other tests are not affected.
    dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
def _create_engine_test(*args, **kwargs):
    """Test version of create_engine that initializes with old schema.

    This simulates an existing db with the old schema.
    """
    schema_module = "tests.components.recorder.models_schema_23"
    importlib.import_module(schema_module)
    legacy_models = sys.modules[schema_module]
    # Build the engine, create the legacy tables, and seed the bookkeeping
    # rows (statistics run + schema version) a real old install would have.
    engine = create_engine(*args, **kwargs)
    legacy_models.Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(recorder.models.StatisticsRuns(start=statistics.get_start_time()))
        session.add(
            recorder.models.SchemaChanges(schema_version=legacy_models.SCHEMA_VERSION)
        )
        session.commit()
    return engine
def test_delete_duplicates(caplog, tmpdir):
    """Test removal of duplicated statistics.

    Seeds a schema-23 database with one exact duplicate per energy tariff,
    then verifies the migration to the current schema deletes exactly those
    duplicated rows.
    """
    test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
    module = "tests.components.recorder.models_schema_23"
    importlib.import_module(module)
    old_models = sys.modules[module]
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    # Note: the last two rows are identical duplicates (same start, same data).
    external_energy_statistics_1 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 2,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 3,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 4,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    )
    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    external_energy_statistics_2 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 20,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 30,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 40,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
    )
    external_energy_metadata_2 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_2",
        "unit_of_measurement": "kWh",
    }
    # CO2 statistics contain no duplicates and must survive unchanged.
    external_co2_statistics = (
        {
            "start": period1,
            "last_reset": None,
            "mean": 10,
        },
        {
            "start": period2,
            "last_reset": None,
            "mean": 30,
        },
        {
            "start": period3,
            "last_reset": None,
            "mean": 60,
        },
        {
            "start": period4,
            "last_reset": None,
            "mean": 90,
        },
    )
    external_co2_metadata = {
        "has_mean": True,
        "has_sum": False,
        "name": "Fossil percentage",
        "source": "test",
        "statistic_id": "test:fossil_percentage",
        "unit_of_measurement": "%",
    }
    # Create some duplicated statistics with schema version 23
    with patch.object(recorder, "models", old_models), patch.object(
        recorder.migration, "SCHEMA_VERSION", old_models.SCHEMA_VERSION
    ), patch(
        "homeassistant.components.recorder.create_engine", new=_create_engine_test
    ):
        hass = get_test_home_assistant()
        setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
        wait_recording_done(hass)
        wait_recording_done(hass)
        with session_scope(hass=hass) as session:
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_1)
            )
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_2)
            )
            session.add(recorder.models.StatisticsMeta.from_meta(external_co2_metadata))
        with session_scope(hass=hass) as session:
            for stat in external_energy_statistics_1:
                session.add(recorder.models.Statistics.from_stats(1, stat))
            for stat in external_energy_statistics_2:
                session.add(recorder.models.Statistics.from_stats(2, stat))
            for stat in external_co2_statistics:
                session.add(recorder.models.Statistics.from_stats(3, stat))
        hass.stop()
    # Test that the duplicates are removed during migration from schema 23
    hass = get_test_home_assistant()
    setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
    hass.start()
    wait_recording_done(hass)
    wait_recording_done(hass)
    hass.stop()
    # One duplicate per tariff was seeded, so exactly 2 rows are deleted.
    assert "Deleted 2 duplicated statistics rows" in caplog.text
    assert "Found non identical" not in caplog.text
    assert "Found duplicated" not in caplog.text
def test_delete_duplicates_many(caplog, tmpdir):
    """Test removal of duplicated statistics.

    Same as test_delete_duplicates but additionally seeds 3000 extra copies
    of one row to exercise duplicate deletion in bulk.
    """
    test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
    module = "tests.components.recorder.models_schema_23"
    importlib.import_module(module)
    old_models = sys.modules[module]
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    # Note: the last two rows are identical duplicates (same start, same data).
    external_energy_statistics_1 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 2,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 3,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 4,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    )
    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    external_energy_statistics_2 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 20,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 30,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 40,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
    )
    external_energy_metadata_2 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_2",
        "unit_of_measurement": "kWh",
    }
    # CO2 statistics contain no duplicates and must survive unchanged.
    external_co2_statistics = (
        {
            "start": period1,
            "last_reset": None,
            "mean": 10,
        },
        {
            "start": period2,
            "last_reset": None,
            "mean": 30,
        },
        {
            "start": period3,
            "last_reset": None,
            "mean": 60,
        },
        {
            "start": period4,
            "last_reset": None,
            "mean": 90,
        },
    )
    external_co2_metadata = {
        "has_mean": True,
        "has_sum": False,
        "name": "Fossil percentage",
        "source": "test",
        "statistic_id": "test:fossil_percentage",
        "unit_of_measurement": "%",
    }
    # Create some duplicated statistics with schema version 23
    with patch.object(recorder, "models", old_models), patch.object(
        recorder.migration, "SCHEMA_VERSION", old_models.SCHEMA_VERSION
    ), patch(
        "homeassistant.components.recorder.create_engine", new=_create_engine_test
    ):
        hass = get_test_home_assistant()
        setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
        wait_recording_done(hass)
        wait_recording_done(hass)
        with session_scope(hass=hass) as session:
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_1)
            )
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_2)
            )
            session.add(recorder.models.StatisticsMeta.from_meta(external_co2_metadata))
        with session_scope(hass=hass) as session:
            for stat in external_energy_statistics_1:
                session.add(recorder.models.Statistics.from_stats(1, stat))
            # Add 3000 more copies of the already-duplicated last row.
            for _ in range(3000):
                session.add(
                    recorder.models.Statistics.from_stats(
                        1, external_energy_statistics_1[-1]
                    )
                )
            for stat in external_energy_statistics_2:
                session.add(recorder.models.Statistics.from_stats(2, stat))
            for stat in external_co2_statistics:
                session.add(recorder.models.Statistics.from_stats(3, stat))
        hass.stop()
    # Test that the duplicates are removed during migration from schema 23
    hass = get_test_home_assistant()
    setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
    hass.start()
    wait_recording_done(hass)
    wait_recording_done(hass)
    hass.stop()
    # 2 seeded duplicates + 3000 bulk copies = 3002 deleted rows.
    assert "Deleted 3002 duplicated statistics rows" in caplog.text
    assert "Found non identical" not in caplog.text
    assert "Found duplicated" not in caplog.text
@pytest.mark.freeze_time("2021-08-01 00:00:00+00:00")
def test_delete_duplicates_non_identical(caplog, tmpdir):
    """Test removal of duplicated statistics.

    One pair of duplicates has differing data (sum 5 vs 6): the migration
    must delete the older row but back it up to a JSON file in .storage.
    """
    test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
    module = "tests.components.recorder.models_schema_23"
    importlib.import_module(module)
    old_models = sys.modules[module]
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00"))
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    # The last two rows share a start time but differ in "sum" (5 vs 6):
    # a non-identical duplicate.
    external_energy_statistics_1 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 2,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 3,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 4,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 6,
        },
    )
    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    # The last two rows here are identical duplicates.
    external_energy_statistics_2 = (
        {
            "start": period1,
            "last_reset": None,
            "state": 0,
            "sum": 20,
        },
        {
            "start": period2,
            "last_reset": None,
            "state": 1,
            "sum": 30,
        },
        {
            "start": period3,
            "last_reset": None,
            "state": 2,
            "sum": 40,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
        {
            "start": period4,
            "last_reset": None,
            "state": 3,
            "sum": 50,
        },
    )
    external_energy_metadata_2 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_2",
        "unit_of_measurement": "kWh",
    }
    # Create some duplicated statistics with schema version 23
    with patch.object(recorder, "models", old_models), patch.object(
        recorder.migration, "SCHEMA_VERSION", old_models.SCHEMA_VERSION
    ), patch(
        "homeassistant.components.recorder.create_engine", new=_create_engine_test
    ):
        hass = get_test_home_assistant()
        setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
        wait_recording_done(hass)
        wait_recording_done(hass)
        with session_scope(hass=hass) as session:
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_1)
            )
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_2)
            )
        with session_scope(hass=hass) as session:
            for stat in external_energy_statistics_1:
                session.add(recorder.models.Statistics.from_stats(1, stat))
            for stat in external_energy_statistics_2:
                session.add(recorder.models.Statistics.from_stats(2, stat))
        hass.stop()
    # Test that the duplicates are removed during migration from schema 23
    hass = get_test_home_assistant()
    # Point config_dir at tmpdir so the backup file is written there.
    hass.config.config_dir = tmpdir
    setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
    hass.start()
    wait_recording_done(hass)
    wait_recording_done(hass)
    hass.stop()
    assert "Deleted 2 duplicated statistics rows" in caplog.text
    assert "Deleted 1 non identical" in caplog.text
    assert "Found duplicated" not in caplog.text
    # The non-identical pair must be preserved in the JSON backup file
    # (timestamps are deterministic thanks to freeze_time).
    isotime = dt_util.utcnow().isoformat()
    backup_file_name = f".storage/deleted_statistics.{isotime}.json"
    with open(hass.config.path(backup_file_name)) as backup_file:
        backup = json.load(backup_file)
    assert backup == [
        {
            "duplicate": {
                "created": "2021-08-01T00:00:00",
                "id": 4,
                "last_reset": None,
                "max": None,
                "mean": None,
                "metadata_id": 1,
                "min": None,
                "start": "2021-10-31T23:00:00",
                "state": 3.0,
                "sum": 5.0,
            },
            "original": {
                "created": "2021-08-01T00:00:00",
                "id": 5,
                "last_reset": None,
                "max": None,
                "mean": None,
                "metadata_id": 1,
                "min": None,
                "start": "2021-10-31T23:00:00",
                "state": 3.0,
                "sum": 6.0,
            },
        }
    ]
def test_delete_duplicates_short_term(caplog, tmpdir):
    """Test removal of duplicated statistics.

    Seeds a duplicated short-term statistics row in a schema-23 database
    and verifies the migration deletes it (short-term rows are handled
    separately from the long-term statistics table).
    """
    test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
    dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
    module = "tests.components.recorder.models_schema_23"
    importlib.import_module(module)
    old_models = sys.modules[module]
    period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00"))
    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    statistic_row = {
        "start": period4,
        "last_reset": None,
        "state": 3,
        "sum": 5,
    }
    # Create some duplicated statistics with schema version 23
    with patch.object(recorder, "models", old_models), patch.object(
        recorder.migration, "SCHEMA_VERSION", old_models.SCHEMA_VERSION
    ), patch(
        "homeassistant.components.recorder.create_engine", new=_create_engine_test
    ):
        hass = get_test_home_assistant()
        setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
        wait_recording_done(hass)
        wait_recording_done(hass)
        with session_scope(hass=hass) as session:
            session.add(
                recorder.models.StatisticsMeta.from_meta(external_energy_metadata_1)
            )
        with session_scope(hass=hass) as session:
            # Insert the same short-term row twice to create the duplicate.
            session.add(
                recorder.models.StatisticsShortTerm.from_stats(1, statistic_row)
            )
            session.add(
                recorder.models.StatisticsShortTerm.from_stats(1, statistic_row)
            )
        hass.stop()
    # Test that the duplicates are removed during migration from schema 23
    hass = get_test_home_assistant()
    hass.config.config_dir = tmpdir
    setup_component(hass, "recorder", {"recorder": {"db_url": dburl}})
    hass.start()
    wait_recording_done(hass)
    wait_recording_done(hass)
    hass.stop()
    assert "duplicated statistics rows" not in caplog.text
    assert "Found non identical" not in caplog.text
    assert "Deleted duplicated short term statistic" in caplog.text
def test_delete_duplicates_no_duplicates(hass_recorder, caplog):
    """Test delete_duplicates is a no-op when nothing is duplicated."""
    hass = hass_recorder()
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        delete_duplicates(hass.data[DATA_INSTANCE], session)
    # No deletion or duplicate-detection messages may have been logged.
    for unexpected_log in (
        "duplicated statistics rows",
        "Found non identical",
        "Found duplicated",
    ):
        assert unexpected_log not in caplog.text
def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):
    """Test the recorder does not blow up if statistics is duplicated.

    With the duplicate check disabled, inserting the same rows twice must
    trigger the database integrity error path: the duplicate insert is
    blocked and logged, and only the unique rows end up stored.
    """
    hass = hass_recorder()
    wait_recording_done(hass)
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    external_energy_statistics_1 = [
        {
            "start": period1,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    ]
    external_energy_statistics_2 = [
        {
            "start": period2,
            "last_reset": None,
            "state": 3,
            "sum": 6,
        }
    ]
    # Force _statistics_exists to report False so the duplicate insert is
    # attempted and must be stopped by the database unique constraint.
    with patch.object(
        statistics, "_statistics_exists", return_value=False
    ), patch.object(
        statistics, "_insert_statistics", wraps=statistics._insert_statistics
    ) as insert_statistics_mock:
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_1
        )
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_1
        )
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_2
        )
        wait_recording_done(hass)
        assert insert_statistics_mock.call_count == 3
    with session_scope(hass=hass) as session:
        # Only the two unique rows were stored; the duplicate was rejected.
        tmp = session.query(recorder.models.Statistics).all()
        assert len(tmp) == 2
    assert "Blocked attempt to insert duplicated statistic rows" in caplog.text
def record_states(hass):
    """Record some test states.

    We inject a bunch of state updates for temperature sensors.

    :returns: Tuple (zero, four, states) where zero and four bound the
        recorded period and states maps entity_id to the recorded State
        objects.
    """
    mp = "media_player.test"
    sns1 = "sensor.test1"
    sns2 = "sensor.test2"
    sns3 = "sensor.test3"
    sns4 = "sensor.test4"
    # sns1/sns2 have a state_class and unit -> statistics are compiled;
    # sns3 lacks state_class, sns4 lacks all attributes.
    sns1_attr = {
        "device_class": "temperature",
        "state_class": "measurement",
        "unit_of_measurement": TEMP_CELSIUS,
    }
    sns2_attr = {
        "device_class": "humidity",
        "state_class": "measurement",
        "unit_of_measurement": "%",
    }
    sns3_attr = {"device_class": "temperature"}
    sns4_attr = {}
    def set_state(entity_id, state, **kwargs):
        """Set the state."""
        hass.states.set(entity_id, state, **kwargs)
        wait_recording_done(hass)
        return hass.states.get(entity_id)
    zero = dt_util.utcnow()
    one = zero + timedelta(seconds=1 * 5)
    two = one + timedelta(seconds=15 * 5)
    three = two + timedelta(seconds=30 * 5)
    four = three + timedelta(seconds=15 * 5)
    states = {mp: [], sns1: [], sns2: [], sns3: [], sns4: []}
    # Record states at three fixed points in time by patching utcnow.
    with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
        states[mp].append(
            set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
        )
        states[mp].append(
            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
        )
        states[sns1].append(set_state(sns1, "10", attributes=sns1_attr))
        states[sns2].append(set_state(sns2, "10", attributes=sns2_attr))
        states[sns3].append(set_state(sns3, "10", attributes=sns3_attr))
        states[sns4].append(set_state(sns4, "10", attributes=sns4_attr))
    with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
        states[sns1].append(set_state(sns1, "15", attributes=sns1_attr))
        states[sns2].append(set_state(sns2, "15", attributes=sns2_attr))
        states[sns3].append(set_state(sns3, "15", attributes=sns3_attr))
        states[sns4].append(set_state(sns4, "15", attributes=sns4_attr))
    with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
        states[sns1].append(set_state(sns1, "20", attributes=sns1_attr))
        states[sns2].append(set_state(sns2, "20", attributes=sns2_attr))
        states[sns3].append(set_state(sns3, "20", attributes=sns3_attr))
        states[sns4].append(set_state(sns4, "20", attributes=sns4_attr))
    return zero, four, states
| |
###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ('SrdpToolProvider',)
import struct, binascii
import zope
from zope.interface import implementer
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.defer import Deferred, \
DeferredList, \
returnValue, \
inlineCallbacks
from srdp import SrdpFrameHeader
from interfaces import ISrdpProvider
from srdpprovider import SrdpProvider
def tabify(fields, formats, truncate = 120, filler = ['-', '+']):
   """
   Tabified output formatting.

   :param fields: Column values to render, or None to render a separator row.
   :param formats: Per-column format strings: an alignment character
      ('l' = left, 'r' = right, 'c' = center) followed by the column width,
      or '*' for the single flexible column absorbing the remaining width.
   :param truncate: Total line width; a value longer than its column is cut
      with a '..' suffix.
   :param filler: Two characters used for separator rows [line, crossing].
   :returns: The formatted line as a string.
   """
   ## compute total length of all fields
   ##
   totalLen = 0
   flexIndicators = 0
   flexIndicatorIndex = None
   ## NOTE: range/// used instead of xrange and /, keeping identical
   ## behavior on Python 2 while also working on Python 3.
   for i in range(len(formats)):
      ffmt = formats[i][1:]
      if ffmt != "*":
         totalLen += int(ffmt)
      else:
         flexIndicators += 1
         flexIndicatorIndex = i

   if flexIndicators > 1:
      raise Exception("more than 1 flex field indicator")

   ## reserve space for column separators (" | " or " + ")
   ##
   totalLen += 3 * (len(formats) - 1)

   if totalLen > truncate:
      raise Exception("cannot fit content in truncate length %d" % truncate)

   r = []
   for i in range(len(formats)):

      if i == flexIndicatorIndex:
         ## the flex column gets whatever width is left over
         N = truncate - totalLen
      else:
         N = int(formats[i][1:])

      if fields:
         s = str(fields[i])
         if len(s) > N:
            s = s[:N-2] + ".."
         l = N - len(s)
         m = formats[i][0]
      else:
         ## no fields: render a separator row
         s = ''
         l = N
         m = '+'

      if m == 'l':
         r.append(s + ' ' * l)
      elif m == 'r':
         r.append(' ' * l + s)
      elif m == 'c':
         ## floor division: identical to Python 2 int division
         c1 = l // 2
         c2 = l - c1
         r.append(' ' * c1 + s + ' ' * c2)
      elif m == '+':
         r.append(filler[0] * l)
      else:
         raise Exception("invalid field format")

   if m == '+':
      return (filler[0] + filler[1] + filler[0]).join(r)
   else:
      return ' | '.join(r)
@implementer(ISrdpProvider)
class SrdpToolProvider(SrdpProvider):
def __init__(self, config, edsDb, debug = False):
self._config = config
self._edsDb = edsDb
self._debug = debug
   def onChannelOpen(self, channel):
      """
      Channel-open callback: dispatch to the handler selected by the
      configured mode ('show', 'read', 'list' or 'monitor'), optionally
      after a configured delay for serial transports.
      """
      print 'Channel open ..'

      ## give a serial-attached device time to get ready before talking to it
      if self._config['transport'] == 'serial':
         delay = self._config['delay']
      else:
         delay = None

      mode = self._config['mode']
      modearg = self._config['modearg']
      self.LINELENGTH = self._config['linelength']

      ## map mode name to handler method
      cmdmap = {'show': self.showDevice,
                'read': self.readDevice,
                'list': self.listDevices,
                'monitor': self.monitorDevice}

      if cmdmap.has_key(mode):
         if delay:
            print "Giving the device %s seconds to get ready .." % delay
            reactor.callLater(delay, cmdmap[mode], modearg)
         else:
            cmdmap[mode](modearg)
      else:
         raise Exception("mode '%s' not implemented" % mode)
   def onChannelClose(self, reason):
      """
      Channel-close callback: log the close reason when debugging and stop
      the reactor to end the tool run.
      """
      print 'Channel closed.'
      if self._debug:
         log.msg(reason)
      reactor.stop()
   @inlineCallbacks
   def listDevices(self, _):
      """
      List all devices currently connected to the adapter.

      Prints one table row per device (index, UUID, EDS URI, register
      count); always closes the channel when done.
      """
      try:
         em = yield self.getDeviceEdsMap()
         im = yield self.getDeviceUuidMap()

         print
         print "SRDP Adapter: Connected Devices"
         print "==============================="
         print
         ## device index 1 is the adapter itself
         print "Adapter UUID    : %s" % (binascii.hexlify(im[1]))
         print "Adapter EDS URI : %s" % (em[1])
         print

         LINEFORMAT = ['r7', 'l32', 'l*', 'c9']
         print tabify(None, LINEFORMAT, self.LINELENGTH)
         print tabify(["Device", "UUID", "EDS URI", "Registers"], LINEFORMAT, self.LINELENGTH)
         print tabify(None, LINEFORMAT, self.LINELENGTH)
         for i in sorted(em.keys()):
            ## separator between the adapter row and the attached devices
            if i == 2:
               print tabify(None, LINEFORMAT, self.LINELENGTH, filler = ['.', '|'])
            eds = self._edsDb.getEdsByUri(em[i])
            print tabify([i, binascii.hexlify(im[i]), em[i], len(eds.registersByIndex)], LINEFORMAT, self.LINELENGTH)
         print tabify(None, LINEFORMAT, self.LINELENGTH)
         print

      finally:
         self.channel.close()
@inlineCallbacks
def showDevice(self, modearg):
"""
Show information for specified device.
"""
try:
device = int(modearg)
uuid = yield self.getUuid(device)
edsUri = yield self.getEdsUri(device)
print
print "SRDP Device: Register Map"
print "========================="
print
print "Device Index : %d" % device
print "Device UUID : %s" % (binascii.hexlify(uuid))
print "Device EDS URI : %s" % (edsUri)
print
eds = self._edsDb.getEdsByUri(edsUri)
if eds is None:
raise Exception("EDS for device not in database")
if self._config['write']:
res = self.writeRegisters(device, eds, self._config['write'])
LINEFORMAT = ["r9", "l30", "l10", "l8", "l8", "l8", "l10", "l*"]
print tabify(None, LINEFORMAT, self.LINELENGTH)
print tabify(["Register", "Path", "Access", "Optional", "Count", "Type", "Component", "Description"], LINEFORMAT, self.LINELENGTH)
print tabify(None, LINEFORMAT, self.LINELENGTH)
sysRegsDone = False
for k in sorted(eds.registersByIndex.keys()):
if not sysRegsDone and k >= 1024:
print tabify(None, LINEFORMAT, self.LINELENGTH, filler = ['.', '|'])
sysRegsDone = True
reg = eds.registersByIndex[k]
if type(reg['type']) == list:
rtype = 'dict:'
else:
rtype = reg['type']
print tabify([reg['index'],
reg['path'],
reg['access'],
reg['optional'],
reg['count'],
rtype,
"",
reg['desc']],
LINEFORMAT,
self.LINELENGTH)
if rtype == 'dict:':
for att in reg['type']:
print tabify(["",
"",
"",
"",
"",
" " + att["type"],
att["field"],
att["desc"]],
LINEFORMAT,
self.LINELENGTH)
print tabify(None, LINEFORMAT, self.LINELENGTH)
print
finally:
self.channel.close()
def writeRegistersAsync(self, device, eds, items):
dl = []
for reg, value in items:
register, data = eds.serialize(reg, value)
self.channel.writeRegister(device, register['index'], data)
dl.append(self.channel.writeRegister(device, register['index'], data))
return DeferredList(dl)
#@inlineCallbacks
def writeRegisters(self, device, eds, items):
for reg, value in items:
register, data = eds.serialize(reg, value)
#res = yield self.writeRegister(device, register['index'], data)
self.channel.writeRegister(device, register['index'], data)
#print "*", res
@inlineCallbacks
def readDevice(self, modearg):
"""
Read all current values from device registers (that allow to "read").
"""
try:
device = int(modearg)
uuid = yield self.getUuid(device)
edsUri = yield self.getEdsUri(device)
print
print "SRDP Device: Register Values"
print "============================"
print
print "Device Index : %d" % device
print "Device UUID : %s" % (binascii.hexlify(uuid))
print "Device EDS URI : %s" % (edsUri)
print
eds = self._edsDb.getEdsByUri(edsUri)
if eds is None:
raise Exception("EDS for device not in database")
if self._config['write']:
res = self.writeRegisters(device, eds, self._config['write'])
LINEFORMAT = ["r9", "l30", "l*"]
print tabify(None, LINEFORMAT, self.LINELENGTH)
print tabify(["Register", "Path", "Current Value"], LINEFORMAT, self.LINELENGTH)
print tabify(None, LINEFORMAT, self.LINELENGTH)
sysRegsDone = False
for k in sorted(eds.registersByIndex.keys()):
if not sysRegsDone and k >= 1024:
print tabify(None, LINEFORMAT, self.LINELENGTH, filler = ['.', '|'])
sysRegsDone = True
reg = eds.registersByIndex[k]
if reg['access'] in ['read', 'readwrite']:
try:
data = yield self.channel.readRegister(device, reg['index'])
except Exception, e:
if reg['optional'] and e.args[0] == SrdpFrameHeader.SRDP_ERR_NO_SUCH_REGISTER:
print tabify([k, reg['path'], '- (not implemented)'], LINEFORMAT, self.LINELENGTH)
else:
print tabify([k, reg['path'], 'Error: %s.' % e.args[1]], LINEFORMAT, self.LINELENGTH)
else:
_, val = eds.unserialize(k, data)
print tabify([k, reg['path'], val], LINEFORMAT, self.LINELENGTH)
print tabify(None, LINEFORMAT, self.LINELENGTH)
print
finally:
self.channel.close()
@inlineCallbacks
def monitorDevice(self, modearg):
"""
"""
try:
device = int(modearg)
uuid = yield self.getUuid(device)
edsUri = yield self.getEdsUri(device)
print
print "SRDP Device: Monitor Registers"
print "=============================="
print
print "Device Index : %d" % device
print "Device UUID : %s" % (binascii.hexlify(uuid))
print "Device EDS URI : %s" % (edsUri)
print
eds = self._edsDb.getEdsByUri(edsUri)
if eds is None:
raise Exception("EDS for device not in database")
if self._config['write']:
res = self.writeRegisters(device, eds, self._config['write'])
LINEFORMAT = ["r9", "l30", "l*"]
self.LINES = 0
def _printHeader():
print tabify(None, LINEFORMAT, self.LINELENGTH)
print tabify(["Register", "Path", "Current Value"], LINEFORMAT, self.LINELENGTH)
print tabify(None, LINEFORMAT, self.LINELENGTH)
_printHeader()
def _onRegisterChange(device, register, position, data):
self.LINES += 1
if (self.LINES % 40) == 0:
_printHeader()
reg, val = eds.unserialize(register, data)
print tabify([reg['index'], reg['path'], val], LINEFORMAT, self.LINELENGTH)
self.onRegisterChange = _onRegisterChange
finally:
#self.channel.close()
print "Keeping channel open and listening for register change notificiations. Hit Ctrl-C/D/Z to stop."
| |
# NOTE - It is a known issue that the keyboard-related functions don't work on Ubuntu VMs in Virtualbox.
import pyautogui
import sys
import os
from pyautogui import LEFT, MIDDLE, RIGHT
from Xlib.display import Display
from Xlib import X
from Xlib.ext.xtest import fake_input
import Xlib.XK
# Map pyautogui button names (and raw X11 button numbers 1-7) to X11 button
# numbers: 1=left, 2=middle, 3=right, 4/5=vertical wheel, 6/7=horizontal wheel.
BUTTON_NAME_MAPPING = {LEFT: 1, MIDDLE: 2, RIGHT: 3, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7}

# This backend synthesizes events through X11/XTEST; refuse to load on
# platforms that have no X server.
if sys.platform in ('java', 'darwin', 'win32'):
    raise Exception('The pyautogui_x11 module should only be loaded on a Unix system that supports X11.')

#from pyautogui import *

"""
Much of this code is based on information gleaned from Paul Barton's PyKeyboard in PyUserInput from 2013, itself derived from Akkana Peck's pykey in 2008 ( http://www.shallowsky.com/software/crikey/pykey-0.1 ), itself derived from her "Crikey" lib.
"""
def _position():
    """Return the mouse cursor position as an (x, y) tuple of ints.

    Queries the X server's root window pointer state directly.
    """
    pointer = _display.screen().root.query_pointer()._data
    return pointer["root_x"], pointer["root_y"]
def _size():
    """Return the screen resolution as a (width, height) tuple in pixels."""
    screen = _display.screen()
    return screen.width_in_pixels, screen.height_in_pixels
def _vscroll(clicks, x=None, y=None):
    """Scroll the mouse wheel vertically by abs(clicks) detents.

    Positive clicks scroll up, negative scroll down. X11 represents wheel
    motion as button presses: button 4 is "up", button 5 is "down".
    """
    clicks = int(clicks)
    if not clicks:
        return
    button = 4 if clicks > 0 else 5
    for _ in range(abs(clicks)):
        _click(x, y, button=button)
def _hscroll(clicks, x=None, y=None):
    """Scroll the mouse wheel horizontally by abs(clicks) detents.

    Positive clicks scroll right (X11 button 7), negative scroll left
    (button 6).
    """
    clicks = int(clicks)
    if not clicks:
        return
    button = 7 if clicks > 0 else 6
    for _ in range(abs(clicks)):
        _click(x, y, button=button)
def _scroll(clicks, x=None, y=None):
    """Plain scroll() is vertical scrolling on this platform."""
    return _vscroll(clicks, x=x, y=y)
def _click(x, y, button):
    """Synthesize a full click (press + release) of `button` at (x, y)."""
    assert button in BUTTON_NAME_MAPPING, "button argument not in ('left', 'middle', 'right', 4, 5, 6, 7)"
    x11_button = BUTTON_NAME_MAPPING[button]
    _mouseDown(x, y, x11_button)
    _mouseUp(x, y, x11_button)
def _moveTo(x, y):
    # Synthesize a pointer-motion event via XTEST, then flush the request
    # queue so the move takes effect immediately.
    fake_input(_display, X.MotionNotify, x=x, y=y)
    _display.sync()
def _mouseDown(x, y, button):
    """Move to (x, y) and press `button` without releasing it."""
    _moveTo(x, y)
    assert button in BUTTON_NAME_MAPPING, "button argument not in ('left', 'middle', 'right', 4, 5, 6, 7)"
    fake_input(_display, X.ButtonPress, BUTTON_NAME_MAPPING[button])
    _display.sync()
def _mouseUp(x, y, button):
    """Move to (x, y) and release `button` (no press beforehand)."""
    _moveTo(x, y)
    assert button in BUTTON_NAME_MAPPING, "button argument not in ('left', 'middle', 'right', 4, 5, 6, 7)"
    fake_input(_display, X.ButtonRelease, BUTTON_NAME_MAPPING[button])
    _display.sync()
def _keyDown(key):
    """Performs a keyboard key press without the release. This will put that
    key in a held down state.

    NOTE: For some reason, this does not seem to cause key repeats like would
    happen if a keyboard key was held down on a text field.

    Args:
      key (str): The key to be pressed down. The valid names are listed in
      pyautogui.KEY_NAMES. May also be a raw integer X11 keycode.

    Returns:
      None
    """
    # BUG FIX: raw integer keycodes must be handled BEFORE the mapping
    # lookup. keyboardMapping is keyed by strings, so an int key always
    # failed the `key not in keyboardMapping` guard and the int branch
    # below was unreachable.
    if type(key) == int:
        fake_input(_display, X.KeyPress, key)
        _display.sync()
        return

    if key not in keyboardMapping or keyboardMapping[key] is None:
        # Unknown or unmapped key name: silently ignore, matching the
        # behavior of the other pyautogui platform backends.
        return

    # Shifted characters (e.g. 'A', '!') need Shift held around the press.
    needsShift = pyautogui.isShiftCharacter(key)
    if needsShift:
        fake_input(_display, X.KeyPress, keyboardMapping['shift'])

    fake_input(_display, X.KeyPress, keyboardMapping[key])

    if needsShift:
        fake_input(_display, X.KeyRelease, keyboardMapping['shift'])
    _display.sync()
def _keyUp(key):
    """Performs a keyboard key release (without the press down beforehand).

    Also works with raw integer X11 keycodes, but not keysyms.

    Args:
      key (str): The key to be released up. The valid names are listed in
      pyautogui.KEY_NAMES.

    Returns:
      None
    """
    # BUG FIX: handle integer keycodes before the mapping lookup.
    # keyboardMapping is keyed by strings, so an int key always hit the
    # `key not in keyboardMapping` early return and the int branch was
    # unreachable.
    if type(key) == int:
        keycode = key
    else:
        if key not in keyboardMapping or keyboardMapping[key] is None:
            return
        keycode = keyboardMapping[key]

    fake_input(_display, X.KeyRelease, keycode)
    _display.sync()
# Taken from PyKeyboard's ctor function.
# Connect to the X server named by $DISPLAY; raises KeyError if unset.
_display = Display(os.environ['DISPLAY'])

""" Information for keyboardMapping derived from PyKeyboard's special_key_assignment() function.

The *KB dictionaries in pyautogui map a string that can be passed to keyDown(),
keyUp(), or press() into the code used for the OS-specific keyboard function.

They should always be lowercase, and the same keys should be used across all OSes."""
# Start with every known key name mapped to None (i.e. "unsupported"),
# then fill in the X11 keycodes we can resolve below.
keyboardMapping = dict([(key, None) for key in pyautogui.KEY_NAMES])
keyboardMapping.update({
    'backspace': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('BackSpace')),
    '\b': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('BackSpace')),
    'tab': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Tab')),
    'enter': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Return')),
    'return': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Return')),
    'shift': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Shift_L')),
    'ctrl': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Control_L')),
    'alt': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Alt_L')),
    'pause': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Pause')),
    'capslock': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Caps_Lock')),
    'esc': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Escape')),
    'escape': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Escape')),
    'pgup': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Page_Up')),
    'pgdn': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Page_Down')),
    'pageup': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Page_Up')),
    'pagedown': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Page_Down')),
    'end': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('End')),
    'home': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Home')),
    'left': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Left')),
    'up': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Up')),
    'right': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Right')),
    'down': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Down')),
    'select': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Select')),
    'print': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Print')),
    'execute': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Execute')),
    'prtsc': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Print')),
    'prtscr': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Print')),
    'prntscrn': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Print')),
    'printscreen': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Print')),
    'insert': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Insert')),
    'del': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Delete')),
    'delete': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Delete')),
    'help': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Help')),
    'win': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Super_L')),
    'winleft': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Super_L')),
    'winright': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Super_R')),
    'apps': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Menu')),
    'num0': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_0')),
    'num1': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_1')),
    'num2': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_2')),
    'num3': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_3')),
    'num4': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_4')),
    'num5': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_5')),
    'num6': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_6')),
    'num7': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_7')),
    'num8': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_8')),
    'num9': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_9')),
    'multiply': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Multiply')),
    'add': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Add')),
    'separator': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Separator')),
    'subtract': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Subtract')),
    'decimal': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Decimal')),
    'divide': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('KP_Divide')),
    'f1': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F1')),
    'f2': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F2')),
    'f3': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F3')),
    'f4': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F4')),
    'f5': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F5')),
    'f6': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F6')),
    'f7': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F7')),
    'f8': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F8')),
    'f9': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F9')),
    'f10': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F10')),
    'f11': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F11')),
    'f12': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F12')),
    'f13': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F13')),
    'f14': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F14')),
    'f15': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F15')),
    'f16': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F16')),
    'f17': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F17')),
    'f18': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F18')),
    'f19': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F19')),
    'f20': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F20')),
    'f21': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F21')),
    'f22': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F22')),
    'f23': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F23')),
    'f24': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('F24')),
    'numlock': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Num_Lock')),
    'scrolllock': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Scroll_Lock')),
    'shiftleft': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Shift_L')),
    'shiftright': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Shift_R')),
    'ctrlleft': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Control_L')),
    'ctrlright': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Control_R')),
    'altleft': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Alt_L')),
    'altright': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Alt_R')),
    # These are added because unlike a-zA-Z0-9, the single characters do not
    # have keysym names identical to the character itself.
    ' ': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('space')),
    'space': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('space')),
    '\t': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Tab')),
    '\n': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Return')),  # for some reason this needs to be cr, not lf
    '\r': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Return')),
    '\e': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('Escape')),  # NOTE(review): '\e' is not a Python escape -- this key is literally backslash+e
    '!': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('exclam')),
    '#': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('numbersign')),
    '%': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('percent')),
    '$': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('dollar')),
    '&': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('ampersand')),
    '"': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('quotedbl')),
    "'": _display.keysym_to_keycode(Xlib.XK.string_to_keysym('apostrophe')),
    '(': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('parenleft')),
    ')': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('parenright')),
    '*': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('asterisk')),
    '=': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('equal')),
    '+': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('plus')),
    ',': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('comma')),
    '-': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('minus')),
    '.': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('period')),
    '/': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('slash')),
    ':': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('colon')),
    ';': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('semicolon')),
    '<': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('less')),
    '>': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('greater')),
    '?': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('question')),
    '@': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('at')),
    '[': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('bracketleft')),
    ']': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('bracketright')),
    '\\': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('backslash')),
    '^': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('asciicircum')),
    '_': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('underscore')),
    '`': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('grave')),
    '{': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('braceleft')),
    '|': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('bar')),
    '}': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('braceright')),
    '~': _display.keysym_to_keycode(Xlib.XK.string_to_keysym('asciitilde')),
})

# Trading memory for time: pre-populate keyboardMapping for the alphanumeric
# characters so we don't have to resolve each keysym at key-press time.
for c in """abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890""":
    keyboardMapping[c] = _display.keysym_to_keycode(Xlib.XK.string_to_keysym(c))
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_dns_nameserver
short_description: Manage LTM DNS nameservers on a BIG-IP
description:
- Manages LTM DNS nameservers on a BIG-IP. These nameservers form part of what is
known as DNS Express on a BIG-IP. This module does not configure GTM (DNS module) related
functionality, nor does it configure system-level name servers that affect the
ability of the base system to resolve DNS names.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the nameserver.
type: str
required: True
address:
description:
- Specifies the IP address on which the DNS nameserver (client) or back-end DNS
authoritative server (DNS Express server) listens for DNS messages.
- When creating a new nameserver, if this value is not specified, the default
is C(127.0.0.1).
type: str
service_port:
description:
- Specifies the service port on which the DNS nameserver (client) or back-end DNS
authoritative server (DNS Express server) listens for DNS messages.
- When creating a new nameserver, if this value is not specified, the default
is C(53).
type: str
route_domain:
description:
- Specifies the local route domain the DNS nameserver (client) or back-end
DNS authoritative server (DNS Express server) uses for outbound traffic.
- When creating a new nameserver, if this value is not specified, the default
is C(0).
type: str
tsig_key:
description:
- Specifies the TSIG key the system uses to communicate with this DNS nameserver
(client) or back-end DNS authoritative server (DNS Express server) for AXFR zone
transfers.
- If the nameserver is a client, then the system uses this TSIG key to verify the
request and sign the response.
- If this nameserver is a DNS Express server, then this TSIG key must match the
TSIG key for the zone on the back-end DNS authoritative server.
type: str
state:
description:
- When C(present), ensures the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a nameserver
bigip_dns_nameserver:
name: foo
address: 10.10.10.10
service_port: 53
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
address:
description: Address which the nameserver listens for DNS messages.
returned: changed
type: str
sample: 127.0.0.1
service_port:
description: Service port on which the nameserver listens for DNS messages.
returned: changed
type: int
sample: 53
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    """Base parameter container shared by module- and API-sourced params."""

    # Map REST API attribute names to the module's parameter names.
    api_map = {
        'routeDomain': 'route_domain',
        'port': 'service_port',
        'tsigKey': 'tsig_key'
    }

    # Attributes sent to the device (REST API naming).
    api_attributes = [
        'address',
        'routeDomain',
        'port',
        'tsigKey'
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'address',
        'service_port',
        'route_domain',
        'tsig_key',
    ]

    # Parameters compared by Difference to decide whether an update is needed.
    updatables = [
        'address',
        'service_port',
        'route_domain',
        'tsig_key',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the device REST API (no coercion needed)."""
    pass
class ModuleParameters(Parameters):
    """User-supplied parameters, coerced into API-ready form."""

    @property
    def tsig_key(self):
        # '' is significant: it means "remove the key", so pass it through
        # untouched rather than fully-qualifying it.
        key = self._values['tsig_key']
        if key in (None, ''):
            return key
        return fq_name(self.partition, key)

    @property
    def route_domain(self):
        rd = self._values['route_domain']
        return None if rd is None else fq_name(self.partition, rd)

    @property
    def service_port(self):
        port = self._values['service_port']
        if port is None:
            return None
        try:
            return int(port)
        except ValueError:
            # Reserving the right to add well-known ports
            raise F5ModuleError(
                "The 'service_port' must be in numeric form."
            )
class Changes(Parameters):
    def to_return(self):
        """Return the filtered dict of returnable values.

        The original wrapped this in ``try: ... except Exception: raise``,
        which is a no-op (re-raising unchanged); removed for clarity.
        """
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        return self._filter_params(result)
class UsableChanges(Changes):
    """Changes formatted for sending to the device API."""
    pass
class ReportableChanges(Changes):
    """Changes formatted for reporting back to the Ansible user."""
    pass
class Difference(object):
    """Compute which desired settings differ from what the device has.

    compare() prefers a dedicated property (for parameters needing custom
    comparison, e.g. tsig_key) and falls back to plain inequality.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Plain comparison: report the wanted value only when it differs
        # (or when the device has no such attribute at all).
        want_value = getattr(self.want, param)
        try:
            if want_value != getattr(self.have, param):
                return want_value
        except AttributeError:
            return want_value

    @property
    def tsig_key(self):
        # None means "not specified" -> no change; an explicit '' against a
        # device value of None also means no change (nothing to remove).
        if self.want.tsig_key is None:
            return None
        if self.have.tsig_key is None and self.want.tsig_key == '':
            return None
        if self.want.tsig_key != self.have.tsig_key:
            return self.want.tsig_key
class ModuleManager(object):
    """Orchestrates the desired-state workflow against the BIG-IP REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # want = user-requested state; have = state read from the device.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: everything the user specified counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: only parameters that actually differ count as changes.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply the requested state and return the result dict."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Usage telemetry (TEEM).
        send_teem(start, self.module, version)
        return result

    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True if the nameserver exists on the device.

        NOTE(review): if the response status matches none of the checked
        lists, this method implicitly returns None (falsy) -- verify that
        is intended before relying on it.
        """
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True

        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        # Apply documented defaults before computing the create payload.
        if self.want.address is None:
            self.want.update({'address': '127.0.0.1'})
        if self.want.service_port is None:
            self.want.update({'service_port': '53'})
        if self.want.route_domain is None:
            self.want.update({'route_domain': '/Common/0'})
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)

    def update_on_device(self):
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        # PATCH sends only the changed attributes.
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/dns/nameserver/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))

        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return ApiParameters(params=response)
        raise F5ModuleError(resp.content)
class ArgumentSpec(object):
    """Assemble the Ansible argument spec for this module."""

    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options; merged over the shared F5 connection spec
        # so these definitions take precedence.
        module_args = dict(
            name=dict(required=True),
            address=dict(),
            service_port=dict(),
            route_domain=dict(),
            tsig_key=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| |
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom optparse OptionParser and functions for reading Python settings files.
Default values for trunk/scripts flags can be specified in valid Python syntax
in the ~/.soc_scripts_settings file. For example, a default value for the
--user flag can be specified with a variable assignment in the settings file
like:
user = 'joeuser'
Defaults in the ~/.soc_scripts_settings file can be explicitly overridden by
supplied the actual flag. For example, supplying:
--user=someotheruser
would override the default value present in the settings file.
Option: class derived from optparse.Option that adds a 'required' parameter
OptionParser: class derived from optparse.OptionParser for use with Option
readPythonSettings(): interprets a valid Python file as a settings file
"""
__authors__ = [
    # alphabetical order by last name, please
    '"Todd Larsen" <tlarsen@google.com>',
]


import os
import optparse
import sys


# Default directory (user home) and file name of the per-user settings file
# read by the trunk/scripts tools.
DEF_SETTINGS_FILE_DIR = "~"
DEF_SETTINGS_FILE_NAME = '.soc_scripts_settings'
class Error(Exception):
    """Base exception class for all exceptions in the settings module."""
    pass
class Option(optparse.Option):
    """Class derived from optparse.Option that adds a 'required' parameter."""

    # Register 'required' as a recognized keyword attribute of the option.
    ATTRS = optparse.Option.ATTRS + ['required']

    def _check_required(self):
        """Insures that 'required' option can accept a value."""
        if self.required and (not self.takes_value()):
            raise optparse.OptionError(
                "'required' parameter set for option that does not take a value",
                self)

    # Make sure _check_required() is called from the constructor!
    # (optparse runs every callable in CHECK_METHODS during Option.__init__.)
    CHECK_METHODS = optparse.Option.CHECK_METHODS + [_check_required]

    def process(self, opt, value, values, parser):
        # Record on the parser that this option was actually supplied so
        # OptionParser.check_values() can detect missing required options.
        optparse.Option.process(self, opt, value, values, parser)
        parser.option_seen[self] = 1
class OptionParser(optparse.OptionParser):
    """Class derived from optparse.OptionParser for use with Option."""

    def _init_parsing_state(self):
        """Sets up dict to track options seen so far."""
        optparse.OptionParser._init_parsing_state(self)
        self.option_seen = {}

    def error(self, *args):
        """Convert errors reported by optparse.OptionParser to Error exceptions.

        Args:
          *args: passed through to the Error exception __init__() constructor,
            usually a list of strings

        Raises:
          Error with the supplied *args
        """
        raise Error(*args)

    def check_values(self, values, args):
        """Checks to make sure all required=True options were supplied.

        Args:
          values, args: passed through unchanged (see Returns:)

        Returns:
          (values, args) unchanged.

        Raises:
          Error if an option was not supplied that had required=True; exception
          positional arguments are the error message strings.
        """
        errors = []
        for option in self.option_list:
            # BUG FIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 as well.
            if (isinstance(option, Option)
                and option.required
                and (option not in self.option_seen)):
                errors.append(
                    'required %s option not supplied'
                    ' (and default settings not allowed)' % option)

        if errors:
            self.error(*errors)

        return values, args
def printErrors(errors, exit_code=1):
  """Prints error message strings to sys.stderr and returns an exit code.

  Args:
    errors: error message string or list of error message strings to be
      printed to sys.stderr
    exit_code: exit code to return (so that this function can be used as an
      expression in sys.exit() for example); default is 1

  Returns:
    exit_code
  """
  stream = sys.stderr
  stream.write('\nERRORS:\n')

  # A bare string is promoted to a one-element list so the loop below
  # prints it as a single message rather than character by character.
  if not isinstance(errors, (tuple, list)):
    errors = [errors]

  for message in errors:
    stream.write(' %s\n' % message)

  stream.write('\n')
  return exit_code
def printErrorsAndUsage(errors, parser, exit_code=1):
  """Prints error messages and usage text to sys.stderr and returns exit code.

  Args:
    errors: error message string or list of error message strings to be
      printed to sys.stderr
    parser: OptionParser with a print_help() method
    exit_code: exit code to return (so that this function can be used as an
      expression in sys.exit() for example); default is 1

  Returns:
    exit_code
  """
  # Emit the error messages first, then the usage text, so the usage ends
  # up closest to the user's next prompt.
  result = printErrors(errors, exit_code=exit_code)
  parser.print_help(file=sys.stderr)
  return result
def getExpandedPath(path):
  """Returns an expanded, normalized, absolute path.

  Args:
    path: path (possibly relative, possibly containing environment variables,
      etc.) to be expanded, normalized and made absolute

  Returns:
    absolute path, after expanding any environment variables and "~", then
    removing excess . and .. path elements
  """
  # Expansion order matters: env vars first (they may contain "~"),
  # then "~", then structural cleanup.
  expanded = os.path.expandvars(path)
  expanded = os.path.expanduser(expanded)
  return os.path.abspath(os.path.normpath(expanded))
def readPythonSettings(defaults={}, # {} OK since defaults is always copied
                       settings_dir=DEF_SETTINGS_FILE_DIR,
                       settings_file=DEF_SETTINGS_FILE_NAME):
  """Executes a Python-syntax settings file and returns the local variables.

  Args:
    defaults: dict of default values to use when settings are not present
      in the settings file (or if no settings file is present at all); this
      dict is *copied* and is not altered at all
    settings_dir: optional directory containing settings_file
    settings_file: optional settings file name found in settings_dir

  Returns:
    dict of setting name/value pairs (possibly including some values from the
    defaults parameter). Since the settings file is full-fledged Python
    source, the values could be any valid Python object.

  Raises:
    Error if some error occurred parsing the present settings file; exception
    positional arguments are the error message strings.
  """
  # do not let the original defaults be altered
  defaults = defaults.copy()
  # form absolute path to the settings file, expanding any environment
  # variables and "~", then removing excess . and .. path elements
  path = getExpandedPath(os.path.join(settings_dir, settings_file))
  # empty dict to capture the local variables in the settings file
  settings_locals = {}
  try:
    # execute the Python source file and recover the local variables as settings
    # NOTE(review): execfile() runs arbitrary code from the settings file;
    # this is Python 2 only (execfile was removed in Python 3).
    execfile(path, {}, settings_locals)
  except IOError:
    # If the settings file is not present, there are no defaults.
    pass
  except Exception, error:
    # Other exceptions usually mean a faulty settings file.
    raise Error(
        'faulty settings file:',
        (' %s: %s' % (error.__class__.__name__, str(error))),
        (' %s' % path))
  # overwrite defaults copy with values from the (possibly empty) settings file
  defaults.update(settings_locals)
  return defaults
def readPythonSettingsOrDie(parser=None, **kwargs):
  """Calls readPythonSettings(), calling sys.exit() on any errors.

  Args:
    parser: if supplied, an OptionParser instance used to call print_help()
      to print usage information if errors occur
    **kwargs: see readPythonSettings()

  Returns:
    On success, returns results of readPythonSettings().

  Exits:
    On any error from readPythonSettings(), prints error messages to stderr,
    possibly prints usage information, and calls sys.exit(1).
  """
  try:
    return readPythonSettings(**kwargs)
  except Error, error:
    # error.args holds the individual message strings supplied when the
    # Error was raised; both print helpers return the exit code to use.
    if parser:
      sys.exit(printErrorsAndUsage(error.args, parser))
    else:
      sys.exit(printErrors(error.args))
def makeOptionParserOrDie(*args, **kwargs):
  """Creates and returns an OptionParser, calling sys.exit() on any errors.

  Args:
    *args, **kwargs: supplied directly to OptionParser constructor

  Returns:
    On success, returns an OptionParser instance.

  Exits:
    On any error, prints error messages to stderr and calls sys.exit(1).
  """
  try:
    return OptionParser(*args, **kwargs)
  except Error, error:
    # Construction fails via OptionParser.error(), which raises Error
    # (e.g. for a misconfigured Option); report and exit.
    sys.exit(printErrors(error.args))
def parseOptionsOrDie(parser, args):
  """Parses command-line options, calling sys.exit() on any errors.

  Args:
    parser: an OptionParser instance
    args: list of command-line arguments to supply to parser

  Returns:
    On success, returns (options, args) returned by parser.parse_args(args).

  Exits:
    On any error, prints error messages and usage information to stderr and
    calls sys.exit(1).
  """
  try:
    return parser.parse_args(args)
  except Error, error:
    # parser.error() raises Error instead of exiting, so all parse failures
    # (including missing required options) funnel through here.
    sys.exit(printErrorsAndUsage(error.args, parser))
def checkCommonSvnOptions(options):
  """Checks a common subset of command-line options.

  Multiple scripts accept a subset of common command-line options. This
  function does some sanity checks on these flags. These checks are collected
  here because they were being duplicated in multiple scripts.

  Args:
    options: OptionParser.parse_args() options instance to check

  Returns:
    list of error message strings, or an empty list if no errors
  """
  errors = []

  if not options.repo:
    errors.append('--repo must be supplied or have a settings file default')

  if not options.wc:
    errors.append('--wc must be supplied or have a settings file default')

  # --branch and --user are alternatives; at least one must be present.
  if (not options.branch) and (not options.user):
    errors.append('at least one of --branch or --user must be supplied')

  return errors
def checkCommonSvnOptionsOrDie(options, parser):
  """Checks subset of command-line options, calling sys.exit() on any errors.

  Args:
    options: see checkCommonSvnOptions()
    parser: an OptionParser instance used to call print_help() to print
      usage information if errors occur

  Exits:
    On any error messages returned by checkCommonSvnOptions(), prints error
    messages and usage information to stderr and calls sys.exit(1).
  """
  errors = checkCommonSvnOptions(options)

  if not errors:
    return

  sys.exit(printErrorsAndUsage(errors, parser))
| |
import mock
import pytest
from rest_framework import exceptions
from django.utils import timezone
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from framework.auth.core import Auth
from osf.models import PreprintService, NodeLicense
from osf.utils.workflows import DefaultStates
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintProviderFactory,
)
from website.settings import DOI_FORMAT
def build_preprint_update_payload(
        node_id, attributes=None, relationships=None,
        jsonapi_type='preprints'):
    """Returns a JSON-API style PATCH payload for a preprint update."""
    return {
        'data': {
            'id': node_id,
            'type': jsonapi_type,
            'attributes': attributes,
            'relationships': relationships,
        }
    }
@pytest.fixture()
def user():
    # Module-level fixture: a fresh authenticated user for each test.
    return AuthUserFactory()
@pytest.mark.django_db
class TestPreprintDetail:
    """Read (GET) behavior of the preprint detail endpoint."""

    @pytest.fixture()
    def preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def preprint_pre_mod(self, user):
        # Unpublished preprint under a pre-moderation provider workflow.
        return PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False, creator=user)

    @pytest.fixture()
    def unpublished_preprint(self, user):
        return PreprintFactory(creator=user, is_published=False)

    @pytest.fixture()
    def url(self, preprint):
        return '/{}preprints/{}/'.format(API_BASE, preprint._id)

    @pytest.fixture()
    def unpublished_url(self, unpublished_preprint):
        return '/{}preprints/{}/'.format(API_BASE, unpublished_preprint._id)

    @pytest.fixture()
    def res(self, app, url):
        # Anonymous GET of the published preprint.
        return app.get(url)

    @pytest.fixture()
    def data(self, res):
        return res.json['data']

    def test_preprint_detail(self, app, user, preprint, url, res, data):
        # test_preprint_detail_success
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'

        # test_preprint_top_level
        assert data['type'] == 'preprints'
        assert data['id'] == preprint._id

        # test title in preprint data
        assert data['attributes']['title'] == preprint.node.title

        # test contributors in preprint data
        # (relationship present, but not inlined as 'data')
        assert data['relationships'].get('contributors', None)
        assert data['relationships']['contributors'].get('data', None) is None

        # test node type and id in preprint data
        assert data['relationships']['node']['data'].get(
            'id', None) == preprint.node._id
        assert data['relationships']['node']['data'].get(
            'type', None) == 'nodes'

        # test_preprint_node_deleted_detail_failure
        # A preprint whose project node is deleted should 404.
        deleted_node = ProjectFactory(creator=user, is_deleted=True)
        deleted_preprint = PreprintFactory(project=deleted_node, creator=user)

        deleted_preprint_url = '/{}preprints/{}/'.format(
            API_BASE, deleted_preprint._id)
        deleted_preprint_res = app.get(
            deleted_preprint_url, expect_errors=True)
        assert deleted_preprint_res.status_code == 404
        assert res.content_type == 'application/vnd.api+json'

    def test_withdrawn_preprint(self, app, user, preprint_pre_mod):
        # test_retracted_fields
        # Before withdrawal: none of the withdrawal fields are serialized.
        url = '/{}preprints/{}/'.format(API_BASE, preprint_pre_mod._id)
        res = app.get(url, auth=user.auth)
        data = res.json['data']
        assert not data['attributes']['date_withdrawn']
        assert 'withdrawal_justification' not in data['attributes']
        assert 'ever_public' not in data['attributes']

        ## retracted and not ever_public
        # A withdrawn preprint that was never public 404s, even for its creator.
        assert not preprint_pre_mod.ever_public
        preprint_pre_mod.date_withdrawn = timezone.now()
        preprint_pre_mod.withdrawal_justification = 'assumptions no longer apply'
        preprint_pre_mod.save()
        assert preprint_pre_mod.is_retracted
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404

        ## retracted and ever_public (True)
        # Once ever_public, the withdrawn preprint remains visible with
        # withdrawal metadata exposed.
        preprint_pre_mod.ever_public = True
        preprint_pre_mod.save()
        res = app.get(url, auth=user.auth)
        data = res.json['data']
        assert data['attributes']['date_withdrawn']
        assert 'withdrawal_justification' in data['attributes']
        assert 'assumptions no longer apply' == data['attributes']['withdrawal_justification']
        assert 'date_withdrawn' in data['attributes']

    def test_embed_contributors(self, app, user, preprint):
        # Embedded contributor ids are '<node_id>-<guid>' composites.
        url = '/{}preprints/{}/?embed=contributors'.format(
            API_BASE, preprint._id)

        res = app.get(url, auth=user.auth)
        embeds = res.json['data']['embeds']
        ids = preprint.node.contributors.all().values_list('guids___id', flat=True)
        ids = ['{}-{}'.format(preprint.node._id, id_) for id_ in ids]
        for contrib in embeds['contributors']['data']:
            assert contrib['id'] in ids

    def test_preprint_doi_link_absent_in_unpublished_preprints(
            self, app, user, unpublished_preprint, unpublished_url):
        res = app.get(unpublished_url, auth=user.auth)
        assert res.json['data']['id'] == unpublished_preprint._id
        assert res.json['data']['attributes']['is_published'] is False
        assert 'preprint_doi' not in res.json['data']['links'].keys()
        assert res.json['data']['attributes']['preprint_doi_created'] is None

    def test_published_preprint_doi_link_not_returned_before_doi_request(
            self, app, user, unpublished_preprint, unpublished_url):
        # Publishing alone does not add the DOI link; the identifier must
        # be set first (see the test below).
        unpublished_preprint.is_published = True
        unpublished_preprint.save()
        res = app.get(unpublished_url, auth=user.auth)
        assert res.json['data']['id'] == unpublished_preprint._id
        assert res.json['data']['attributes']['is_published'] is True
        assert 'preprint_doi' not in res.json['data']['links'].keys()

    def test_published_preprint_doi_link_returned_after_doi_request(
            self, app, user, preprint, url):
        expected_doi = DOI_FORMAT.format(
            prefix=preprint.provider.doi_prefix,
            guid=preprint._id
        )
        preprint.set_identifier_values(doi=expected_doi)
        res = app.get(url, auth=user.auth)
        assert res.json['data']['id'] == preprint._id
        assert res.json['data']['attributes']['is_published'] is True
        assert 'preprint_doi' in res.json['data']['links'].keys()
        assert res.json['data']['links']['preprint_doi'] == 'https://dx.doi.org/{}'.format(
            expected_doi)
        assert res.json['data']['attributes']['preprint_doi_created']

    def test_preprint_embed_identifiers(self, app, user, preprint, url):
        embed_url = url + '?embed=identifiers'
        res = app.get(embed_url)
        assert res.status_code == 200
        link = res.json['data']['relationships']['identifiers']['links']['related']['href']
        assert '{}identifiers/'.format(url) in link
@pytest.mark.django_db
class TestPreprintDelete:
    """DELETE behavior of the preprint detail endpoint."""

    @pytest.fixture()
    def unpublished_preprint(self, user):
        return PreprintFactory(creator=user, is_published=False)

    @pytest.fixture()
    def published_preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def url(self, user):
        # Template URL; tests fill in the preprint id via str.format().
        return '/{}preprints/{{}}/'.format(API_BASE)

    @staticmethod
    def _current_pks():
        """Returns the pks of every PreprintService currently in the db."""
        return list(
            PreprintService.objects.all().values_list('pk', flat=True))

    def test_can_delete_unpublished(
            self, app, user, url, unpublished_preprint):
        pks_before = self._current_pks()
        app.delete(url.format(unpublished_preprint._id), auth=user.auth)
        pks_after = self._current_pks()

        assert unpublished_preprint.pk in pks_before
        assert unpublished_preprint.pk not in pks_after

    def test_cannot_delete_published(self, app, user, published_preprint, url):
        pks_before = self._current_pks()
        res = app.delete(
            url.format(published_preprint._id),
            auth=user.auth,
            expect_errors=True)
        pks_after = self._current_pks()

        # Deleting a published preprint conflicts (409) and changes nothing.
        assert res.status_code == 409
        assert pks_before == pks_after
        assert published_preprint.pk in pks_after

    def test_deletes_only_requested_document(
            self, app, user, published_preprint,
            unpublished_preprint, url):
        pks_before = self._current_pks()
        app.delete(url.format(unpublished_preprint._id), auth=user.auth)
        pks_after = self._current_pks()

        assert unpublished_preprint.pk in pks_before
        assert published_preprint.pk in pks_before
        assert unpublished_preprint.pk not in pks_after
        assert published_preprint.pk in pks_after
@pytest.mark.django_db
class TestPreprintUpdate:
    """PATCH behavior of the preprint detail endpoint (permissions,
    attributes, relationships, and the DOI-metadata update task)."""

    @pytest.fixture()
    def preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def url(self, preprint):
        return '/{}preprints/{}/'.format(API_BASE, preprint._id)

    @pytest.fixture()
    def subject(self):
        return SubjectFactory()

    def test_update_preprint_permission_denied(self, app, preprint, url):
        # 403 for an authenticated non-contributor, 401 for anonymous.
        update_doi_payload = build_preprint_update_payload(
            preprint._id, attributes={'article_doi': '10.123/456/789'})

        noncontrib = AuthUserFactory()

        res = app.patch_json_api(
            url,
            update_doi_payload,
            auth=noncontrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        res = app.patch_json_api(url, update_doi_payload, expect_errors=True)
        assert res.status_code == 401

    def test_update_subjects(self, app, user, preprint, subject, url):
        assert not preprint.subjects.filter(_id=subject._id).exists()
        # Subjects are sent as a list of taxonomy-path lists.
        update_subjects_payload = build_preprint_update_payload(
            preprint._id, attributes={'subjects': [[subject._id]]})

        res = app.patch_json_api(url, update_subjects_payload, auth=user.auth)
        assert res.status_code == 200

        preprint.reload()
        assert preprint.subjects.filter(_id=subject._id).exists()

    def test_update_invalid_subjects(self, app, user, preprint, url):
        subjects = preprint.subjects
        update_subjects_payload = build_preprint_update_payload(
            preprint._id, attributes={'subjects': [['wwe']]})

        res = app.patch_json_api(
            url, update_subjects_payload,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400

        preprint.reload()
        assert preprint.subjects == subjects

    def test_update_primary_file(self, app, user, preprint, url):
        new_file = test_utils.create_test_file(
            preprint.node, user, filename='shook_that_mans_hand.pdf')
        relationships = {
            'primary_file': {
                'data': {
                    'type': 'file',
                    'id': new_file._id
                }
            }
        }
        assert preprint.primary_file != new_file
        update_file_payload = build_preprint_update_payload(
            preprint._id, relationships=relationships)

        res = app.patch_json_api(url, update_file_payload, auth=user.auth)
        assert res.status_code == 200

        preprint.node.reload()
        assert preprint.primary_file == new_file

        # Changing the primary file also writes a node log entry.
        log = preprint.node.logs.latest()
        assert log.action == 'preprint_file_updated'
        assert log.params.get('preprint') == preprint._id

    def test_update_preprints_with_none_type(self, app, user, preprint, url):
        payload = {
            'data': {
                'id': preprint._id,
                'type': None,
                'attributes': None,
                'relationship': None
            }
        }

        res = app.patch_json_api(url, payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

    def test_update_preprints_with_no_type(self, app, user, preprint, url):
        payload = {
            'data': {
                'id': preprint._id,
                'attributes': None,
                'relationship': None
            }
        }

        res = app.patch_json_api(url, payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

    def test_update_preprints_with_wrong_type(self, app, user, preprint, url):
        # JSON-API type mismatch yields 409 Conflict.
        update_file_payload = build_preprint_update_payload(preprint._id, jsonapi_type='Nonsense')

        res = app.patch_json_api(url, update_file_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

    def test_new_primary_not_in_node(self, app, user, preprint, url):
        # The primary file must belong to the preprint's own node.
        project = ProjectFactory()
        file_for_project = test_utils.create_test_file(
            project, user, filename='six_pack_novak.pdf')

        relationships = {
            'primary_file': {
                'data': {
                    'type': 'file',
                    'id': file_for_project._id
                }
            }
        }

        update_file_payload = build_preprint_update_payload(
            preprint._id, relationships=relationships)

        res = app.patch_json_api(
            url, update_file_payload,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400

        preprint.reload()
        assert preprint.primary_file != file_for_project

    def test_update_article_doi(self, app, user, preprint, url):
        new_doi = '10.1234/ASDFASDF'
        assert preprint.article_doi != new_doi
        update_subjects_payload = build_preprint_update_payload(
            preprint._id, attributes={'doi': new_doi})

        res = app.patch_json_api(url, update_subjects_payload, auth=user.auth)
        assert res.status_code == 200

        preprint.node.reload()
        assert preprint.article_doi == new_doi

        preprint_detail = app.get(url, auth=user.auth).json['data']
        assert preprint_detail['links']['doi'] == 'https://dx.doi.org/{}'.format(
            new_doi)

    @mock.patch('website.preprints.tasks.update_or_enqueue_on_preprint_updated')
    def test_update_description_and_title(
            self, mock_preprint_updated, app, user, preprint, url):
        new_title = 'Brother Nero'
        new_description = "I knew you'd come!"
        assert preprint.node.description != new_description
        assert preprint.node.title != new_title

        update_title_description_payload = build_preprint_update_payload(
            preprint._id,
            attributes={
                'title': new_title,
                'description': new_description
            }
        )
        res = app.patch_json_api(
            url,
            update_title_description_payload,
            auth=user.auth)

        assert res.status_code == 200

        preprint.node.reload()
        assert preprint.node.description == new_description
        assert preprint.node.title == new_title
        # Metadata changes must enqueue the preprint-updated task.
        assert mock_preprint_updated.called

    @mock.patch('website.preprints.tasks.update_or_enqueue_on_preprint_updated')
    def test_update_tags(self, mock_update_doi_metadata, app, user, preprint, url):
        new_tags = ['hey', 'sup']

        for tag in new_tags:
            assert tag not in preprint.node.tags.all().values_list('name', flat=True)

        update_tags_payload = build_preprint_update_payload(
            preprint._id,
            attributes={
                'tags': new_tags
            }
        )
        res = app.patch_json_api(url, update_tags_payload, auth=user.auth)

        assert res.status_code == 200
        preprint.node.reload()
        assert sorted(
            list(
                preprint.node.tags.all().values_list(
                    'name',
                    flat=True))
        ) == new_tags
        assert mock_update_doi_metadata.called

    @mock.patch('website.preprints.tasks.update_or_enqueue_on_preprint_updated')
    def test_update_contributors(
            self, mock_update_doi_metadata, app, user, preprint, url):
        new_user = AuthUserFactory()
        contributor_payload = {
            'data': {
                'attributes': {
                    'bibliographic': True,
                    'permission': 'write',
                    'send_email': False
                },
                'type': 'contributors',
                'relationships': {
                    'users': {
                        'data': {
                            'id': new_user._id,
                            'type': 'users'
                        }
                    }
                }
            }
        }

        contributor_url = url + 'contributors/'
        res = app.post_json_api(
            contributor_url,
            contributor_payload,
            auth=user.auth)

        assert res.status_code == 201
        assert new_user in preprint.node.contributors
        assert mock_update_doi_metadata.called

    def test_cannot_set_primary_file(self, app, user, preprint, url):
        # test_write_contrib_cannot_set_primary_file
        read_write_contrib = AuthUserFactory()
        preprint.node.add_contributor(
            read_write_contrib,
            permissions=['read', 'write'],
            auth=Auth(user), save=True)
        new_file = test_utils.create_test_file(
            preprint.node, user, filename='lovechild_reason.pdf')

        data = {
            'data': {
                'type': 'primary_file',
                'id': preprint._id,
                'attributes': {},
                'relationships': {
                    'primary_file': {
                        'data': {
                            'type': 'file',
                            'id': new_file._id
                        }
                    }
                }
            }
        }

        res = app.patch_json_api(
            url, data,
            auth=read_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_noncontrib_cannot_set_primary_file
        non_contrib = AuthUserFactory()
        new_file = test_utils.create_test_file(
            preprint.node, user, filename='flowerchild_nik.pdf')

        data = {
            'data': {
                'type': 'primary_file',
                'id': preprint._id,
                'attributes': {},
                'relationships': {
                    'primary_file': {
                        'data': {
                            'type': 'file',
                            'id': new_file._id
                        }
                    }
                }
            }
        }

        res = app.patch_json_api(
            url, data,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

    def test_contribs_cannot_set_subjects(
            self, app, user, preprint, subject, url):
        # def test_write_contrib_cannot_set_subjects(self, app, user, preprint,
        # subject, url):
        write_contrib = AuthUserFactory()
        preprint.node.add_contributor(
            write_contrib,
            permissions=['read', 'write'],
            auth=Auth(user), save=True)

        assert not preprint.subjects.filter(_id=subject._id).exists()
        update_subjects_payload = build_preprint_update_payload(
            preprint._id, attributes={'subjects': [[subject._id]]})

        res = app.patch_json_api(
            url, update_subjects_payload,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        assert not preprint.subjects.filter(_id=subject._id).exists()

        # def test_non_contrib_cannot_set_subjects(self, app, user, preprint,
        # subject, url):
        non_contrib = AuthUserFactory()
        assert not preprint.subjects.filter(_id=subject._id).exists()

        update_subjects_payload = build_preprint_update_payload(
            preprint._id, attributes={'subjects': [[subject._id]]})

        res = app.patch_json_api(
            url, update_subjects_payload,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        assert not preprint.subjects.filter(_id=subject._id).exists()

    def test_update_published(self, app, user):
        unpublished = PreprintFactory(creator=user, is_published=False)
        url = '/{}preprints/{}/'.format(API_BASE, unpublished._id)
        payload = build_preprint_update_payload(
            unpublished._id, attributes={'is_published': True})
        app.patch_json_api(url, payload, auth=user.auth)
        unpublished.reload()
        assert unpublished.is_published

    def test_update_published_makes_node_public(
            self, app, user):
        unpublished = PreprintFactory(creator=user, is_published=False)
        assert not unpublished.node.is_public
        url = '/{}preprints/{}/'.format(API_BASE, unpublished._id)
        payload = build_preprint_update_payload(
            unpublished._id, attributes={'is_published': True})
        app.patch_json_api(url, payload, auth=user.auth)
        unpublished.node.reload()
        assert unpublished.node.is_public

    @mock.patch('website.preprints.tasks.update_or_enqueue_on_preprint_updated')
    def test_update_preprint_task_called_on_api_update(
            self, mock_on_preprint_updated, app, user, preprint, url):
        update_doi_payload = build_preprint_update_payload(
            preprint._id, attributes={'doi': '10.1234/ASDFASDF'})

        app.patch_json_api(url, update_doi_payload, auth=user.auth)

        preprint.node.reload()
        assert mock_on_preprint_updated.called
@pytest.mark.django_db
class TestPreprintUpdateLicense:
@pytest.fixture()
def admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def cc0_license(self):
return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()
@pytest.fixture()
def mit_license(self):
return NodeLicense.objects.filter(name='MIT License').first()
@pytest.fixture()
def no_license(self):
return NodeLicense.objects.filter(name='No license').first()
@pytest.fixture()
def preprint_provider(self, cc0_license, no_license):
preprint_provider = PreprintProviderFactory()
preprint_provider.licenses_acceptable = [cc0_license, no_license]
preprint_provider.save()
return preprint_provider
@pytest.fixture()
def preprint(
self, admin_contrib, write_contrib, read_contrib,
preprint_provider):
preprint = PreprintFactory(
creator=admin_contrib,
provider=preprint_provider)
preprint.node.add_contributor(write_contrib, auth=Auth(admin_contrib))
preprint.node.add_contributor(
read_contrib,
auth=Auth(admin_contrib),
permissions=['read'])
preprint.node.save()
return preprint
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/'.format(API_BASE, preprint._id)
@pytest.fixture()
def make_payload(self):
def payload(
node_id, license_id=None, license_year=None,
copyright_holders=None, jsonapi_type='preprints'
):
attributes = {}
if license_year and copyright_holders:
attributes = {
'license_record': {
'year': license_year,
'copyright_holders': copyright_holders
}
}
elif license_year:
attributes = {
'license_record': {
'year': license_year
}
}
elif copyright_holders:
attributes = {
'license_record': {
'copyright_holders': copyright_holders
}
}
return {
'data': {
'id': node_id,
'type': jsonapi_type,
'attributes': attributes,
'relationships': {
'license': {
'data': {
'type': 'licenses',
'id': license_id
}
}
}
}
} if license_id else {
'data': {
'id': node_id,
'type': jsonapi_type,
'attributes': attributes
}
}
return payload
@pytest.fixture()
def make_request(self, app):
def request(url, data, auth=None, expect_errors=False):
return app.patch_json_api(
url, data, auth=auth, expect_errors=expect_errors)
return request
def test_admin_update_license_with_invalid_id(
self, admin_contrib, preprint, url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id='thisisafakelicenseid'
)
assert preprint.license is None
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
preprint.reload()
assert preprint.license is None
def test_admin_can_update_license(
self, admin_contrib, preprint, cc0_license,
url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
assert preprint.license is None
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.reload()
res_data = res.json['data']
pp_license_id = preprint.license.node_license._id
assert res_data['relationships']['license']['data'].get(
'id', None) == pp_license_id
assert res_data['relationships']['license']['data'].get(
'type', None) == 'licenses'
assert preprint.license.node_license == cc0_license
assert preprint.license.year is None
assert preprint.license.copyright_holders == []
# check logs
log = preprint.node.logs.latest()
assert log.action == 'preprint_license_updated'
assert log.params.get('preprint') == preprint._id
def test_admin_can_update_license_record(
self, admin_contrib, preprint, no_license,
url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
license_year='2015',
copyright_holders=['Tonya Shepoly, Lucas Pucas']
)
assert preprint.license is None
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2015'
assert preprint.license.copyright_holders == [
'Tonya Shepoly, Lucas Pucas']
def test_cannot_update_license(
self, write_contrib, read_contrib, non_contrib,
preprint, cc0_license, url, make_payload, make_request):
# test_write_contrib_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'User must be an admin to update a preprint.'
# test_read_contrib_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_non_contrib_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_user_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(url, data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_update_error(
self, admin_contrib, preprint, preprint_provider,
mit_license, no_license, url, make_payload, make_request):
# test_update_preprint_with_invalid_license_for_provider
data = make_payload(
node_id=preprint._id,
license_id=mit_license._id
)
assert preprint.license is None
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'Invalid license chosen for {}'.format(
preprint_provider.name)
# test_update_preprint_license_without_required_year_in_payload
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
copyright_holders=['Rachel', 'Rheisen']
)
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'year must be specified for this license'
# test_update_preprint_license_without_required_copyright_holders_in_payload
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
license_year='1994'
)
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'
def test_update_preprint_with_existing_license_year_attribute_only(
self, admin_contrib, preprint, no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2014',
'copyrightHolders': ['Daniel FromBrazil', 'Queen Jaedyn']
},
Auth(admin_contrib),
)
preprint.save()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == [
'Daniel FromBrazil', 'Queen Jaedyn']
data = make_payload(
node_id=preprint._id,
license_year='2015'
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.license.reload()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2015'
assert preprint.license.copyright_holders == [
'Daniel FromBrazil', 'Queen Jaedyn']
def test_update_preprint_with_existing_license_copyright_holders_attribute_only(
        self, admin_contrib, preprint, no_license, url, make_payload, make_request):
    """PATCHing only copyrightHolders leaves the license type and year intact."""
    # Seed the preprint with a fully populated license record.
    preprint.set_preprint_license(
        {
            'id': no_license.license_id,
            'year': '2014',
            'copyrightHolders': ['Captain Haley', 'Keegor Cannoli']
        },
        Auth(admin_contrib),
    )
    preprint.save()
    assert preprint.license.node_license == no_license
    assert preprint.license.year == '2014'
    assert preprint.license.copyright_holders == ['Captain Haley', 'Keegor Cannoli']

    # Payload carries nothing but a new set of holders.
    payload = make_payload(
        node_id=preprint._id,
        copyright_holders=['Reason Danish', 'Ben the NJB']
    )
    response = make_request(url, payload, auth=admin_contrib.auth)
    assert response.status_code == 200

    # Only the holders should have changed.
    preprint.license.reload()
    assert preprint.license.node_license == no_license
    assert preprint.license.year == '2014'
    assert preprint.license.copyright_holders == ['Reason Danish', 'Ben the NJB']
def test_update_preprint_with_existing_license_relationship_only(
        self, admin_contrib, preprint, cc0_license,
        no_license, url, make_payload, make_request):
    """Swapping only the license type keeps the existing year and holders."""
    preprint.set_preprint_license(
        {
            'id': no_license.license_id,
            'year': '2014',
            'copyrightHolders': ['Reason', 'Mr. Lulu']
        },
        Auth(admin_contrib),
    )
    preprint.save()
    assert preprint.license.node_license == no_license
    assert preprint.license.year == '2014'
    assert preprint.license.copyright_holders == ['Reason', 'Mr. Lulu']

    # Relationship-only payload: new license id, no attribute changes.
    payload = make_payload(node_id=preprint._id, license_id=cc0_license._id)
    response = make_request(url, payload, auth=admin_contrib.auth)
    assert response.status_code == 200

    preprint.license.reload()
    assert preprint.license.node_license == cc0_license
    assert preprint.license.year == '2014'
    assert preprint.license.copyright_holders == ['Reason', 'Mr. Lulu']
def test_update_preprint_with_existing_license_relationship_and_attributes(
        self, admin_contrib, preprint, cc0_license,
        no_license, url, make_payload, make_request):
    """License type, year, and holders can all be replaced in one request."""
    preprint.set_preprint_license(
        {
            'id': no_license.license_id,
            'year': '2014',
            'copyrightHolders': ['Reason', 'Mr. Cosgrove']
        },
        Auth(admin_contrib),
        save=True
    )
    assert preprint.license.node_license == no_license
    assert preprint.license.year == '2014'
    assert preprint.license.copyright_holders == ['Reason', 'Mr. Cosgrove']

    # Payload replaces the relationship and both attributes at once.
    payload = make_payload(
        node_id=preprint._id,
        license_id=cc0_license._id,
        license_year='2015',
        copyright_holders=['Rheisen', 'Princess Tyler']
    )
    response = make_request(url, payload, auth=admin_contrib.auth)
    assert response.status_code == 200

    preprint.license.reload()
    assert preprint.license.node_license == cc0_license
    assert preprint.license.year == '2015'
    assert preprint.license.copyright_holders == ['Rheisen', 'Princess Tyler']
def test_update_preprint_license_does_not_change_project_license(
        self, admin_contrib, preprint, cc0_license,
        no_license, url, make_payload, make_request):
    """Changing a preprint's license leaves its supplemental project's license alone."""
    # Give the underlying project its own license first.
    preprint.node.set_node_license(
        {
            'id': no_license.license_id,
            'year': '2015',
            'copyrightHolders': ['Simba', 'Mufasa']
        },
        auth=Auth(admin_contrib)
    )
    preprint.node.save()
    assert preprint.node.node_license.node_license == no_license

    payload = make_payload(node_id=preprint._id, license_id=cc0_license._id)
    response = make_request(url, payload, auth=admin_contrib.auth)
    assert response.status_code == 200

    # Preprint picked up the new license; the project kept its own.
    preprint.reload()
    assert preprint.license.node_license == cc0_license
    assert preprint.node.node_license.node_license == no_license
def test_update_preprint_license_without_change_does_not_add_log(
        self, admin_contrib, preprint, no_license, url, make_payload, make_request):
    """Re-submitting an identical license must not create a new log entry."""
    preprint.set_preprint_license(
        {
            'id': no_license.license_id,
            'year': '2015',
            'copyrightHolders': ['Kim', 'Kanye']
        },
        auth=Auth(admin_contrib),
        save=True
    )
    logs_before = preprint.node.logs.count()
    latest_before = preprint.node.logs.latest()

    # Same id/year/holders (holder order differs, which the test expects
    # not to count as a change).
    payload = make_payload(
        node_id=preprint._id,
        license_id=no_license._id,
        license_year='2015',
        copyright_holders=['Kanye', 'Kim']
    )
    response = make_request(url, payload, auth=admin_contrib.auth)

    preprint.node.reload()
    assert response.status_code == 200
    assert preprint.node.logs.count() == logs_before
    assert preprint.node.logs.latest()._id == latest_before._id
@pytest.mark.django_db
class TestPreprintDetailPermissions:
    """Read-permission matrix for the preprint detail endpoint.

    Covers unpublished, private (accepted), and abandoned ('initial'
    machine state) preprints: only admins may see them; anonymous users
    get 401, authenticated non-admins get 403 (except where noted).
    """

    @pytest.fixture()
    def admin(self):
        # Project creator and preprint admin.
        return AuthUserFactory()

    @pytest.fixture()
    def write_contrib(self):
        # Added to the projects with read+write (not admin) permissions.
        return AuthUserFactory()

    @pytest.fixture()
    def non_contrib(self):
        # Authenticated user with no relation to the projects/preprints.
        return AuthUserFactory()

    @pytest.fixture()
    def public_project(self, admin, write_contrib):
        public_project = ProjectFactory(creator=admin, is_public=True)
        public_project.add_contributor(
            write_contrib, permissions=['read', 'write'], save=True)
        return public_project

    @pytest.fixture()
    def private_project(self, admin, write_contrib):
        # NOTE(review): local variable is named ``public_project`` but holds
        # a private project (is_public=False); the returned value is correct.
        public_project = ProjectFactory(creator=admin, is_public=False)
        public_project.add_contributor(
            write_contrib, permissions=['read', 'write'], save=True)
        return public_project

    @pytest.fixture()
    def subject(self):
        return SubjectFactory()

    @pytest.fixture()
    def provider(self):
        # Unmoderated provider (no reviews workflow).
        return PreprintProviderFactory()

    @pytest.fixture()
    def file_one_public_project(self, admin, public_project):
        return test_utils.create_test_file(
            public_project, admin, 'toe_socks_and_sunrises.pdf')

    @pytest.fixture()
    def unpublished_preprint(self, admin, provider, subject, public_project):
        # Not yet published and still in the 'initial' machine state.
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=provider,
            subjects=[[subject._id]],
            project=public_project,
            is_published=False,
            machine_state='initial')

    @pytest.fixture()
    def private_preprint(self, admin, provider, subject, private_project):
        # Unpublished but already 'accepted', attached to a private project.
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=provider,
            subjects=[[subject._id]],
            project=private_project,
            is_published=False,
            machine_state='accepted')

    @pytest.fixture()
    def abandoned_private_preprint(
            self, admin, provider, subject, private_project):
        # 'Abandoned': never left the 'initial' state; private project.
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=provider,
            subjects=[[subject._id]],
            project=private_project,
            is_published=False,
            machine_state='initial')

    @pytest.fixture()
    def abandoned_public_preprint(
            self, admin, provider, subject, public_project):
        # 'Abandoned' preprint attached to a public project.
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=provider,
            subjects=[[subject._id]],
            project=public_project,
            is_published=False,
            machine_state='initial')

    @pytest.fixture()
    def abandoned_private_url(self, abandoned_private_preprint):
        return '/{}preprints/{}/'.format(
            API_BASE, abandoned_private_preprint._id)

    @pytest.fixture()
    def abandoned_public_url(self, abandoned_public_preprint):
        return '/{}preprints/{}/'.format(
            API_BASE, abandoned_public_preprint._id)

    @pytest.fixture()
    def unpublished_url(self, unpublished_preprint):
        return '/{}preprints/{}/'.format(API_BASE, unpublished_preprint._id)

    @pytest.fixture()
    def private_url(self, private_preprint):
        return '/{}preprints/{}/'.format(API_BASE, private_preprint._id)

    def test_preprint_is_published_detail(
            self, app, admin, write_contrib, non_contrib,
            unpublished_preprint, unpublished_url):
        # Unpublished preprints are admin-only.
        # test_unpublished_visible_to_admins
        res = app.get(unpublished_url, auth=admin.auth)
        assert res.json['data']['id'] == unpublished_preprint._id

        # test_unpublished_invisible_to_write_contribs
        res = app.get(
            unpublished_url,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_invisible_to_non_contribs
        res = app.get(
            unpublished_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_invisible_to_public
        res = app.get(unpublished_url, expect_errors=True)
        assert res.status_code == 401

    def test_preprint_is_public_detail(
            self, app, admin, write_contrib, non_contrib,
            private_preprint, private_url):
        # Private (accepted, unpublished) preprints: contributors only.
        # test_private_visible_to_admins
        res = app.get(private_url, auth=admin.auth)
        assert res.json['data']['id'] == private_preprint._id

        # test_private_visible_to_write_contribs
        res = app.get(private_url, auth=write_contrib.auth)
        assert res.status_code == 200

        # test_private_invisible_to_non_contribs
        res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_private_invisible_to_public
        res = app.get(private_url, expect_errors=True)
        assert res.status_code == 401

    def test_preprint_is_abandoned_detail(
            self, app, admin, write_contrib,
            non_contrib, abandoned_private_preprint,
            abandoned_public_preprint,
            abandoned_private_url,
            abandoned_public_url):
        # Abandoned preprints are admin-only regardless of project privacy.
        # test_abandoned_private_visible_to_admins
        res = app.get(abandoned_private_url, auth=admin.auth)
        assert res.json['data']['id'] == abandoned_private_preprint._id

        # test_abandoned_private_invisible_to_write_contribs
        res = app.get(
            abandoned_private_url,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_abandoned_private_invisible_to_non_contribs
        res = app.get(
            abandoned_private_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_abandoned_private_invisible_to_public
        res = app.get(abandoned_private_url, expect_errors=True)
        assert res.status_code == 401

        # test_abandoned_public_visible_to_admins
        res = app.get(abandoned_public_url, auth=admin.auth)
        assert res.json['data']['id'] == abandoned_public_preprint._id

        # test_abandoned_public_invisible_to_write_contribs
        res = app.get(
            abandoned_public_url,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_abandoned_public_invisible_to_non_contribs
        res = app.get(
            abandoned_public_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_abandoned_public_invisible_to_public
        res = app.get(abandoned_public_url, expect_errors=True)
        assert res.status_code == 401
@pytest.mark.django_db
class TestReviewsPreprintDetailPermissions:
    """Read-permission matrix for preprints under a moderated provider.

    With a 'pre-moderation' reviews workflow, PENDING preprints are visible
    to write contributors (unlike the unmoderated case), while INITIAL-state
    preprints remain admin-only.
    """

    @pytest.fixture()
    def admin(self):
        # Project creator and preprint admin.
        return AuthUserFactory()

    @pytest.fixture()
    def write_contrib(self):
        # Added to the projects with read+write (not admin) permissions.
        return AuthUserFactory()

    @pytest.fixture()
    def non_contrib(self):
        # Authenticated user with no relation to the projects/preprints.
        return AuthUserFactory()

    @pytest.fixture()
    def public_project(self, admin, write_contrib):
        public_project = ProjectFactory(creator=admin, is_public=True)
        public_project.add_contributor(
            write_contrib, permissions=['read', 'write'], save=True)
        return public_project

    @pytest.fixture()
    def private_project(self, admin, write_contrib):
        # NOTE(review): local variable is named ``public_project`` but holds
        # a private project (is_public=False); the returned value is correct.
        public_project = ProjectFactory(creator=admin, is_public=False)
        public_project.add_contributor(
            write_contrib, permissions=['read', 'write'], save=True)
        return public_project

    @pytest.fixture()
    def subject(self):
        return SubjectFactory()

    @pytest.fixture()
    def reviews_provider(self):
        # Provider with moderation enabled.
        return PreprintProviderFactory(reviews_workflow='pre-moderation')

    @pytest.fixture()
    def file_one_public_project(self, admin, public_project):
        return test_utils.create_test_file(
            public_project, admin, 'toe_socks_and_sunrises.pdf')

    @pytest.fixture()
    def file_one_private_project(self, admin, private_project):
        return test_utils.create_test_file(
            private_project, admin, 'toe_socks_and_sunsets.pdf')

    @pytest.fixture()
    def unpublished_reviews_preprint(
            self, admin, reviews_provider, subject, public_project):
        # Unpublished and awaiting moderation (PENDING).
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=reviews_provider,
            subjects=[[subject._id]],
            project=public_project,
            is_published=False,
            machine_state=DefaultStates.PENDING.value)

    @pytest.fixture()
    def unpublished_reviews_initial_preprint(
            self, admin, reviews_provider, subject, public_project):
        # Unpublished and never submitted for moderation (INITIAL).
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunrises.pdf',
            provider=reviews_provider,
            subjects=[[subject._id]],
            project=public_project,
            is_published=False,
            machine_state=DefaultStates.INITIAL.value)

    @pytest.fixture()
    def private_reviews_preprint(
            self, admin, reviews_provider, subject, private_project):
        # PENDING preprint attached to a private project.
        return PreprintFactory(
            creator=admin,
            filename='toe_socks_and_sunsets.pdf',
            provider=reviews_provider,
            subjects=[[subject._id]],
            project=private_project,
            is_published=False,
            machine_state=DefaultStates.PENDING.value)

    @pytest.fixture()
    def unpublished_url(self, unpublished_reviews_preprint):
        return '/{}preprints/{}/'.format(
            API_BASE, unpublished_reviews_preprint._id)

    @pytest.fixture()
    def unpublished_initial_url(self, unpublished_reviews_initial_preprint):
        return '/{}preprints/{}/'.format(
            API_BASE, unpublished_reviews_initial_preprint._id)

    @pytest.fixture()
    def private_url(self, private_reviews_preprint):
        return '/{}preprints/{}/'.format(
            API_BASE, private_reviews_preprint._id)

    def test_reviews_preprint_is_published_detail(
            self, app, admin, write_contrib, non_contrib,
            unpublished_reviews_preprint, unpublished_url):
        # test_unpublished_visible_to_admins
        res = app.get(unpublished_url, auth=admin.auth)
        assert res.json['data']['id'] == unpublished_reviews_preprint._id

        # test_unpublished_visible_to_write_contribs
        # (expect_errors=True is harmless when the request succeeds)
        res = app.get(
            unpublished_url,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 200

        # test_unpublished_invisible_to_non_contribs
        res = app.get(
            unpublished_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_invisible_to_public
        res = app.get(unpublished_url, expect_errors=True)
        assert res.status_code == 401

    def test_reviews_preprint_initial_detail(
            self, app, admin, write_contrib, non_contrib,
            unpublished_reviews_initial_preprint,
            unpublished_initial_url):
        # INITIAL-state preprints stay admin-only even under moderation.
        # test_unpublished_visible_to_admins
        res = app.get(unpublished_initial_url, auth=admin.auth)
        assert res.json['data']['id'] == unpublished_reviews_initial_preprint._id

        # test_unpublished_invisible_to_write_contribs
        res = app.get(
            unpublished_initial_url,
            auth=write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_invisible_to_non_contribs
        res = app.get(
            unpublished_initial_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403

        # test_unpublished_invisible_to_public
        res = app.get(unpublished_initial_url, expect_errors=True)
        assert res.status_code == 401

    def test_reviews_preprint_is_public_detail(
            self, app, admin, write_contrib, non_contrib,
            private_reviews_preprint, private_url):
        # test_private_visible_to_admins
        res = app.get(private_url, auth=admin.auth)
        assert res.json['data']['id'] == private_reviews_preprint._id

        # test_private_visible_to_write_contribs
        # (expect_errors=True is harmless when the request succeeds)
        res = app.get(private_url, auth=write_contrib.auth, expect_errors=True)
        assert res.status_code == 200

        # test_private_invisible_to_non_contribs
        res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_private_invisible_to_public
        res = app.get(private_url, expect_errors=True)
        assert res.status_code == 401
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the database functions for SPLAT
"""
# imports: internal
import os
import re
import requests
# imports: external
import numpy
# splat functions and codes
from splat.initialize import *
from splat.utilities import *
import splat
def bibTexParser(bib_input, **kwargs):
    '''
    :Purpose:
        Parses a bibtex segment and returns a dictionary of parameter fields

    :Required parameters:
        :param bib_tex: String containing bibtex data in standard format

    :Optional parameters:
        None

    :Output:
        A dictionary containing the parsed bibtex information; the raw input
        is preserved under the 'bib_tex' key
    '''
    bib_dict = {'bib_tex': bib_input}
    # NOTE: assumes one "field = value" pair per ',\n'-terminated line; a
    # field wrapped over several lines is only partially captured.
    bib_tex = bib_input.split(',\n')

    # First line looks like "@TYPE{bibcode"; pull out both pieces.
    bib_dict['type'] = bib_tex[0][1:bib_tex[0].find('{')]
    bib_dict['bibcode'] = bib_tex[0][bib_tex[0].find('{') + 1:]

    for line in bib_tex[1:]:
        line = line.strip()
        line = line.replace('{', '').replace('}', '').replace('\"', '').replace('\n@', '').replace('\n', '').replace('\t', '')
        # Split on the FIRST '=' only, so values that themselves contain '='
        # (URLs with query strings, DOIs) are preserved intact; the previous
        # unbounded split truncated such values at the second '='.
        line = line.split('=', 1)
        if len(line) > 1:
            line[0] = line[0].strip().lower()
            line[1] = line[1].strip()
            bib_dict[line[0]] = line[1]

    # Journal massaging: map journal macros (with a 1- or 2-character
    # prefix, e.g. "\apj") onto their long names via the lookup table.
    if 'journal' in list(bib_dict.keys()):
        if bib_dict['journal'][1:].lower() in list(JOURNALS_LONGNAMES.keys()):
            bib_dict['journal'] = JOURNALS_LONGNAMES[bib_dict['journal'][1:].lower()]
        elif bib_dict['journal'][2:].lower() in list(JOURNALS_LONGNAMES.keys()):
            bib_dict['journal'] = JOURNALS_LONGNAMES[bib_dict['journal'][2:].lower()]
        else:
            pass
    return bib_dict
def veryShortRef(bib_dict, **kwargs):
    '''
    :Purpose:
        Takes a bibtex dictionary and returns a very short (in-line) version of the citation

    :Required parameters:
        :param bib_tex: Dictionary output from bibTexParser, else a bibcode that is fed into bibTexParser

    :Optional parameters:
        None

    :Output:
        A string of the format ``Burgasser et al. (2006)``, or '' if the
        input could not be resolved to a bibtex dictionary
    '''
    if type(bib_dict) is not dict:
        # ``numpy.str`` was removed in numpy 1.24 (AttributeError on access);
        # accept plain and numpy strings explicitly instead.
        if isinstance(bib_dict, (str, numpy.str_)):
            bib_dict = getBibTeX(str(bib_dict), **kwargs)
            if isinstance(bib_dict, dict) == False: return ''
        else:
            if kwargs.get('verbose', False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary')
            return ''

    # First author's surname: undo the '~' initials separator and drop the
    # trailing comma from "Surname, I.".
    authors = bib_dict['author'].split(' and ')
    a = authors[0].replace('~', ' ').split(' ')
    a = a[0].replace(',', '')
    if len(authors) == 1:
        output = a
    else:
        output = '{} et al.'.format(a)

    # fill in missing data
    if 'year' not in bib_dict.keys():
        bib_dict['year'] = ''
    return output + ' ({})'.format(bib_dict['year'])
def shortRef(bib_dict, **kwargs):
    '''
    :Purpose:
        Takes a bibtex dictionary and returns a short (in-line) version of the citation

    :Required parameters:
        :param bib_tex: Dictionary output from bibTexParser, else a bibcode that is fed into bibTexParser

    :Optional parameters:
        None

    :Output:
        A string of the format ``Burgasser, A. J., et al. (2006, ApJ, 710, 1142)``,
        or '' if the input could not be resolved to a bibtex dictionary
    '''
    if type(bib_dict) is not dict:
        # ``numpy.str`` was removed in numpy 1.24 (AttributeError on access);
        # accept plain and numpy strings explicitly instead.
        if isinstance(bib_dict, (str, numpy.str_)):
            bib_dict = getBibTeX(str(bib_dict), **kwargs)
            if isinstance(bib_dict, dict) == False: return ''
        else:
            if kwargs.get('verbose', False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary')
            return ''

    authors = bib_dict['author'].split(' and ')
    if len(authors) == 1:
        output = '{}'.format(authors[0].replace('~', ' '))
    elif len(authors) == 2:
        output = '{} & {}'.format(authors[0].replace('~', ' '), authors[1].replace('~', ' '))
    else:
        output = '{} et al.'.format(authors[0].replace('~', ' '))

    # fill in missing data so the format string below never raises
    for key in ['year', 'journal', 'volume', 'pages']:
        if key not in bib_dict.keys():
            bib_dict[key] = ''
    return output + ' ({}, {}, {}, {})'.format(bib_dict['year'], bib_dict['journal'], bib_dict['volume'], bib_dict['pages'])
def longRef(bib_dict, **kwargs):
    '''
    :Purpose:
        Takes a bibtex dictionary and returns a long (in-line) version of the citation

    :Required parameters:
        :param bib_tex: Dictionary output from bibTexParser, else a bibcode that is fed into bibTexParser

    :Optional parameters:
        None

    :Output:
        A string of the format ``Burgasser, A. J., Cruz, K. L., Cushing, M., et al. SpeX Spectroscopy of Unresolved Very Low Mass Binaries.
        I. Identification of 17 Candidate Binaries Straddling the L Dwarf/T Dwarf Transition. ApJ 710, 1142 (2010)``,
        or '' if the input could not be resolved to a bibtex dictionary
    '''
    if type(bib_dict) is not dict:
        # ``numpy.str`` was removed in numpy 1.24 (AttributeError on access);
        # accept plain and numpy strings explicitly instead.
        if isinstance(bib_dict, (str, numpy.str_)):
            bib_dict = getBibTeX(str(bib_dict), **kwargs)
            if isinstance(bib_dict, dict) == False: return ''
        else:
            if kwargs.get('verbose', False): print('Input to longRef is neither a bibcode nor a bibTex dictionary')
            return ''

    # up to three named authors, then "et al"
    authors = bib_dict['author'].split(' and ')
    if len(authors) == 1:
        output = '{}'.format(authors[0].replace('~', ' '))
    elif len(authors) == 2:
        output = '{} & {}'.format(authors[0].replace('~', ' '), authors[1].replace('~', ' '))
    elif len(authors) == 3:
        output = '{}, {} & {}'.format(authors[0].replace('~', ' '), authors[1].replace('~', ' '), authors[2].replace('~', ' '))
    else:
        output = '{}, {}, {}, et al'.format(authors[0].replace('~', ' '), authors[1].replace('~', ' '), authors[2].replace('~', ' '))

    # fill in missing data so the format string below never raises
    for key in ['year', 'title', 'journal', 'volume', 'pages']:
        if key not in bib_dict.keys():
            bib_dict[key] = ''
    return output + ' "{}". {}, {}, {} ({})'.format(bib_dict['title'], bib_dict['journal'], bib_dict['volume'], bib_dict['pages'], bib_dict['year'])
def veryLongRef(bib_dict, **kwargs):
    '''
    :Purpose:
        Takes a bibtex dictionary and returns a long (in-line) version of the citation
        listing ALL authors

    :Required parameters:
        :param bib_tex: Dictionary output from bibTexParser, else a bibcode that is fed into bibTexParser

    :Optional parameters:
        None

    :Output:
        A string of the format ``Burgasser, A. J., Cruz, K. L., Cushing, M., et al. SpeX Spectroscopy of Unresolved Very Low Mass Binaries.
        I. Identification of 17 Candidate Binaries Straddling the L Dwarf/T Dwarf Transition. ApJ 710, 1142 (2010)``,
        or '' if the input could not be resolved to a bibtex dictionary
    '''
    if type(bib_dict) is not dict:
        # ``numpy.str`` was removed in numpy 1.24 (AttributeError on access);
        # accept plain and numpy strings explicitly instead.
        if isinstance(bib_dict, (str, numpy.str_)):
            bib_dict = getBibTeX(str(bib_dict), **kwargs)
            if isinstance(bib_dict, dict) == False: return ''
        else:
            if kwargs.get('verbose', False): print('Input to verylongRef is neither a bibcode nor a bibTex dictionary')
            return ''

    authors = bib_dict['author'].split(' and ')
    if len(authors) == 1:
        output = '{}'.format(authors[0].replace('~', ' '))
    elif len(authors) == 2:
        output = '{} & {}'.format(authors[0].replace('~', ' '), authors[1].replace('~', ' '))
    else:
        # List every leading author, then close with "A & B". The slice was
        # previously [:-3], which silently dropped the third-from-last author
        # (and, with exactly three authors, dropped the FIRST author).
        output = ''
        for a in authors[:-2]:
            output += '{}, '.format(a.replace('~', ' '))
        output += '{} & {}'.format(authors[-2].replace('~', ' '), authors[-1].replace('~', ' '))

    # fill in missing data so the format string below never raises
    for key in ['year', 'title', 'journal', 'volume', 'pages']:
        if key not in bib_dict.keys():
            bib_dict[key] = ''
    return output + ' "{}". {}, {}, {} ({})'.format(bib_dict['title'], bib_dict['journal'], bib_dict['volume'], bib_dict['pages'], bib_dict['year'])
def citeURL(bib_dict, **kwargs):
    '''
    :Purpose:
        Generate the URL corresponding to a citation, based on the bibcode and NASA ADS syntax

    :Required parameters:
        :param bib_tex: Dictionary output from bibTexParser, else a bibcode string

    :Optional parameters:
        None

    :Output:
        The ADS abstract URL string for the citation

    :Raises:
        NameError if the input is neither a bibcode string nor a bibtex
        dictionary, or if the dictionary lacks a 'bibcode' key
    '''
    if type(bib_dict) is not dict:
        # ``numpy.str`` was removed in numpy 1.24 (AttributeError on access);
        # accept plain and numpy strings explicitly instead.
        if isinstance(bib_dict, (str, numpy.str_)):
            # assume this is a bibcode
            return '{}{}/abstract'.format(CITATION_URL_BASE, str(bib_dict))
        else:
            raise NameError('Input to citeURL is neither a bibcode nor a bibTex dictionary')
    else:
        if 'bibcode' in list(bib_dict.keys()):
            return '{}{}/abstract'.format(CITATION_URL_BASE, bib_dict['bibcode'])
        else:
            raise NameError('BibTex dictionary does not contain a bibcode')
def processBiblibrary(biblibrary, verbose=False):
    '''
    Purpose
        Processes a bibtex .bib library (multiple bibtex entries) into a dictionary whose keys
        are the bibcode

    :Required parameters:
        :param biblibrary: .bib file containing the bibtex entries

    :Optional parameters:
        :param: verbose = False: Set to True to provide extensive feedback

    :Output:
        A dictionary containing the bibtex information, organized by bibcode key
    '''
    if not os.path.exists(os.path.normpath(biblibrary)):
        raise ValueError('Could not find bibtex library file {}'.format(biblibrary))
    with open(os.path.normpath(biblibrary), 'r') as bib_file:
        text = bib_file.read()

    # find all of the bibtex codes; entry headers may be upper case
    # ("@ARTICLE{") or lower case ("@article{"), and the case of the first
    # header found is assumed for the whole file
    output = {}
    flg = 'upper'
    in_lib = re.search('@[A-Z]+{', text)
    if in_lib==None:
        flg = 'lower'
        in_lib = re.search('@[a-z]+{', text)
        if in_lib==None:
            raise ValueError('Cannot find any bib entries in text {}'.format(text[:1000]))
    # Walk the file one entry at a time: find the next header, cut the entry
    # off at the following "\n@", parse it, keep the remainder for the next
    # pass.  NOTE(review): if trailing text contains no further header,
    # ``in_lib.start()`` below would raise AttributeError — assumes a
    # well-formed .bib file; confirm against a malformed library.
    while in_lib != None:
        if flg=='upper': in_lib = re.search('@[A-Z]+{', text)
        else: in_lib = re.search('@[a-z]+{', text)
        asc = text[in_lib.start():]
        in_lib = re.search('\n@', asc)
        if in_lib != None:
            # NOTE(review): keeps 2 characters ahead of the "\n@" boundary in
            # the remaining text, presumably to retain the entry terminator —
            # confirm against a multi-entry .bib file
            text = asc[(in_lib.start()-2):]
            asc = asc[:in_lib.end()]
        p = bibTexParser(asc)
        output[p['bibcode']] = p
    return output
def getBibTeX(bibcode, **kwargs):
    '''
    Purpose
        Takes a bibcode and returns a dictionary containing the bibtex information; looks either in internal SPLAT
        or user-supplied bibfile, or seeks online. If nothing found, gives a soft warning and returns False

    :Note:
        **Currently not functional**

    :Required parameters:
        :param bibcode: Bibcode string to look up (e.g., '2014ApJ...787..126L')

    :Optional parameters:
        :param biblibrary: Filename for biblibrary to use in place of SPLAT internal one
        :type string: optional, default = ''
        :param online: If True, go directly online; if False, do not try to go online
        :type logical: optional, default = null

    :Output:
        - A dictionary containing the bibtex fields, or False if not found
    '''
    # go online first if directed to do so
    if kwargs.get('online', False) and checkOnline():
        bib_tex = getBibTeXOnline(bibcode)
    # read locally first
    else:
        biblibrary = kwargs.get('biblibrary', SPLAT_PATH+DB_FOLDER+BIBFILE)
        # check the file; fall back to the internal SPLAT library if the
        # user-supplied path does not exist
        if not os.path.exists(os.path.normpath(biblibrary)):
            if kwargs.get('verbose', True) == True: print('Could not find bibtex library {}'.format(biblibrary))
            biblibrary = SPLAT_PATH+DB_FOLDER+BIBFILE
        if not os.path.exists(os.path.normpath(biblibrary)):
            raise NameError('Could not find SPLAT main bibtext library {}; something is wrong'.format(biblibrary))

        with open(os.path.normpath(biblibrary), 'r') as bib_file:
            text = bib_file.read()
            #print re.search('@[A-Z]+{' + bib_code, bib_file)
            # NOTE(review): only matches lower-case entry headers
            # ("@article{..."); upper-case entries will be missed and trigger
            # the online fallback — confirm the library's header case
            in_lib = re.search('@[a-z]+{' + bibcode, text)
            if in_lib == None:
                if kwargs.get('force', False): return False
                if kwargs.get('verbose', False) == True: print('Bibcode {} not in bibtex library {}; checking online'.format(bibcode, biblibrary))
                bib_tex = getBibTeXOnline(bibcode)
            else:
                # slice out this entry: from its header to the next "\n@"
                begin = text.find(re.search('@[a-z]+{' + bibcode, text).group(0))
                text = text[begin:]
                end = text.find('\n@')
                bib_tex = text[:end]

    # getBibTeXOnline returns False on failure; propagate that
    if bib_tex == False:
        return False
    else:
        return bibTexParser(bib_tex)
def getBibTeXOnline(bibcode, verbose=False):
    '''
    Purpose
        Takes a bibcode and searches for the bibtex information online through NASA ADS; requires user to be online.
        If successful, returns full bibtex string block; otherwise False.

    :Required parameters:
        :param bibcode: Bibcode string to look up (e.g., '2014ApJ...787..126L')

    :Optional parameters:
        :param bibfile: Filename for bibfile to use in place of SPLAT internal one
        :type string: optional, default = ''
        :param online: If True, go directly online; if False, do not try to go online
        :type logical: optional, default = null

    :Output:
        - A string block of the basic bibtex information
    '''
    if not checkOnline():
        return False

    # NOTE(review): this is the retired "classic" ADS endpoint; it may no
    # longer resolve — confirm against the current ADS export API
    url_begin = "http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode="
    url_end = "&data_type=BIBTEX"
    url = url_begin + bibcode + url_end
    bib_tex = requests.get(url).content

    # requests returns bytes; decode before string inspection
    if isinstance(bib_tex, bytes):
        bib_tex = bib_tex.decode()

    # Check if content is in html which means bad bib_code was given
    if "<HTML>" in bib_tex:
        if verbose == True: print('{} is not a valid online bib code.'.format(bibcode))
        return False
    # Cut off extraneous info from website before the bibtex code
    else:
        begin = bib_tex.find('@')
        bib_tex = bib_tex[begin:]
        return bib_tex
def nsfbib(biblibrary, file=''):
    '''
    Purpose
        Takes a biblibrary and generates an NSF-formatted reference list, based on the following requirements:
        "Each reference must include the names of all authors (in the same sequence in which they
        appear in the publication), the article and journal title, book title, volume number, page numbers, a
        and year of publication. If the proposer has a website
        address readily available, that information should be included in the citation."

    :Required parameters:
        :param biblibrary: .bib file containing the bibtex entries

    :Optional parameters:
        :param file: Filename to save latex output (default='citations.tex')

    :Output:
        True if the list was saved to ``file``; otherwise (no file given, or
        the save failed) a single newline-joined string of citations
    '''
    try:
        cites = processBiblibrary(biblibrary, verbose=True)
    # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # still propagate
    except Exception:
        raise ValueError('Could not parse biblibrary input {}; make sure this a full path to the .bib file'.format(biblibrary))

    # process each citation into one formatted line
    output = []
    for c in list(cites.keys()):
        # author list: "A, B, & C" with '~' separators removed
        line = cites[c]['author']
        li = line.rsplit(' and ', 1)
        line = ', & '.join(li)
        line = line.replace(' and ', ', ').replace('~', '')
        line = line + ' "' + cites[c]['title'] + '." ' + cites[c]['year']
        # article?
        if cites[c]['type'] == 'article':
            if 'journal' in list(cites[c].keys()): line = line + ', ' + cites[c]['journal']
            if 'volume' in list(cites[c].keys()): line = line + ', ' + cites[c]['volume']
            if 'pages' in list(cites[c].keys()): line = line + ', ' + cites[c]['pages']
        # book?
        if cites[c]['type'] == 'book':
            if 'publisher' in list(cites[c].keys()): line = line + ', ' + cites[c]['publisher']
        # add url if one is recorded
        if 'adsurl' in list(cites[c].keys()): line = line + ' (' + cites[c]['adsurl'] + ')'
        elif 'bdsk-url-1' in list(cites[c].keys()): line = line + ' (' + cites[c]['bdsk-url-1'] + ')'
        output.append(line)
    output.sort()

    # save to file if provided; context manager guarantees the handle is
    # closed even when a write fails (previous code leaked it)
    if file != '':
        try:
            with open(os.path.normpath(file), 'w') as f:
                for o in output:
                    f.write(o + '\n')
            return True
        except Exception:
            # best effort: warn and fall through to returning the text
            print('Warning: problem saving to output file {}'.format(file))
    return ''.join(o + '\n' for o in output)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
    """Exercises loss_ops.absolute_difference across weight shapes."""

    def setUp(self):
        # |predictions - labels| elementwise is [3, 1, 10, 13, 3, 3]; its
        # mean, 5.5, is the unweighted loss several cases below rely on.
        self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
        self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

    def testValueErrorThrownWhenWeightIsNone(self):
        # An explicit weights=None must be rejected outright.
        with self.test_session():
            with self.assertRaises(ValueError):
                loss_ops.absolute_difference(
                    self._predictions, self._predictions, weights=None)

    def testAllCorrectNoLossWeight(self):
        # Identical tensors -> zero loss.
        loss_op = loss_ops.absolute_difference(self._predictions, self._predictions)
        with self.test_session():
            self.assertAlmostEqual(0.0, loss_op.eval(), 3)

    def testNonZeroLoss(self):
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels)
        with self.test_session():
            self.assertAlmostEqual(5.5, loss_op.eval(), 3)

    def testNonZeroLossWithPythonScalarWeight(self):
        # A scalar weight scales the loss linearly.
        w = 2.3
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels, w)
        with self.test_session():
            self.assertAlmostEqual(5.5 * w, loss_op.eval(), 3)

    def testNonZeroLossWithScalarTensorWeight(self):
        w = 2.3
        loss_op = loss_ops.absolute_difference(
            self._predictions, self._labels, constant_op.constant(w))
        with self.test_session():
            self.assertAlmostEqual(5.5 * w, loss_op.eval(), 3)

    def testNonZeroLossWithOneDimBatchSpecificWeights(self):
        # One weight per batch row; the zero weight drops the second row.
        w = constant_op.constant([1.2, 0.0], shape=[2,])
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels, w)
        with self.test_session():
            self.assertAlmostEqual(5.6, loss_op.eval(), 3)

    def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
        # Same per-row weights expressed with an explicit trailing axis.
        w = constant_op.constant([1.2, 0.0], shape=[2, 1])
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels, w)
        with self.test_session():
            self.assertAlmostEqual(5.6, loss_op.eval(), 3)

    def testNonZeroLossWithSampleSpecificWeights(self):
        # One weight per element.
        w = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels, w)
        with self.test_session():
            self.assertAlmostEqual(16.6, loss_op.eval(), 3)

    def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
        w = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
        loss_op = loss_ops.absolute_difference(self._predictions, self._labels, w)
        with self.test_session():
            self.assertAlmostEqual(6.0, loss_op.eval(), 3)

    def testLossWithSampleSpecificWeightsAllZero(self):
        # All-zero weights zero out the loss entirely.
        loss_op = loss_ops.absolute_difference(
            self._predictions, self._labels, array_ops.zeros((2, 3)))
        with self.test_session():
            self.assertAlmostEqual(0.0, loss_op.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.softmax_cross_entropy.

  Uses assertEqual throughout; assertEquals is a deprecated unittest alias.
  """

  def testNoneWeightRaisesValueError(self):
    """weights=None must be rejected at graph-construction time."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[1, 0, 0],
                                   [0, 1, 0],
                                   [0, 0, 1]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=None)

  def testAllCorrect(self):
    """Confident, correct one-hot predictions give (near) zero loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEqual('softmax_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrong(self):
    """Confidently wrong predictions: per-example loss ~10, mean 10."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    """A python-float weight scales the mean loss."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A scalar tensor weight behaves like a python scalar."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels,
                                            constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Per-example weights: mean of the weighted per-example losses."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testAllWrongAllWeightsMissing(self):
    """All-zero weights yield exactly zero loss (no 0/0 NaN)."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testSomeWeightsMissing(self):
    """Zero-weight examples are excluded from the mean's denominator."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(12.0, loss.eval(), 3)

  def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
    """Per-element (2-D) weights are not valid for softmax loss."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()

  def testSoftmaxLabelSmoothing(self):
    """Label smoothing redistributes L/n mass to the wrong classes."""
    with self.test_session():
      # Softmax Cross Entropy Loss is:
      #   -\sum_i p_i \log q_i
      # where for a softmax activation
      # \log q_i = x_i - \log \sum_j \exp x_j
      #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100] the log partition function
      # becomes \log ( exp(0) + exp(-200) + exp(-200) ) = 0
      # so our log softmaxes become: [0, -200, -200]
      # so our cross entropy loss is:
      # -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 0]])
      label_smoothing = 0.1
      loss = loss_ops.softmax_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
      expected_value = 400.0 * label_smoothing / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testLossWithDynamicallyShapedWeights1D(self):
    """Weights fed through a rank-1 placeholder of unknown length."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)

  def testLossWithDynamicallyShapedWeights2D(self):
    """Weights fed through a rank-2 placeholder of unknown shape."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sparse_softmax_cross_entropy.

  Uses assertEqual throughout; assertEquals is a deprecated unittest alias.
  """

  def testNoneWeightRaisesValueError(self):
    """weights=None must be rejected at graph-construction time."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0], [1], [2]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)

  def testAllCorrectInt32Labels(self):
    """Correct int32 class-index labels give (near) zero loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectInt64Labels(self):
    """Correct int64 class-index labels give (near) zero loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectNonColumnLabels(self):
    """Rank-1 (non-column) labels are accepted as well."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([0, 1, 2])
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrongInt32Labels(self):
    """Confidently wrong int32 labels: mean loss ~10."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongInt64Labels(self):
    """Confidently wrong int64 labels: mean loss ~10."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongNonColumnLabels(self):
    """Confidently wrong rank-1 labels: mean loss ~10."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    """A python-float weight scales the mean loss."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A scalar tensor weight behaves like a python scalar."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(
          logits, labels, constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Per-example rank-1 weights: weighted mean of per-example losses."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testNonZeroLossWithColumnWeights(self):
    """Per-example column ([3, 1]) weights give the same result."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([[1.2], [3.4], [5.6]])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testAllWrongAllWeightsMissing(self):
    """All-zero weights yield exactly zero loss (no 0/0 NaN)."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testSomeWeightsMissing(self):
    """Zero-weight examples are excluded from the mean's denominator."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(12.0, loss.eval(), 3)

  def testMeasurementSpecificWeightsRaisesException(self):
    """Per-element (2-D, [3, 3]) weights are invalid for sparse loss."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightSizeRaisesException(self):
    """The weight tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelSizeRaisesException(self):
    """The label tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([1.2, 3.4, 5.6])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightShapeRaisesException(self):
    """The weight tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelShapeRaisesException(self):
    """The label tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 1], [2, 3]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testLossWithDynamicallyShapedWeights1D(self):
    """Weights fed through a rank-1 placeholder of unknown length."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)

  def testLossWithDynamicallyShapedWeights2D(self):
    """Weights fed through a rank-2 placeholder of unknown shape."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sigmoid_cross_entropy.

  Uses assertEqual throughout; assertEquals is a deprecated unittest alias.
  """

  def testAllCorrectSigmoid(self):
    """Confident, correct multi-label predictions give (near) zero loss."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
    """Placeholder logits/labels of shape [None, 1]; loss of ln(1+e^-1)."""
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 1)),
                          labels: np.ones((32, 1)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
    """Same as above but with two label columns."""
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 2)),
                          labels: np.ones((32, 2)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  def testAllWrongSigmoid(self):
    """Confidently wrong predictions: mean loss of 600/9 over 9 elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)

  def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
    """Element-wise weights: weighted sum over sum of weights."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)

  def testMultiCorrectSigmoid(self):
    """Multi-label (two positives per row) correct predictions: zero loss."""
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0],
                                   [-100.0, 100.0, 100.0]])
    labels = constant_op.constant([[1, 0, 1],
                                   [1, 1, 0],
                                   [0, 1, 1]])
    loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
    with self.test_session():
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSigmoidLabelSmoothingCorrect(self):
    """Label smoothing shifts targets toward 0.5 by L/2."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 1]])
      # Sigmoid cross entropy loss is:
      #   max(x,0) - x*z + log(1 + exp(-abs(x)))
      # The new labels are:
      #    z' = z * (1 - L) + 0.5 L
      #    1 -> 1 - 0.5 L
      #    0 -> 0.5 L
      # here we expect:
      # 1/3 * (100 - 100 * (1 - 0.5 L)  + 0
      #       + 0  + 100 * (0.5 L)      + 0
      #       + 0  + 100 * (1 - 0.5 L)  + 0)
      # = 1/3 * (100 + 50 L)
      label_smoothing = 0.1
      loss = loss_ops.sigmoid_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
    """Smoothed sigmoid loss matches smoothed two-class softmax loss."""
    with self.test_session():
      label_smoothing = 0.1
      sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
      sigmoid_labels = constant_op.constant([[1, 0, 1]])
      sigmoid_loss = loss_ops.sigmoid_cross_entropy(
          sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
      softmax_logits = constant_op.constant(
          [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
      softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
      softmax_loss = loss_ops.softmax_cross_entropy(
          softmax_logits, softmax_labels, label_smoothing=label_smoothing)
      self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
  """Unit tests for loss_ops.log_loss."""

  def setUp(self):
    # Hand-compute the per-element log losses for a fixed 2x3 fixture so
    # each test can derive its expected value from a weighted sum.
    predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
    self._np_predictions = predictions
    self._np_labels = labels
    epsilon = 1e-7
    self._expected_losses = np.multiply(
        labels, np.log(predictions + epsilon)) + np.multiply(
            1 - labels, np.log(1 - predictions + epsilon))
    self._predictions = constant_op.constant(predictions)
    self._labels = constant_op.constant(labels)

  def testValueErrorThrownWhenWeightIsNone(self):
    # weights=None must be rejected at graph-construction time.
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._labels, self._labels, weights=None)

  def testAllCorrectNoLossWeight(self):
    # Predictions identical to the labels yield zero loss.
    loss = loss_ops.log_loss(self._labels, self._labels)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testAllCorrectNoLossWeightWithPlaceholder(self):
    # Same as above, with predictions fed through a placeholder.
    preds_ph = array_ops.placeholder(
        dtypes.float32, shape=self._np_labels.shape)
    loss = loss_ops.log_loss(preds_ph, self._labels)
    with self.test_session():
      self.assertAlmostEqual(
          0.0, loss.eval(feed_dict={preds_ph: self._np_labels}), 3)

  def testNonZeroLoss(self):
    # Unweighted mean over all six elements.
    loss = loss_ops.log_loss(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    # A python-float weight scales the unweighted mean loss.
    w = 2.3
    loss = loss_ops.log_loss(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(w * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    # A scalar tensor weight behaves like a python scalar.
    w = 2.3
    loss = loss_ops.log_loss(
        self._predictions, self._labels, constant_op.constant(w))
    with self.test_session():
      self.assertAlmostEqual(w * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
    # Scalar weight combined with fully-shaped placeholder predictions.
    preds_ph = array_ops.placeholder(
        dtypes.float32, shape=self._np_predictions.shape)
    w = 2.3
    loss = loss_ops.log_loss(
        preds_ph, self._labels, constant_op.constant(w))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={preds_ph: self._np_predictions})
      self.assertAlmostEqual(w * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
    # Placeholder with only the rank known; shape resolved at run time.
    preds_ph = array_ops.placeholder(dtypes.float32, shape=[None, None])
    w = 2.3
    loss = loss_ops.log_loss(
        preds_ph, self._labels, constant_op.constant(w))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={preds_ph: self._np_predictions})
      self.assertAlmostEqual(w * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-row weights broadcast across the three columns.
    w = constant_op.constant([1.2, 3.4], shape=[2])
    expected = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected) / 6.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
    # The zero-weighted row drops out of the denominator (3, not 6).
    w = constant_op.constant([1.2, 0], shape=[2])
    expected = np.multiply(self._expected_losses,
                           np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                               (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected) / 3.0, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
    # Same as the 1-D case, but weights carry an explicit trailing axis.
    w = constant_op.constant([1.2, 0], shape=[2, 1])
    expected = np.multiply(self._expected_losses,
                           np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                               (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected) / 3.0, loss.eval(), 3)

  def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
    # Rank matches but the shape does not: must raise.
    w = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._predictions, self._labels, w)

  def testNonZeroLossWithMeasurementSpecificWeights(self):
    # Element-wise weights; five non-zero entries set the denominator.
    w = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected = np.multiply(self._expected_losses, w)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            w, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected) / 5.0, loss.eval(), 3)

  def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
    # Element-wise weights with placeholder-fed predictions.
    w = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected = np.multiply(self._expected_losses, w)
    preds_ph = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    loss = loss_ops.log_loss(
        preds_ph,
        self._labels,
        constant_op.constant(
            w, shape=(2, 3)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={preds_ph: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected) / 5.0, loss, 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    # A single non-zero weight: loss is that element's weighted loss.
    w = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected = np.multiply(self._expected_losses, w)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            w, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected), loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
    # Same single-weight scenario through a placeholder.
    w = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected = np.multiply(self._expected_losses, w)
    preds_ph = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    w_tensor = constant_op.constant(w, shape=(2, 3))
    loss = loss_ops.log_loss(preds_ph, self._labels, w_tensor)
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={preds_ph: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected), loss, 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights must yield exactly zero loss (no 0/0 NaN).
    w_tensor = array_ops.zeros(shape=(2, 3))
    loss = loss_ops.log_loss(self._predictions, self._labels, w_tensor)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
  """Unit tests for loss_ops.hinge_loss."""

  def testIncompatibleShapes(self):
    # A shape mismatch between logits and labels must raise.
    with self.test_session():
      preds = constant_op.constant([[-1.0], [2.1]])
      targets = constant_op.constant([0.0, 1.0])
      with self.assertRaises(ValueError):
        _ = loss_ops.hinge_loss(preds, targets).eval()

  def testAllOutsideMargin(self):
    # Every example is confidently correct (|logit| > 1): zero loss each.
    with self.test_session():
      preds = constant_op.constant([1.2, -1.4, -1.0, 2.1])
      targets = constant_op.constant([1.0, 0.0, 0.0, 1.0])
      per_example = loss_ops.hinge_loss(preds, targets)
      self.assertAllClose(per_example.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)

  def testSomeInsideMargin(self):
    with self.test_session():
      preds = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
      targets = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
      per_example = loss_ops.hinge_loss(preds, targets)
      # Examples 1 and 4 are on the correct side of the hyperplane but within
      # the margin so they incur some (small) loss.
      self.assertAllClose(
          per_example.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)

  def testSomeMisclassified(self):
    with self.test_session():
      preds = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
      targets = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
      per_example = loss_ops.hinge_loss(preds, targets)
      # Examples 2 and 4 are on the wrong side of the hyperplane so they incur
      # some (fairly large) loss.
      self.assertAllClose(
          per_example.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(test.TestCase):
  """Unit tests for loss_ops.mean_squared_error."""

  def setUp(self):
    # Fixed 2x3 fixtures; the unweighted mean squared error is 49.5.
    self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    # weights=None must be rejected at graph-construction time.
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_squared_error(
            self._predictions, self._predictions, weights=None)

  def testAllCorrectNoLossWeight(self):
    # Identical predictions and labels produce zero loss.
    loss = loss_ops.mean_squared_error(self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    loss = loss_ops.mean_squared_error(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(49.5, loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    # A python-float weight scales the unweighted mean loss.
    w = 2.3
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(49.5 * w, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    # A scalar tensor weight behaves like a python scalar.
    w = 2.3
    loss = loss_ops.mean_squared_error(
        self._predictions, self._labels, constant_op.constant(w))
    with self.test_session():
      self.assertAlmostEqual(49.5 * w, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-row weights broadcast across columns.
    w = constant_op.constant([1.2, 3.4], shape=[2,])
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    # Same per-row weights with an explicit trailing broadcast axis.
    w = constant_op.constant([1.2, 3.4], shape=[2, 1])
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    # Fully element-wise weights; the zero entry shrinks the denominator.
    w = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    # Only the last element carries weight.
    w = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(18.0, loss.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights must yield exactly zero loss (no 0/0 NaN).
    w = array_ops.zeros((2, 3))
    loss = loss_ops.mean_squared_error(self._predictions, self._labels, w)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
  """Tests for loss_ops.mean_pairwise_squared_error."""

  def setUp(self):
    self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
    self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
    batch_size, dims = self._labels.shape
    # Compute the expected loss 'manually': for each batch element, sum the
    # squared error between every ordered pair-difference of predictions and
    # the corresponding pair-difference of labels.
    total = np.zeros((batch_size, 1))
    for b in range(batch_size):
      for i in range(dims):
        for j in range(dims):
          x = self._predictions[b, i].item() - self._predictions[b, j].item()
          y = self._labels[b, i].item() - self._labels[b, j].item()
          tmp = (x - y) * (x - y)
          total[b] += tmp
    # Normalize by the number of (i, j) pairs, dims * dims = 9.
    self._expected_losses = np.divide(total, 9.0)

  def testValueErrorThrownWhenWeightIsNone(self):
    # weights=None must be rejected at graph-construction time.
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_pairwise_squared_error(
            predictions=constant_op.constant(self._labels),
            labels=constant_op.constant(self._labels),
            weights=None)

  def testAllCorrectNoLossWeight(self):
    # Identical predictions and labels produce zero loss.
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._labels),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    # Unweighted loss matches the manual computation from setUp.
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)

  def testGradientWithZeroWeight(self):
    # Regression test: gradients through a zero-weighted loss must not
    # produce NaNs (guards against a 0/0 in the backward pass).
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      inputs = array_ops.ones((2, 3))
      weights = variable_scope.get_variable(
          'weights',
          shape=[3, 4],
          initializer=init_ops.truncated_normal_initializer())
      predictions = math_ops.matmul(inputs, weights)
      optimizer = momentum_lib.MomentumOptimizer(
          learning_rate=0.001, momentum=0.9)
      loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
      gradients_to_variables = optimizer.compute_gradients(loss)
      init_op = variables.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init_op)
        for grad, _ in gradients_to_variables:
          np_grad = sess.run(grad)
          self.assertFalse(np.isnan(np_grad).any())

  def testNonZeroLossWithPythonScalarWeight(self):
    # A python-float weight scales the total loss.
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=weights)
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    # A scalar tensor weight behaves like a python scalar.
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarZeroWeight(self):
    # A scalar zero weight yields exactly zero loss.
    weights = 0
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
    # Scalar weight with placeholder-fed predictions and labels.
    weights = 2.3
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-batch-element weights scale each element's pairwise loss.
    weights = np.asarray([2.0, 1.0]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)

  def testZeroLossWithOneDimBatchZeroWeights(self):
    # All-zero per-batch weights yield exactly zero loss.
    weights = np.asarray([0.0, 0.0]).reshape((2, 1))
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weights = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weights = np.zeros((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
  def testLossIsAssociativeAcrossBatchElements(self):
    """loss(b0) + loss(b1) must equal loss(concat(b0, b1)).

    The pairwise loss is computed per batch element, so concatenating two
    single-element batches should simply add their individual losses.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      height = 3
      width = 4
      shape = (1, height, width, 1)
      labels0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      labels1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      loss0 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions0,
          labels=labels0)
      loss1 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions1,
          labels=labels1)
      loss0_1 = loss_ops.mean_pairwise_squared_error(
          predictions=array_ops.concat([predictions0, predictions1], 0),
          labels=array_ops.concat([labels0, labels1], 0))
      # All three losses are evaluated in one session.run so each random
      # tensor is sampled exactly once and shared between the losses.
      with self.test_session() as session:
        loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
        # Sanity check: the individual losses are non-trivial.
        self.assertTrue(loss0 > 0)
        self.assertTrue(loss1 > 0)
        self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
  """Tests for loss_ops.cosine_distance.

  Fixtures are shaped (batch=3, measurements_per_batch=2, dim=3); the
  tests pass dim=2 so the last axis is treated as the vector dimension.
  """

  def setUp(self):
    # Unit vectors; each row pairs with the matching row of _labels.
    self._predictions = np.asarray([
        [1, 0, 0],  # Batch 1
        [0, 0, -1],
        [1, 0, 0],  # Batch 2
        [1, 0, 0],
        [0, 0, -1],  # Batch 3
        [1, 0, 0]
    ]).reshape((3, 2, 3))
    self._labels = np.asarray([[1, 0, 0],
                               [0, 0, 1],
                               [0, 1, 0],
                               [1, 0, 0],
                               [0, 0, 1],
                               [0, 1, 0]]).reshape((3, 2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    # weights=None is rejected up front.
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.cosine_distance(
            predictions=constant_op.constant(self._labels),
            labels=constant_op.constant(self._labels),
            dim=2,
            weights=None)

  def testAllCorrectNoWeights(self):
    # Identical unit vectors -> cosine distance 0.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._labels),
        labels=constant_op.constant(self._labels),
        dim=2)
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 5)

  def testPartiallyCorrectWithIntegerValues(self):
    # Fixture distances per row are 0/2/1/0/2/1, averaging to 1.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2)
    with self.test_session():
      self.assertAlmostEqual(1, loss.eval(), 5)

  def testPartiallyCorrectFloatingPointValues(self):
    # Rows: identical / opposite / orthogonal unit vectors, so the mean
    # cosine distance is (0 + 2 + 1) / 3 = 1.
    predictions = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
         '0.707106781186548 -0.707106781186548 0'))
    labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
                        '0.665139432070255 0.739487441769973 0.103671883216994;'
                        '0.707106781186548 0.707106781186548 0'))
    tf_preds = constant_op.constant(
        predictions, shape=(3, 1, 3), dtype=dtypes.float32)
    tf_labels = constant_op.constant(
        labels, shape=(3, 1, 3), dtype=dtypes.float32)
    loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
    with self.test_session():
      self.assertAlmostEqual(1.0, loss.eval(), 5)

  def testSampleSpecificWeights(self):
    # Only batch 0 is weighted; its mean distance is (0 + 2) / 2 = 1.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant([1, 0, 0]))
    with self.test_session():
      self.assertEqual(1.0, loss.eval())

  def testMeasurementSpecificWeights(self):
    # Four of the six measurements are weighted; their distances sum to 3.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant(
            [1, 0, 0, 1, 1, 1], shape=(3, 2)))
    with self.test_session():
      self.assertEqual(3.0 / 4.0, loss.eval())

  def testValueErrorThrownWithShapelessPlaceholder(self):
    # Weight broadcasting needs a static prediction shape.
    tf_predictions = array_ops.placeholder(dtypes.float32)
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.cosine_distance(
            predictions=tf_predictions,
            labels=constant_op.constant(self._labels),
            dim=2,
            weights=constant_op.constant(
                [1, 0, 0, 1, 1, 1], shape=(3, 2)))

  def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
    # Same expectation as testMeasurementSpecificWeights, but fed.
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.cosine_distance(
        predictions=tf_predictions,
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant(
            [1, 0, 0, 1, 1, 1], shape=(3, 2)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
      self.assertEqual(3.0 / 4.0, loss)

  def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=array_ops.zeros((3,)))
    with self.test_session():
      self.assertEqual(0, loss.eval())

  def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=array_ops.zeros((3, 2)))
    with self.test_session():
      self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
  """compute_weighted_loss must register its result in the loss collection."""

  def testHingeLoss(self):
    logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
    labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
    per_example = loss_ops.hinge_loss(logits, labels)
    # The raw hinge losses are not collected ...
    self.assertFalse(loss_ops.get_losses())
    weighted = loss_ops.compute_weighted_loss(per_example)
    # ... but the weighted reduction is.
    self.assertTrue(loss_ops.get_losses())
    with self.test_session():
      self.assertAllClose(per_example.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
      self.assertAllClose(weighted.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
  """Tests for add_loss / get_losses collection bookkeeping."""

  def testAddExternalLoss(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    hinge = loss_ops.hinge_loss(logits, labels)
    # hinge_loss itself registers nothing in the loss collection.
    self.assertFalse(loss_ops.get_losses())
    loss_ops.add_loss(math_ops.reduce_mean(hinge))
    self.assertTrue(loss_ops.get_losses())
    total_loss = loss_ops.get_total_loss()
    with self.test_session():
      self.assertAllClose(hinge.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
      self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)

  def testNoneLossCollection(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    hinge = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    # A None collection means the added loss is not tracked anywhere.
    loss_ops.add_loss(math_ops.reduce_mean(hinge), loss_collection=None)
    self.assertFalse(loss_ops.get_losses())
    with self.test_session():
      self.assertAllClose(hinge.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)

  def testNoCollectLosses(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    self.assertFalse(loss_ops.get_losses())
    # Disabling collection via arg_scope keeps every loss op below from
    # registering itself.
    with arg_scope([loss_ops.add_loss], loss_collection=None):
      loss_ops.absolute_difference(logits, labels)
      loss_ops.log_loss(logits, labels)
      loss_ops.mean_squared_error(logits, labels)
      loss_ops.sigmoid_cross_entropy(logits, labels)
      loss_ops.softmax_cross_entropy(logits, labels)
    self.assertFalse(loss_ops.get_losses())

  def testNoCollectLossesBatch2(self):
    # Same as testNoCollectLosses with a batch of two identical rows.
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
    self.assertFalse(loss_ops.get_losses())
    with arg_scope([loss_ops.add_loss], loss_collection=None):
      loss_ops.absolute_difference(logits, labels)
      loss_ops.log_loss(logits, labels)
      loss_ops.mean_squared_error(logits, labels)
      loss_ops.sigmoid_cross_entropy(logits, labels)
      loss_ops.softmax_cross_entropy(logits, labels)
    self.assertFalse(loss_ops.get_losses())
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
# -*- coding: utf-8 -*-
# (The MIT License)
#
# Copyright (c) 2014 Kura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
from datetime import datetime
from .action import Action
from .region import Region
class Droplet(object):
    """
    A Digital Ocean droplet.

    :param droplet_data: A dictionary of droplet data from the API.
    """

    # Raw droplet payload as returned by the Digital Ocean API; every
    # property below reads straight out of this dictionary.
    _data = None

    def __init__(self, droplet_data):
        self._data = droplet_data

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<Droplet {0}>".format(self.name)

    @property
    def id(self):
        """
        The droplet ID.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.id
        1234

        :rtype: `integer`.
        """
        return self._data['id']

    @property
    def name(self):
        """
        The name of the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.name
        "kura-test"

        :rtype: `string`.
        """
        return self._data['name']

    @property
    def memory(self):
        """
        The amount of RAM of the droplet, including measurement unit.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.memory
        '512MB'

        :rtype: `string`.
        """
        return self._data['memory']

    @property
    def cpus(self):
        """
        The number of CPUs of the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.cpus
        1

        :rtype: `integer`.
        """
        return self._data['vcpus']

    @property
    def disk_size(self):
        """
        The disk size of the droplet, including measurement unit.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.disk_size
        '20GB'

        :rtype: `string`.
        """
        # The API reports the disk as a bare number of gigabytes.
        return "{0}GB".format(self._data['disk'])

    @property
    def region_name(self):
        """
        The name of the region the droplet is in.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.region_name
        'Amsterdam 2'

        :rtype: `string`.
        """
        return Region.name_from_slug(self._data['region']['slug'])

    def region(self, client):
        """
        Get an instance of `batfish.models.Region` from the droplet's region
        information.

        :param client: An instance of `batfish.client.Client`.
        :rtype: An instance of `batfish.models.Region`.
        """
        return client.region_from_slug(self._data['region']['slug'])

    def image(self, client):
        """
        Get an instance of `batfish.models.Image` from the droplet's image
        information.

        :param client: An instance of `batfish.client.Client`.
        :rtype: An instance of `batfish.models.Image`.
        """
        return client.image_from_id(self._data['image']['id'])

    @property
    def size(self):
        """
        The size of the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.size
        Size(name='512MB', memory='512MB', disk='20GB',
             hourly=0.00744, monthly=5.0)

        :rtype: `collections.NamedTuple`.
        """
        size = namedtuple('Size', 'name memory disk hourly monthly')
        # NOTE: `disk_size` already carries the "GB" suffix; wrapping it in
        # another "{0}GB".format() call (as the previous code did) produced
        # doubled units like '20GBGB'.
        return size(name=self._data['size']['slug'].upper(),
                    memory=self._data['size']['slug'].upper(),
                    disk=self.disk_size,
                    hourly=self._data['size']['price_hourly'],
                    monthly=self._data['size']['price_monthly'])

    @property
    def locked(self):
        """
        The lock status of the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.locked
        False

        :rtype: `boolean`.
        """
        return self._data['locked']

    @property
    def created(self):
        """
        The date and time a droplet was created.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.created
        datetime.datetime(2014, 1, 7, 23, 19, 49)

        :rtype: `datetime.datetime` object.
        """
        return datetime.strptime(self._data['created_at'],
                                 '%Y-%m-%dT%H:%M:%SZ')

    @property
    def status(self):
        """
        The status of the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.status
        'active'

        :rtype: `string`.
        """
        return self._data['status']

    @property
    def networks(self):
        """
        Network connections of a droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.networks
        {'ipv4': [IPv4(ip='10.12.1.1', type='private',
                       gateway='10.12.0.1', netmask='255.255.0.0'),
                  IPv4(ip='95.85.62.206', type='public',
                       gateway='95.85.62.1', netmask='255.255.255.0')],
         'ipv6': []}

        :rtype: `dictionary` of `collection.NamedTuples`s.
        """
        ipv4 = namedtuple('IPv4', 'ip type gateway netmask')
        ipv6 = namedtuple('IPv6', 'ip type gateway')
        networks = {}
        networks['ipv4'] = [ipv4(ip=n['ip_address'], type=n['type'],
                                 gateway=n['gateway'], netmask=n['netmask'])
                            for n in self._data['networks']['v4']]
        networks['ipv6'] = [ipv6(ip=n['ip_address'], type=n['type'],
                                 gateway=n['gateway'])
                            for n in self._data['networks']['v6']]
        return networks

    @property
    def kernel(self):
        """
        The currently active kernel of a droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.kernel
        Kernel(id=140,
               name='Debian 7.0 x64 vmlinuz-3.2.0-4-amd64 (3.2.41-2)',
               version='3.2.0-4-amd64')

        :rtype: `collections.NamedTuple`.
        """
        kernel = namedtuple('Kernel', 'id name version')
        return kernel(id=int(self._data['kernel']['id']),
                      name=self._data['kernel']['name'],
                      version=self._data['kernel']['version'])

    @property
    def backups(self):
        """
        Existing backups of a droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.backups
        [123, 124, 125]

        :rtype: `list` of backup IDs.
        """
        return self._data['backup_ids']

    @property
    def snapshots(self):
        """
        Existing snapshots of a droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.snapshots
        [123, 124, 125]

        :rtype: `list` of snapshot IDs.
        """
        return self._data['snapshot_ids']

    def actions(self, client):
        """
        A list of actions that have been performed on the droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.actions(cli)
        [<Action power_cycle>, <Action create>]

        :param client: An instance of `batfish.client.Client`.
        :rtype: A list of `batfish.models.Action` instances, or None if the
                API response carries no actions.
        """
        j = client.get("droplets/{0}/actions".format(self.id))
        if 'actions' not in j:
            return None
        return [Action(a) for a in j['actions']]

    @property
    def features(self):
        """
        A list of features currently active on a droplet.

        >>> cli = batfish.Client()
        >>> droplet = cli.droplet_from_id(1234)
        >>> droplet.features
        ['private_networking', 'virtio']

        :rtype: `list`.
        """
        return self._data['features']
| |
# coding=utf-8
"""
The Collector class is a base class for all metric collectors.
"""
import os
import socket
import platform
import logging
import configobj
import traceback
import time
from diamond.metric import Metric
# Detect the architecture of the system and pick the counter maximum used
# for roll-over handling accordingly: 64-bit systems wrap at 2**64 - 1,
# everything else at 2**32 - 1. Using the wrong maximum would cause
# incorrect or negative values when a counter rolls over.
_IS_64BIT = platform.architecture()[0] == '64bit'
MAX_COUNTER = (2 ** 64 - 1) if _IS_64BIT else (2 ** 32 - 1)
def get_hostname(config, method=None):
    """
    Return the hostname to report metrics under.

    Resolution order:
      1. An explicit ``hostname`` key in ``config`` always wins.
      2. Otherwise ``method`` (or ``config['hostname_method']``, defaulting
         to ``'smart'``) selects how the hostname is derived.

    Results are memoized per method in ``get_hostname.cached_results``.

    :param config: mapping that may contain ``hostname`` and
                   ``hostname_method`` keys.
    :param method: optional method name overriding the configured one.
    :raises NotImplementedError: if the method name is unknown.
    """
    if 'hostname' in config:
        return config['hostname']
    if method is None:
        method = config.get('hostname_method', 'smart')
    # Method names are matched case insensitively.
    method = method.lower()
    if method in get_hostname.cached_results:
        return get_hostname.cached_results[method]
    if method == 'smart':
        # Prefer the short FQDN; fall back to the short hostname when the
        # FQDN resolves to the useless 'localhost'.
        hostname = get_hostname(config, 'fqdn_short')
        if hostname == 'localhost':
            hostname = get_hostname(config, 'hostname_short')
    elif method == 'fqdn_short':
        hostname = socket.getfqdn().split('.')[0]
    elif method == 'fqdn':
        # Dots would create extra path components in graphite.
        hostname = socket.getfqdn().replace('.', '_')
    elif method == 'fqdn_rev':
        hostname = '.'.join(reversed(socket.getfqdn().split('.')))
    elif method == 'uname_short':
        hostname = os.uname()[1].split('.')[0]
    elif method == 'uname_rev':
        hostname = '.'.join(reversed(os.uname()[1].split('.')))
    elif method == 'hostname':
        hostname = socket.gethostname()
    elif method == 'hostname_short':
        hostname = socket.gethostname().split('.')[0]
    elif method == 'hostname_rev':
        hostname = '.'.join(reversed(socket.gethostname().split('.')))
    elif method == 'none':
        hostname = None
    else:
        # Report the offending method name itself. The old code indexed
        # config['hostname_method'] here, which raised a spurious KeyError
        # when an unknown method was passed in directly.
        raise NotImplementedError(method)
    get_hostname.cached_results[method] = hostname
    return hostname

# Per-method memoization cache for get_hostname().
get_hostname.cached_results = {}
def str_to_bool(value):
    """
    Converts string ('true', 'false') to bool
    """
    # Non-string values pass through untouched; strings are true only when
    # they spell 'true' (any case, surrounding whitespace ignored).
    if isinstance(value, basestring):
        return value.strip().lower() == 'true'
    return value
class Collector(object):
    """
    The Collector class is a base class for all metric collectors.

    Subclasses implement ``collect()`` and call the ``publish*`` helpers to
    emit metrics; the scheduler drives a collector through ``_run()``.
    """
    def __init__(self, config, handlers):
        """
        Create a new instance of the Collector class

        Collector settings are merged, lowest to highest precedence, from:
        the class defaults (``get_default_config``), ``collectors.default``,
        the collector's own config section, and an optional per-collector
        ``<ClassName>.conf`` file in the collectors config directory.
        """
        # Initialize Logger
        self.log = logging.getLogger('diamond')
        # Initialize Members
        self.name = self.__class__.__name__
        self.handlers = handlers
        # Previous raw values keyed by metric path, used by derivative().
        self.last_values = {}
        # Get Collector class
        cls = self.__class__
        # Initialize config
        self.config = configobj.ConfigObj()
        # Check if default config is defined
        if self.get_default_config() is not None:
            # Merge default config
            self.config.merge(self.get_default_config())
        # Merge default Collector config
        self.config.merge(config['collectors']['default'])
        # Check if Collector config section exists
        if cls.__name__ in config['collectors']:
            # Merge Collector config section
            self.config.merge(config['collectors'][cls.__name__])
        # Check for config file in config directory
        configfile = os.path.join(config['server']['collectors_config_path'],
                                  cls.__name__) + '.conf'
        if os.path.exists(configfile):
            # Merge Collector config file
            self.config.merge(configobj.ConfigObj(configfile))
        # Handle some config file changes transparently
        if isinstance(self.config['byte_unit'], basestring):
            self.config['byte_unit'] = self.config['byte_unit'].split()
        self.config['enabled'] = str_to_bool(self.config['enabled'])
        self.config['measure_collector_time'] = str_to_bool(
            self.config['measure_collector_time'])
        # Guard flag so overlapping scheduler ticks never run collect() twice.
        self.collect_running = False
    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this collector
        """
        return {
            'enabled': 'Enable collecting these metrics',
            'byte_unit': 'Default numeric output(s)',
            'measure_collector_time': 'Collect the collector run time in ms',
        }
    def get_default_config(self):
        """
        Return the default config for the collector
        """
        return {
            ### Defaults options for all Collectors
            # Uncomment and set to hardcode a hostname for the collector path
            # Keep in mind, periods are seperators in graphite
            # 'hostname': 'my_custom_hostname',
            # If you perfer to just use a different way of calculating the
            # hostname
            # Uncomment and set this to one of these values:
            # fqdn_short = Default. Similar to hostname -s
            # fqdn = hostname output
            # fqdn_rev = hostname in reverse (com.example.www)
            # uname_short = Similar to uname -n, but only the first part
            # uname_rev = uname -r in reverse (com.example.www)
            # 'hostname_method': 'fqdn_short',
            # All collectors are disabled by default
            'enabled': False,
            # Path Prefix
            'path_prefix': 'servers',
            # Path Prefix for Virtual Machine metrics
            'instance_prefix': 'instances',
            # Path Suffix
            'path_suffix': '',
            # Default splay time (seconds)
            'splay': 1,
            # Default Poll Interval (seconds)
            'interval': 300,
            # Default collector threading model
            'method': 'Sequential',
            # Default numeric output
            'byte_unit': 'byte',
            # Collect the collector run time in ms
            'measure_collector_time': False,
        }
    def get_stats_for_upload(self, config=None):
        """
        Return the subset of config (enabled, interval) reported upstream.
        """
        if config is None:
            config = self.config
        stats = {}
        if 'enabled' in config:
            stats['enabled'] = config['enabled']
        else:
            stats['enabled'] = False
        if 'interval' in config:
            stats['interval'] = config['interval']
        return stats
    def get_schedule(self):
        """
        Return schedule for the collector
        """
        # Return a dict of tuples containing (collector function,
        # collector function args, splay, interval)
        return {self.__class__.__name__: (self._run,
                                          None,
                                          int(self.config['splay']),
                                          int(self.config['interval']))}
    def get_metric_path(self, name, instance=None):
        """
        Get metric path.
        Instance indicates that this is a metric for a
        virtual machine and should have a different
        root prefix.
        """
        # Collector-level path component; defaults to the class name.
        if 'path' in self.config:
            path = self.config['path']
        else:
            path = self.__class__.__name__
        if instance is not None:
            if 'instance_prefix' in self.config:
                prefix = self.config['instance_prefix']
            else:
                prefix = 'instances'
            # A path of '.' collapses the collector component entirely.
            if path == '.':
                return '.'.join([prefix, instance, name])
            else:
                return '.'.join([prefix, instance, path, name])
        if 'path_prefix' in self.config:
            prefix = self.config['path_prefix']
        else:
            prefix = 'systems'
        if 'path_suffix' in self.config:
            suffix = self.config['path_suffix']
        else:
            suffix = None
        hostname = get_hostname(self.config)
        if hostname is not None:
            if prefix:
                prefix = ".".join((prefix, hostname))
            else:
                prefix = hostname
        # if there is a suffix, add after the hostname
        if suffix:
            prefix = '.'.join((prefix, suffix))
        if path == '.':
            return '.'.join([prefix, name])
        else:
            return '.'.join([prefix, path, name])
    def get_hostname(self):
        # Thin wrapper over the module-level helper using this collector's
        # own config.
        return get_hostname(self.config)
    def collect(self):
        """
        Default collector method

        Subclasses must override this to gather and publish their metrics.
        """
        raise NotImplementedError()
    def publish(self, name, value, raw_value=None, precision=0,
                metric_type='GAUGE', instance=None):
        """
        Publish a metric with the given name
        """
        # Get metric Path
        path = self.get_metric_path(name, instance=instance)
        # Create Metric
        metric = Metric(path, value, raw_value=raw_value, timestamp=None,
                        precision=precision, host=self.get_hostname(),
                        metric_type=metric_type)
        # Publish Metric
        self.publish_metric(metric)
    def publish_metric(self, metric):
        """
        Publish a Metric object
        """
        # Process Metric
        for handler in self.handlers:
            handler._process(metric)
    def publish_gauge(self, name, value, precision=0, instance=None):
        # Convenience wrapper: publish as a GAUGE metric.
        return self.publish(name, value, precision=precision,
                            metric_type='GAUGE', instance=instance)
    def publish_counter(self, name, value, precision=0, max_value=0,
                        time_delta=True, interval=None, allow_negative=False,
                        instance=None):
        # Convenience wrapper: convert the raw counter reading into a rate
        # via derivative() and publish it as a COUNTER metric.
        raw_value = value
        value = self.derivative(name, value, max_value=max_value,
                                time_delta=time_delta, interval=interval,
                                allow_negative=allow_negative,
                                instance=instance)
        return self.publish(name, value, raw_value=raw_value,
                            precision=precision, metric_type='COUNTER',
                            instance=instance)
    def derivative(self, name, new, max_value=0,
                   time_delta=True, interval=None,
                   allow_negative=False, instance=None):
        """
        Calculate the derivative of the metric.

        Returns 0 on the first observation of a metric path; afterwards the
        change since the previous call, divided by the interval when
        ``time_delta`` is true. Negative results are clamped to 0 unless
        ``allow_negative`` is set.
        """
        # Format Metric Path
        path = self.get_metric_path(name, instance=instance)
        if path in self.last_values:
            old = self.last_values[path]
            # Check for rollover
            if new < old:
                old = old - max_value
            # Get Change in X (value)
            derivative_x = new - old
            # If we pass in a interval, use it rather then the configured one
            if interval is None:
                interval = int(self.config['interval'])
            # Get Change in Y (time)
            if time_delta:
                derivative_y = interval
            else:
                derivative_y = 1
            result = float(derivative_x) / float(derivative_y)
            if result < 0 and not allow_negative:
                result = 0
        else:
            result = 0
        # Store Old Value
        self.last_values[path] = new
        # Return result
        return result
    def _run(self):
        """
        Run the collector unless it's already running
        """
        if self.collect_running:
            return
        # Log
        self.log.debug("Collecting data from: %s" % self.__class__.__name__)
        try:
            try:
                start_time = time.time()
                self.collect_running = True
                # Collect Data
                self.collect()
                end_time = time.time()
                if 'measure_collector_time' in self.config:
                    if self.config['measure_collector_time']:
                        metric_name = 'collector_time_ms'
                        metric_value = int((end_time - start_time) * 1000)
                        self.publish(metric_name, metric_value)
            except Exception:
                # Log Error
                self.log.error(traceback.format_exc())
            finally:
                self.collect_running = False
                # After collector run, invoke a flush
                # method on each handler.
                for handler in self.handlers:
                    handler._flush()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.make_saveable_from_iterator")
def make_saveable_from_iterator(iterator):
  """Returns a SaveableObject for saving/restore iterator state using Saver.

  Args:
    iterator: Iterator.

  Returns:
    A SaveableObject wrapping the iterator's serialized state, suitable for
    adding to the `SAVEABLE_OBJECTS` collection so a `tf.train.Saver` can
    checkpoint and restore it.

  For example:

  ```python
  with tf.Graph().as_default():
    ds = tf.data.Dataset.range(10)
    iterator = ds.make_initializable_iterator()
    # Build the iterator SaveableObject.
    saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
    # Add the SaveableObject to the SAVEABLE_OBJECTS collection so
    # it can be automatically saved using Saver.
    tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
    saver = tf.train.Saver()
    while continue_training:
      ... Perform training ...
      if should_save_checkpoint:
        saver.save()
  ```

  Note: When restoring the iterator, the existing iterator state is completely
  discarded. This means that any changes you may have made to the Dataset
  graph will be discarded as well! This includes the new Dataset graph
  that you may have built during validation. So, while running validation,
  make sure to run the initializer for the validation input pipeline after
  restoring the checkpoint.

  Note: Not all iterators support checkpointing yet. Attempting to save the
  state of an unsupported iterator will throw an error.
  """
  # The saveable wraps the iterator's underlying resource tensor.
  return _Saveable(iterator._iterator_resource)  # pylint: disable=protected-access
class _Saveable(saver_lib.BaseSaverBuilder.SaveableObject):
  """SaveableObject for saving/restoring iterator state."""

  def __init__(self, iterator_resource):
    # Serialize the iterator's full state into a single tensor so that a
    # standard Saver can write it into a checkpoint under "<name>-state".
    state_tensor = gen_dataset_ops.serialize_iterator(iterator_resource)
    save_specs = [
        saver_lib.BaseSaverBuilder.SaveSpec(
            state_tensor, "", iterator_resource.name + "-state")
    ]
    super(_Saveable, self).__init__(
        iterator_resource, save_specs, iterator_resource.name)

  def restore(self, restored_tensors, unused_restored_shapes):
    # Deserialize on the same device as the iterator resource.
    with ops.colocate_with(self.op):
      return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
@tf_export("data.experimental.CheckpointInputPipelineHook")
class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
  """Checkpoints input pipeline state every N steps or seconds.

  This hook saves the state of the iterators in the `Graph` so that when
  training is resumed the input pipeline continues from where it left off.
  This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval are small compared to the dataset
  size or if the training pipeline is pre-empted.

  Differences from `CheckpointSaverHook`:
  1. Saves only the input pipelines in the "iterators" collection and not the
     global variables or other saveable objects.
  2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.

  Example of checkpointing the training pipeline:

  ```python
  est = tf.estimator.Estimator(model_fn)
  while True:
    est.train(
        train_input_fn,
        hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],
        steps=train_steps_per_eval)
    # Note: We do not pass the hook here.
    metrics = est.evaluate(eval_input_fn)
    if should_stop_the_training(metrics):
      break
  ```

  This hook should be used if the input pipeline state needs to be saved
  separate from the model checkpoint. Doing so may be useful for a few reasons:
  1. The input pipeline checkpoint may be large, if there are large shuffle
     or prefetch buffers for instance, and may bloat the checkpoint size.
  2. If the input pipeline is shared between training and validation, restoring
     the checkpoint during validation may override the validation input
     pipeline.

  For saving the input pipeline checkpoint alongside the model weights use
  `tf.data.experimental.make_saveable_from_iterator` directly to create a
  `SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
  that you will need to be careful not to restore the training iterator during
  eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collector when building the eval graph.
  """

  def __init__(self, estimator):
    """Initializes a `CheckpointInputPipelineHook`.

    Args:
      estimator: Estimator.

    Raises:
      ValueError: One of `save_steps` or `save_secs` should be set.
      ValueError: At most one of saver or scaffold should be set.
    """
    # `checkpoint_basename` is "input.ckpt" for non-distributed pipelines or
    # of the form "input_<task_type>_<task_id>.ckpt" for distributed pipelines.
    # Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is
    # "model.ckpt". We intentionally choose the input pipeline checkpoint prefix
    # to be different to avoid conflicts with the model checkpoint.
    # pylint: disable=protected-access
    checkpoint_prefix = "input"
    if estimator._config.num_worker_replicas > 1:
      # Distributed setting.
      suffix = "_{}_{}".format(estimator._config.task_type,
                               estimator._config.task_id)
      checkpoint_prefix += suffix
    # pylint: enable=protected-access

    # We use a composition paradigm instead of inheriting from
    # `CheckpointSaverHook` because `Estimator` does an `isinstance` check
    # to check whether a `CheckpointSaverHook` is already present in the list
    # of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`
    # would thwart this behavior. This hook checkpoints *only the iterators*
    # and not the graph variables.
    self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(
        estimator.model_dir,
        save_secs=estimator._config.save_checkpoints_secs,  # pylint: disable=protected-access
        save_steps=estimator._config.save_checkpoints_steps,  # pylint: disable=protected-access
        checkpoint_basename=checkpoint_prefix + ".ckpt")

    # Name for the protocol buffer file that will contain the list of most
    # recent checkpoints stored as a `CheckpointState` protocol buffer.
    # This file, kept in the same directory as the checkpoint files, is
    # automatically managed by the `Saver` to keep track of recent checkpoints.
    # The default name used by the `Saver` for this file is "checkpoint". Here
    # we use the name "checkpoint_<checkpoint_prefix>" so that in case the
    # `checkpoint_dir` is the same as the model checkpoint directory, there are
    # no conflicts during restore.
    self._latest_filename = "checkpoint_" + checkpoint_prefix
    # The initial restore-or-save is deferred to the first `before_run()` call;
    # see `_restore_or_save_initial_ckpt` for the reasoning.
    self._first_run = True

  def begin(self):
    # Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`
    # collection if no `Saver` or `Scaffold` is provided.
    # pylint: disable=protected-access
    if (self._checkpoint_saver_hook._saver is None and
        self._checkpoint_saver_hook._scaffold is None):
      iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)
      saveables = [_Saveable(i) for i in iterators]
      self._checkpoint_saver_hook._saver = _CustomSaver(saveables,
                                                        self._latest_filename)
    # pylint: enable=protected-access
    self._checkpoint_saver_hook.begin()

  def _restore_or_save_initial_ckpt(self, session):
    """Restores the latest iterator checkpoint, or saves the initial state."""
    # Ideally this should be run in after_create_session but is not for the
    # following reason:
    # Currently there is no way of enforcing an order of running the
    # `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`
    # is run *after* this hook. That is troublesome because
    # 1. If a checkpoint exists and this hook restores it, the initializer hook
    #    will override it.
    # 2. If no checkpoint exists, this hook will try to save an initialized
    #    iterator which will result in an exception.
    #
    # As a temporary fix we enter the following implicit contract between this
    # hook and the _DatasetInitializerHook.
    # 1. The _DatasetInitializerHook initializes the iterator in the call to
    #    after_create_session.
    # 2. This hook saves the iterator on the first call to `before_run()`, which
    #    is guaranteed to happen after `after_create_session()` of all hooks
    #    have been run.

    # Check if there is an existing checkpoint. If so, restore from it.
    # pylint: disable=protected-access
    latest_checkpoint_path = checkpoint_management.latest_checkpoint(
        self._checkpoint_saver_hook._checkpoint_dir,
        latest_filename=self._latest_filename)
    if latest_checkpoint_path:
      self._checkpoint_saver_hook._get_saver().restore(session,
                                                       latest_checkpoint_path)
    else:
      # The checkpoint saved here is the state at step "global_step".
      # Note: We do not save the GraphDef or MetaGraphDef here.
      global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)
      self._checkpoint_saver_hook._save(session, global_step)
      self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)
    # pylint: enable=protected-access

  def before_run(self, run_context):
    # On the first run only, perform the deferred restore-or-save, then
    # delegate to the wrapped `CheckpointSaverHook`.
    if self._first_run:
      self._restore_or_save_initial_ckpt(run_context.session)
      self._first_run = False
    return self._checkpoint_saver_hook.before_run(run_context)

  def after_run(self, run_context, run_values):
    # Delegate periodic checkpoint writes to the wrapped hook.
    self._checkpoint_saver_hook.after_run(run_context, run_values)

  def end(self, session):
    # Delegate the final checkpoint write to the wrapped hook.
    self._checkpoint_saver_hook.end(session)
class _CustomSaver(saver_lib.Saver):
  """`Saver` with a different default `latest_filename`.

  This is used in the `CheckpointInputPipelineHook` to avoid conflicts with
  the model ckpt saved by the `CheckpointSaverHook`.
  """

  def __init__(self, var_list, latest_filename):
    # `latest_filename` names the CheckpointState proto file; it is used as
    # the default in `save()` below whenever the caller does not pass one.
    super(_CustomSaver, self).__init__(var_list)
    self._latest_filename = latest_filename

  def save(self,
           sess,
           save_path,
           global_step=None,
           latest_filename=None,
           meta_graph_suffix="meta",
           write_meta_graph=True,
           write_state=True,
           strip_default_attrs=False):
    # Identical to `Saver.save` except that a missing `latest_filename`
    # falls back to the value given at construction time.
    return super(_CustomSaver, self).save(
        sess, save_path, global_step, latest_filename or self._latest_filename,
        meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)
# Re-export existing symbols under the public `tf.data.experimental` namespace.
tf_export("data.experimental.Optional")(optional_ops.Optional)
tf_export("data.experimental.get_next_as_optional")(
    iterator_ops.get_next_as_optional)
| |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from typing import Iterable, Type
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals.setup_py import (
AmbiguousOwnerError,
DependencyOwner,
ExportedTarget,
ExportedTargetRequirements,
FirstPartyDependencyVersionScheme,
InvalidEntryPoint,
InvalidSetupPyArgs,
NoOwnerError,
OwnedDependencies,
OwnedDependency,
SetupKwargs,
SetupKwargsRequest,
SetupPyChroot,
SetupPyChrootRequest,
SetupPyGeneration,
SetupPySources,
SetupPySourcesRequest,
declares_pkg_resources_namespace_package,
determine_setup_kwargs,
distutils_repr,
generate_chroot,
get_exporting_owner,
get_owned_dependencies,
get_requirements,
get_sources,
validate_commands,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import (
PexBinary,
PythonDistribution,
PythonLibrary,
PythonRequirementLibrary,
)
from pants.backend.python.util_rules import python_sources
from pants.core.target_types import Files, Resources
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import SubsystemRule, rule
from pants.engine.target import Targets
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import QueryRule, RuleRunner
# Canonical pkg_resources namespace-package declaration; written into test
# `__init__.py` files to mark them as namespace packages.
_namespace_decl = "__import__('pkg_resources').declare_namespace(__name__)"
def create_setup_py_rule_runner(*, rules: Iterable) -> RuleRunner:
    """Build a RuleRunner preloaded with the target types and `setup_py`
    object that the setup-py tests rely on, wired with the given rules."""
    runner = RuleRunner(
        rules=rules,
        objects={"setup_py": PythonArtifact},
        target_types=[
            PexBinary,
            PythonDistribution,
            PythonLibrary,
            PythonRequirementLibrary,
            Resources,
            Files,
        ],
    )
    # Subprocess environment needed for Python discovery in the sandbox.
    runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    return runner
# We use a trivial test that our SetupKwargs plugin hook works.
class PluginSetupKwargsRequest(SetupKwargsRequest):
    # Applies to every target so that the plugin hook always fires in tests.
    @classmethod
    def is_applicable(cls, _) -> bool:
        return True
@rule
def setup_kwargs_plugin(request: PluginSetupKwargsRequest) -> SetupKwargs:
    """Plugin hook that injects a marker kwarg so tests can observe that the
    SetupKwargs plugin machinery ran."""
    kwargs = dict(request.explicit_kwargs)
    kwargs["plugin_demo"] = "hello world"
    return SetupKwargs(kwargs, address=request.target.address)
@pytest.fixture
def chroot_rule_runner() -> RuleRunner:
    """RuleRunner wired with the full chroot-generation rule graph, including
    the test-only SetupKwargs plugin."""
    return create_setup_py_rule_runner(
        rules=[
            determine_setup_kwargs,
            generate_chroot,
            get_sources,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            *python_sources.rules(),
            *target_types_rules.rules(),
            setup_kwargs_plugin,
            SubsystemRule(SetupPyGeneration),
            UnionRule(SetupKwargsRequest, PluginSetupKwargsRequest),
            QueryRule(SetupPyChroot, (SetupPyChrootRequest,)),
        ]
    )
def assert_chroot(
    rule_runner: RuleRunner, expected_files, expected_setup_kwargs, addr: Address
) -> None:
    """Generate the chroot for `addr` and check its file list and kwargs."""
    target = rule_runner.get_target(addr)
    request = SetupPyChrootRequest(ExportedTarget(target), py2=False)
    chroot = rule_runner.request(SetupPyChroot, [request])
    snapshot = rule_runner.request(Snapshot, [chroot.digest])
    assert sorted(snapshot.files) == sorted(expected_files)
    assert chroot.setup_kwargs.kwargs == expected_setup_kwargs
def assert_chroot_error(rule_runner: RuleRunner, addr: Address, exc_cls: Type[Exception]) -> None:
    """Assert that chroot generation for `addr` fails with exactly `exc_cls`."""
    target = rule_runner.get_target(addr)
    with pytest.raises(ExecutionError) as excinfo:
        rule_runner.request(
            SetupPyChroot,
            [SetupPyChrootRequest(ExportedTarget(target), py2=False)],
        )
    wrapped = excinfo.value.wrapped_exceptions
    assert len(wrapped) == 1
    assert type(wrapped[0]) == exc_cls
def test_generate_chroot(chroot_rule_runner: RuleRunner) -> None:
    """End-to-end: chroot generation gathers sources, resources, loose files,
    first-party requirements, and `.with_binaries` entry points."""
    chroot_rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        textwrap.dedent(
            """
            python_distribution(
                name="baz-dist",
                dependencies=[':baz'],
                provides=setup_py(
                    name='baz',
                    version='1.1.1'
                )
            )
            python_library()
            """
        ),
    )
    chroot_rule_runner.create_file("src/python/foo/bar/baz/baz.py")
    chroot_rule_runner.add_to_build_file(
        "src/python/foo/qux",
        textwrap.dedent(
            """
            python_library()
            pex_binary(name="bin", entry_point="foo.qux.bin:main")
            """
        ),
    )
    chroot_rule_runner.create_file("src/python/foo/qux/__init__.py")
    chroot_rule_runner.create_file("src/python/foo/qux/qux.py")
    # Add a `.pyi` stub file to ensure we include it in the final result.
    chroot_rule_runner.create_file("src/python/foo/qux/qux.pyi")
    chroot_rule_runner.add_to_build_file(
        "src/python/foo/resources", 'resources(sources=["js/code.js"])'
    )
    chroot_rule_runner.create_file("src/python/foo/resources/js/code.js")
    chroot_rule_runner.add_to_build_file("files", 'files(sources=["README.txt"])')
    chroot_rule_runner.create_file("files/README.txt")
    chroot_rule_runner.add_to_build_file(
        "src/python/foo",
        textwrap.dedent(
            """
            python_distribution(
                name='foo-dist',
                dependencies=[
                    ':foo',
                ],
                provides=setup_py(
                    name='foo', version='1.2.3'
                ).with_binaries(
                    foo_main='src/python/foo/qux:bin'
                )
            )
            python_library(
                dependencies=[
                    'src/python/foo/bar/baz',
                    'src/python/foo/qux',
                    'src/python/foo/resources',
                    'files',
                ]
            )
            """
        ),
    )
    chroot_rule_runner.create_file("src/python/foo/__init__.py", _namespace_decl)
    chroot_rule_runner.create_file("src/python/foo/foo.py")
    assert_chroot(
        chroot_rule_runner,
        [
            "src/files/README.txt",
            "src/foo/qux/__init__.py",
            "src/foo/qux/qux.py",
            "src/foo/qux/qux.pyi",
            "src/foo/resources/js/code.js",
            "src/foo/__init__.py",
            "src/foo/foo.py",
            "setup.py",
            "MANIFEST.in",
        ],
        {
            "name": "foo",
            "version": "1.2.3",
            "plugin_demo": "hello world",
            "package_dir": {"": "src"},
            "packages": ("foo", "foo.qux"),
            "namespace_packages": ("foo",),
            "package_data": {"foo": ("resources/js/code.js",)},
            "install_requires": ("baz==1.1.1",),
            "entry_points": {"console_scripts": ["foo_main=foo.qux.bin:main"]},
        },
        Address("src/python/foo", target_name="foo-dist"),
    )
def test_invalid_binary(chroot_rule_runner: RuleRunner) -> None:
    """Every way of wiring a non-binary or unowned entry point through
    `.with_binaries()` must fail with `InvalidEntryPoint`."""
    chroot_rule_runner.create_files("src/python/invalid_binary", ["app1.py", "app2.py"])
    chroot_rule_runner.add_to_build_file(
        "src/python/invalid_binary",
        textwrap.dedent(
            """
            python_library(name='not_a_binary', sources=[])
            pex_binary(name='invalid_entrypoint_unowned1', entry_point='app1.py')
            pex_binary(name='invalid_entrypoint_unowned2', entry_point='invalid_binary.app2')
            python_distribution(
                name='invalid_bin1',
                provides=setup_py(
                    name='invalid_bin1', version='1.1.1'
                ).with_binaries(foo=':not_a_binary')
            )
            python_distribution(
                name='invalid_bin2',
                provides=setup_py(
                    name='invalid_bin2', version='1.1.1'
                ).with_binaries(foo=':invalid_entrypoint_unowned1')
            )
            python_distribution(
                name='invalid_bin3',
                provides=setup_py(
                    name='invalid_bin3', version='1.1.1'
                ).with_binaries(foo=':invalid_entrypoint_unowned2')
            )
            """
        ),
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin1"),
        InvalidEntryPoint,
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin2"),
        InvalidEntryPoint,
    )
    assert_chroot_error(
        chroot_rule_runner,
        Address("src/python/invalid_binary", target_name="invalid_bin3"),
        InvalidEntryPoint,
    )
def test_binary_shorthand(chroot_rule_runner: RuleRunner) -> None:
    """A `module.py:func`-style entry point is resolved to a full
    `pkg.module:func` console_script in the generated kwargs."""
    chroot_rule_runner.create_file("src/python/project/app.py")
    chroot_rule_runner.add_to_build_file(
        "src/python/project",
        textwrap.dedent(
            """
            python_library()
            pex_binary(name='bin', entry_point='app.py:func')
            python_distribution(
                name='dist',
                provides=setup_py(
                    name='bin', version='1.1.1'
                ).with_binaries(foo=':bin')
            )
            """
        ),
    )
    assert_chroot(
        chroot_rule_runner,
        ["src/project/app.py", "setup.py", "MANIFEST.in"],
        {
            "name": "bin",
            "version": "1.1.1",
            "plugin_demo": "hello world",
            "package_dir": {"": "src"},
            "packages": ("project",),
            "namespace_packages": (),
            "install_requires": (),
            "package_data": {},
            "entry_points": {"console_scripts": ["foo=project.app:func"]},
        },
        Address("src/python/project", target_name="dist"),
    )
def test_get_sources() -> None:
    """SetupPySources must collect files, derive packages and namespace
    packages, and compute package_data for various target combinations."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            get_sources,
            *python_sources.rules(),
            QueryRule(SetupPySources, (SetupPySourcesRequest,)),
        ]
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        textwrap.dedent(
            """
            python_library(name='baz1', sources=['baz1.py'])
            python_library(name='baz2', sources=['baz2.py'])
            """
        ),
    )
    rule_runner.create_file("src/python/foo/bar/baz/baz1.py")
    rule_runner.create_file("src/python/foo/bar/baz/baz2.py")
    rule_runner.create_file("src/python/foo/bar/__init__.py", _namespace_decl)
    rule_runner.add_to_build_file("src/python/foo/qux", "python_library()")
    rule_runner.create_file("src/python/foo/qux/__init__.py")
    rule_runner.create_file("src/python/foo/qux/qux.py")
    rule_runner.add_to_build_file("src/python/foo/resources", 'resources(sources=["js/code.js"])')
    rule_runner.create_file("src/python/foo/resources/js/code.js")
    rule_runner.create_file("src/python/foo/__init__.py")

    def assert_sources(
        expected_files,
        expected_packages,
        expected_namespace_packages,
        expected_package_data,
        addrs,
    ):
        # Run the SetupPySources rule over `addrs` and compare each facet of
        # the result against the expectations.
        targets = Targets(rule_runner.get_target(addr) for addr in addrs)
        srcs = rule_runner.request(
            SetupPySources,
            [SetupPySourcesRequest(targets, py2=False)],
        )
        chroot_snapshot = rule_runner.request(Snapshot, [srcs.digest])
        assert sorted(expected_files) == sorted(chroot_snapshot.files)
        assert sorted(expected_packages) == sorted(srcs.packages)
        assert sorted(expected_namespace_packages) == sorted(srcs.namespace_packages)
        assert expected_package_data == dict(srcs.package_data)

    assert_sources(
        expected_files=["foo/bar/baz/baz1.py", "foo/bar/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.bar", "foo.bar.baz"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={},
        addrs=[Address("src/python/foo/bar/baz", target_name="baz1")],
    )
    assert_sources(
        expected_files=["foo/bar/baz/baz2.py", "foo/bar/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.bar", "foo.bar.baz"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={},
        addrs=[Address("src/python/foo/bar/baz", target_name="baz2")],
    )
    assert_sources(
        expected_files=["foo/qux/qux.py", "foo/qux/__init__.py", "foo/__init__.py"],
        expected_packages=["foo", "foo.qux"],
        expected_namespace_packages=[],
        expected_package_data={},
        addrs=[Address("src/python/foo/qux")],
    )
    assert_sources(
        expected_files=[
            "foo/bar/baz/baz1.py",
            "foo/bar/__init__.py",
            "foo/qux/qux.py",
            "foo/qux/__init__.py",
            "foo/__init__.py",
            "foo/resources/js/code.js",
        ],
        expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={"foo": ("resources/js/code.js",)},
        addrs=[
            Address("src/python/foo/bar/baz", target_name="baz1"),
            Address("src/python/foo/qux"),
            Address("src/python/foo/resources"),
        ],
    )
    assert_sources(
        expected_files=[
            "foo/bar/baz/baz1.py",
            "foo/bar/baz/baz2.py",
            "foo/bar/__init__.py",
            "foo/qux/qux.py",
            "foo/qux/__init__.py",
            "foo/__init__.py",
            "foo/resources/js/code.js",
        ],
        expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
        expected_namespace_packages=["foo.bar"],
        expected_package_data={"foo": ("resources/js/code.js",)},
        addrs=[
            Address("src/python/foo/bar/baz", target_name="baz1"),
            Address("src/python/foo/bar/baz", target_name="baz2"),
            Address("src/python/foo/qux"),
            Address("src/python/foo/resources"),
        ],
    )
def test_get_requirements() -> None:
    """Requirements aggregate 3rdparty deps of owned targets and express
    deps on other exported targets per the configured version scheme."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            SubsystemRule(SetupPyGeneration),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.add_to_build_file(
        "3rdparty",
        textwrap.dedent(
            """
            python_requirement_library(
                name='ext1',
                requirements=['ext1==1.22.333'],
            )
            python_requirement_library(
                name='ext2',
                requirements=['ext2==4.5.6'],
            )
            python_requirement_library(
                name='ext3',
                requirements=['ext3==0.0.1'],
            )
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        "python_library(dependencies=['3rdparty:ext1'], sources=[])",
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/qux",
        "python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar",
        textwrap.dedent(
            """
            python_distribution(
                name='bar-dist',
                dependencies=[':bar'],
                provides=setup_py(name='bar', version='9.8.7'),
            )
            python_library(
                sources=[],
                dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
            )
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo/corge",
        textwrap.dedent(
            """
            python_distribution(
                name='corge-dist',
                # Tests having a 3rdparty requirement directly on a python_distribution.
                dependencies=[':corge', '3rdparty:ext3'],
                provides=setup_py(name='corge', version='2.2.2'),
            )
            python_library(
                sources=[],
                dependencies=['src/python/foo/bar'],
            )
            """
        ),
    )
    assert_requirements(
        rule_runner,
        ["ext1==1.22.333", "ext2==4.5.6"],
        Address("src/python/foo/bar", target_name="bar-dist"),
    )
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar==9.8.7"],
        Address("src/python/foo/corge", target_name="corge-dist"),
    )
    # First-party dep versions honor the configured scheme: ~= (compatible) ...
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar~=9.8.7"],
        Address("src/python/foo/corge", target_name="corge-dist"),
        version_scheme=FirstPartyDependencyVersionScheme.COMPATIBLE,
    )
    # ... or unpinned (any).
    assert_requirements(
        rule_runner,
        ["ext3==0.0.1", "bar"],
        Address("src/python/foo/corge", target_name="corge-dist"),
        version_scheme=FirstPartyDependencyVersionScheme.ANY,
    )
def test_get_requirements_with_exclude() -> None:
    """A `!!`-ignored 3rdparty dep must be excluded from the computed
    requirements of the exported target."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            SubsystemRule(SetupPyGeneration),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.add_to_build_file(
        "3rdparty",
        textwrap.dedent(
            """
            python_requirement_library(
                name='ext1',
                requirements=['ext1==1.22.333'],
            )
            python_requirement_library(
                name='ext2',
                requirements=['ext2==4.5.6'],
            )
            python_requirement_library(
                name='ext3',
                requirements=['ext3==0.0.1'],
            )
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        "python_library(dependencies=['3rdparty:ext1'], sources=[])",
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/qux",
        "python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar",
        textwrap.dedent(
            """
            python_distribution(
                name='bar-dist',
                dependencies=['!!3rdparty:ext2',':bar'],
                provides=setup_py(name='bar', version='9.8.7'),
            )
            python_library(
                sources=[],
                dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
            )
            """
        ),
    )
    # ext2 is transitively reachable but explicitly excluded via `!!`.
    assert_requirements(
        rule_runner, ["ext1==1.22.333"], Address("src/python/foo/bar", target_name="bar-dist")
    )
def assert_requirements(
    rule_runner,
    expected_req_strs,
    addr: Address,
    *,
    version_scheme: FirstPartyDependencyVersionScheme = FirstPartyDependencyVersionScheme.EXACT,
):
    """Compute requirements for the exported target at `addr` under the given
    first-party version scheme and compare against `expected_req_strs`."""
    scheme_flag = (
        f"--setup-py-generation-first-party-dependency-version-scheme={version_scheme.value}"
    )
    rule_runner.set_options([scheme_flag], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    exported = ExportedTarget(rule_runner.get_target(addr))
    reqs = rule_runner.request(
        ExportedTargetRequirements,
        [DependencyOwner(exported)],
    )
    assert sorted(expected_req_strs) == list(reqs)
def test_owned_dependencies() -> None:
    """OwnedDependencies must include the exported target itself plus every
    target it uniquely exports (libraries and resources)."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            get_owned_dependencies,
            get_exporting_owner,
            QueryRule(OwnedDependencies, (DependencyOwner,)),
        ]
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        textwrap.dedent(
            """
            python_library(name='baz1', sources=[])
            python_library(name='baz2', sources=[])
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar",
        textwrap.dedent(
            """
            python_distribution(
                name='bar1-dist',
                dependencies=[':bar1'],
                provides=setup_py(name='bar1', version='1.1.1'),
            )
            python_library(
                name='bar1',
                sources=[],
                dependencies=['src/python/foo/bar/baz:baz1'],
            )
            python_library(
                name='bar2',
                sources=[],
                dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
            )
            resources(name='bar-resources', sources=[])
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo",
        textwrap.dedent(
            """
            python_distribution(
                name='foo-dist',
                dependencies=[':foo'],
                provides=setup_py(name='foo', version='3.4.5'),
            )
            python_library(
                sources=[],
                dependencies=['src/python/foo/bar:bar1', 'src/python/foo/bar:bar2'],
            )
            """
        ),
    )

    def assert_owned(owned: Iterable[str], exported: Address):
        # Compare address specs of the computed OwnedDependencies set.
        tgt = rule_runner.get_target(exported)
        assert sorted(owned) == sorted(
            od.target.address.spec
            for od in rule_runner.request(
                OwnedDependencies,
                [DependencyOwner(ExportedTarget(tgt))],
            )
        )

    assert_owned(
        ["src/python/foo/bar:bar1", "src/python/foo/bar:bar1-dist", "src/python/foo/bar/baz:baz1"],
        Address("src/python/foo/bar", target_name="bar1-dist"),
    )
    assert_owned(
        [
            "src/python/foo",
            "src/python/foo:foo-dist",
            "src/python/foo/bar:bar2",
            "src/python/foo/bar:bar-resources",
            "src/python/foo/bar/baz:baz2",
        ],
        Address("src/python/foo", target_name="foo-dist"),
    )
@pytest.fixture
def exporting_owner_rule_runner() -> RuleRunner:
    """RuleRunner wired with just the exporting-owner rule graph."""
    return create_setup_py_rule_runner(
        rules=[
            get_exporting_owner,
            QueryRule(ExportedTarget, (OwnedDependency,)),
        ]
    )
def assert_is_owner(rule_runner: RuleRunner, owner: str, owned: Address):
    """Assert that the target at `owned` is exported by the target whose
    address spec is `owner`."""
    tgt = rule_runner.get_target(owned)
    exported = rule_runner.request(
        ExportedTarget,
        [OwnedDependency(tgt)],
    )
    assert exported.target.address.spec == owner
def assert_owner_error(rule_runner, owned: Address, exc_cls: Type[Exception]):
    """Assert that resolving the exporting owner of `owned` fails with
    exactly `exc_cls`."""
    tgt = rule_runner.get_target(owned)
    with pytest.raises(ExecutionError) as excinfo:
        rule_runner.request(
            ExportedTarget,
            [OwnedDependency(tgt)],
        )
    wrapped = excinfo.value.wrapped_exceptions
    assert len(wrapped) == 1
    assert type(wrapped[0]) == exc_cls
def assert_no_owner(rule_runner: RuleRunner, owned: Address):
    """Assert that no exported target owns `owned`."""
    assert_owner_error(rule_runner, owned, NoOwnerError)
def assert_ambiguous_owner(rule_runner: RuleRunner, owned: Address):
    """Assert that more than one exported target could own `owned`."""
    assert_owner_error(rule_runner, owned, AmbiguousOwnerError)
def test_get_owner_simple(exporting_owner_rule_runner: RuleRunner) -> None:
    """Ownership resolution: direct, transitive, unowned, and ambiguous cases."""
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        textwrap.dedent(
            """
            python_library(name='baz1', sources=[])
            python_library(name='baz2', sources=[])
            """
        ),
    )
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/foo/bar",
        textwrap.dedent(
            """
            python_distribution(
                name='bar1',
                dependencies=['src/python/foo/bar/baz:baz1'],
                provides=setup_py(name='bar1', version='1.1.1'),
            )
            python_library(
                name='bar2',
                sources=[],
                dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
            )
            resources(name='bar-resources', sources=[])
            """
        ),
    )
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/foo",
        textwrap.dedent(
            """
            python_distribution(
                name='foo1',
                dependencies=['src/python/foo/bar/baz:baz2'],
                provides=setup_py(name='foo1', version='0.1.2'),
            )
            python_library(name='foo2', sources=[])
            python_distribution(
                name='foo3',
                dependencies=['src/python/foo/bar:bar2'],
                provides=setup_py(name='foo3', version='3.4.5'),
            )
            """
        ),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo/bar:bar1",
        Address("src/python/foo/bar", target_name="bar1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo/bar:bar1",
        Address("src/python/foo/bar/baz", target_name="baz1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo1",
        Address("src/python/foo", target_name="foo1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo", target_name="foo3"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo/bar", target_name="bar2"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/foo:foo3",
        Address("src/python/foo/bar", target_name="bar-resources"),
    )
    # foo2 is depended on by no distribution.
    assert_no_owner(exporting_owner_rule_runner, Address("src/python/foo", target_name="foo2"))
    # baz2 is reachable from both foo1 and foo3.
    assert_ambiguous_owner(
        exporting_owner_rule_runner, Address("src/python/foo/bar/baz", target_name="baz2")
    )
def test_get_owner_siblings(exporting_owner_rule_runner: RuleRunner) -> None:
    """A distribution can own a sibling target in the same directory."""
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/siblings",
        textwrap.dedent(
            """
            python_library(name='sibling1', sources=[])
            python_distribution(
                name='sibling2',
                dependencies=['src/python/siblings:sibling1'],
                provides=setup_py(name='siblings', version='2.2.2'),
            )
            """
        ),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/siblings:sibling2",
        Address("src/python/siblings", target_name="sibling1"),
    )
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/siblings:sibling2",
        Address("src/python/siblings", target_name="sibling2"),
    )
def test_get_owner_not_an_ancestor(exporting_owner_rule_runner: RuleRunner) -> None:
    """A distribution that is not a spec-path ancestor of a dependency does
    not own that dependency."""
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/notanancestor/aaa",
        textwrap.dedent(
            """
            python_library(name='aaa', sources=[])
            """
        ),
    )
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/notanancestor/bbb",
        textwrap.dedent(
            """
            python_distribution(
                name='bbb',
                dependencies=['src/python/notanancestor/aaa'],
                provides=setup_py(name='bbb', version='11.22.33'),
            )
            """
        ),
    )
    assert_no_owner(exporting_owner_rule_runner, Address("src/python/notanancestor/aaa"))
    assert_is_owner(
        exporting_owner_rule_runner,
        "src/python/notanancestor/bbb",
        Address("src/python/notanancestor/bbb"),
    )
def test_get_owner_multiple_ancestor_generations(exporting_owner_rule_runner: RuleRunner) -> None:
    """When several ancestor distributions could own a target, the closest
    ancestor wins."""
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/aaa/bbb/ccc",
        textwrap.dedent(
            """
            python_library(name='ccc', sources=[])
            """
        ),
    )
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/aaa/bbb",
        textwrap.dedent(
            """
            python_distribution(
                name='bbb',
                dependencies=['src/python/aaa/bbb/ccc'],
                provides=setup_py(name='bbb', version='1.1.1'),
            )
            """
        ),
    )
    exporting_owner_rule_runner.add_to_build_file(
        "src/python/aaa",
        textwrap.dedent(
            """
            python_distribution(
                name='aaa',
                dependencies=['src/python/aaa/bbb/ccc'],
                provides=setup_py(name='aaa', version='2.2.2'),
            )
            """
        ),
    )
    # ccc is claimed by both aaa and bbb; the nearer ancestor (bbb) owns it.
    assert_is_owner(
        exporting_owner_rule_runner, "src/python/aaa/bbb", Address("src/python/aaa/bbb/ccc")
    )
    assert_is_owner(
        exporting_owner_rule_runner, "src/python/aaa/bbb", Address("src/python/aaa/bbb")
    )
    assert_is_owner(exporting_owner_rule_runner, "src/python/aaa", Address("src/python/aaa"))
def test_validate_args() -> None:
    """validate_commands rejects `upload` and any `-d`/`--dist-dir` override,
    and accepts ordinary sdist/bdist_wheel invocations."""
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("bdist_wheel", "upload"))
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("sdist", "-d", "new_distdir/"))
    with pytest.raises(InvalidSetupPyArgs):
        validate_commands(("--dist-dir", "new_distdir/", "sdist"))
    validate_commands(("sdist",))
    validate_commands(("bdist_wheel", "--foo"))
def test_distutils_repr() -> None:
    """distutils_repr pretty-prints nested containers with 4-space indents,
    decodes bytes, and escapes awkward strings."""
    testdata = {
        "foo": "bar",
        "baz": {"qux": [123, 456], "quux": ("abc", b"xyz"), "corge": {1, 2, 3}},
        "various_strings": ["x'y", "aaa\nbbb"],
    }
    expected = """
{
    'foo': 'bar',
    'baz': {
        'qux': [
            123,
            456,
        ],
        'quux': (
            'abc',
            'xyz',
        ),
        'corge': {
            1,
            2,
            3,
        },
    },
    'various_strings': [
        'x\\'y',
        \"\"\"aaa\nbbb\"\"\",
    ],
}
""".strip()
    assert expected == distutils_repr(testdata)
@pytest.mark.parametrize(
    "python_src",
    [
        "__import__('pkg_resources').declare_namespace(__name__)",
        "\n__import__('pkg_resources').declare_namespace(__name__)  # type: ignore[attr-defined]",
        "import pkg_resources; pkg_resources.declare_namespace(__name__)",
        "from pkg_resources import declare_namespace; declare_namespace(__name__)",
    ],
)
def test_declares_pkg_resources_namespace_package(python_src: str) -> None:
    """All common spellings of the declaration are detected."""
    assert declares_pkg_resources_namespace_package(python_src)
@pytest.mark.parametrize(
    "python_src",
    [
        "",
        "import os\n\nos.getcwd()",
        "__path__ = 'foo'",
        "import pkg_resources",
        "add(1, 2); foo(__name__); self.shoot(__name__)",
        "declare_namespace(bonk)",
        "just nonsense, not even parseable",
    ],
)
def test_does_not_declare_pkg_resources_namespace_package(python_src: str) -> None:
    """Near-misses (including unparseable source) are not flagged."""
    assert not declares_pkg_resources_namespace_package(python_src)
| |
import timeit
from abc import abstractmethod, ABCMeta
from collections import namedtuple, OrderedDict
import inspect
from numba.core.compiler_lock import global_compiler_lock
from numba.core import errors, config, transforms, utils
from numba.core.tracing import event
from numba.core.postproc import PostProcessor
from numba.core.ir_utils import enforce_no_dels, legalize_single_scope
# terminal color markup, shared by this module's diagnostics
_termcolor = errors.termcolor()
class SimpleTimer(object):
    """A simple context managed timer.

    On exit, the wall-clock duration of the `with` block is available as the
    `elapsed` attribute (in seconds, per `timeit.default_timer`).
    """

    def __enter__(self):
        # Record the start time; exposed as `self.ts` for callers.
        self.ts = timeit.default_timer()
        return self

    def __exit__(self, *exc):
        end = timeit.default_timer()
        self.elapsed = end - self.ts
class CompilerPass(metaclass=ABCMeta):
    """ The base class for all compiler passes.
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        # Analysis data available to this pass (a mapping keyed by pass name;
        # see `get_analysis`). Populated via the `analysis` setter.
        self._analysis = None
        # Identifier assigned to this pass, set via the `pass_id` setter.
        self._pass_id = None

    @classmethod
    def name(cls):
        """
        Returns the name of the pass
        """
        # NOTE(review): `_name` is not defined on this base class; it is
        # presumably attached to concrete subclasses (e.g. at registration).
        return cls._name

    @property
    def pass_id(self):
        """
        The ID of the pass
        """
        return self._pass_id

    @pass_id.setter
    def pass_id(self, val):
        """
        Sets the ID of the pass
        """
        self._pass_id = val

    @property
    def analysis(self):
        """
        Analysis data for the pass
        """
        return self._analysis

    @analysis.setter
    def analysis(self, val):
        """
        Set the analysis data for the pass
        """
        self._analysis = val

    def run_initialization(self, *args, **kwargs):
        """
        Runs the initialization sequence for the pass, will run before
        `run_pass`.
        """
        return False

    @abstractmethod
    def run_pass(self, *args, **kwargs):
        """
        Runs the pass itself. Must return True/False depending on whether
        statement level modification took place.
        """
        pass

    def run_finalizer(self, *args, **kwargs):
        """
        Runs the finalization sequence for the pass, will run after
        `run_pass`.
        """
        return False

    def get_analysis_usage(self, AU):
        """ Override to set analysis usage
        """
        pass

    def get_analysis(self, pass_name):
        """
        Gets the analysis from a given pass
        """
        return self._analysis[pass_name]
class SSACompliantMixin(object):
    """ Mixin to indicate a pass is SSA form compliant. Nothing is asserted
    about this condition at present.
    """
    pass
class FunctionPass(CompilerPass):
    """ Base class for function passes
    """
    pass
class AnalysisPass(CompilerPass):
    """ Base class for analysis passes (no modification made to state)
    """
    pass
class LoweringPass(CompilerPass):
    """ Base class for lowering passes
    """
    pass
class AnalysisUsage(object):
    """This looks and behaves like LLVM's AnalysisUsage because its like that.

    Tracks two sets of passes: those this pass requires and those it
    preserves.
    """

    def __init__(self):
        self._required = set()
        self._preserved = set()

    def add_required(self, pss):
        """Record `pss` as an analysis this pass requires."""
        self._required.add(pss)

    def add_preserved(self, pss):
        """Record `pss` as an analysis this pass preserves."""
        self._preserved.add(pss)

    def get_required_set(self):
        """Return the (mutable) set of required analyses."""
        return self._required

    def get_preserved_set(self):
        """Return the (mutable) set of preserved analyses."""
        return self._preserved

    def __str__(self):
        # Only the required set is rendered, matching historical output.
        return f"required: {self._required}\n"
# Module-level switch: flip to True to enable debug chatter.
_DEBUG = False


def debug_print(*args, **kwargs):
    """Forward all arguments to print() only when _DEBUG is enabled."""
    if not _DEBUG:
        return
    print(*args, **kwargs)
pass_timings = namedtuple('pass_timings', 'init run finalize')
class PassManager(object):
    """
    The PassManager is a named instance of a particular compilation pipeline
    """
    # TODO: Eventually enable this, it enforces self consistency after each pass
    _ENFORCING = False

    def __init__(self, pipeline_name):
        """
        Create a new pipeline with name "pipeline_name"
        """
        # list of (pass class or registered name, description) in run order
        self.passes = []
        # "<index>_<pass name>" -> pass_timings, filled in by _runPass
        self.exec_times = OrderedDict()
        self._finalized = False
        self._analysis = None
        self._print_after = None
        self.pipeline_name = pipeline_name

    def _validate_pass(self, pass_cls):
        # A pass may be referenced by its registered name (str) or by a
        # CompilerPass subclass; anything else is a TypeError.
        if (not (isinstance(pass_cls, str) or
                 (inspect.isclass(pass_cls) and
                  issubclass(pass_cls, CompilerPass)))):
            msg = ("Pass must be referenced by name or be a subclass of a "
                   "CompilerPass. Have %s" % pass_cls)
            raise TypeError(msg)
        if isinstance(pass_cls, str):
            # validation only: raises if the name is unknown to the registry
            pass_cls = _pass_registry.find_by_name(pass_cls)
        else:
            if not _pass_registry.is_registered(pass_cls):
                raise ValueError("Pass %s is not registered" % pass_cls)

    def add_pass(self, pss, description=""):
        """
        Append a pass to the PassManager's compilation pipeline
        """
        self._validate_pass(pss)
        func_desc_tuple = (pss, description)
        self.passes.append(func_desc_tuple)
        # adding a pass invalidates any previous finalization
        self._finalized = False

    def add_pass_after(self, pass_cls, location):
        """
        Add a pass `pass_cls` to the PassManager's compilation pipeline after
        the pass `location`.
        """
        assert self.passes
        self._validate_pass(pass_cls)
        self._validate_pass(location)
        for idx, (x, _) in enumerate(self.passes):
            if x == location:
                break
        else:
            raise ValueError("Could not find pass %s" % location)
        self.passes.insert(idx + 1, (pass_cls, str(pass_cls)))
        # if a pass has been added, it's not finalized
        self._finalized = False

    def _debug_init(self):
        # determine after which passes IR dumps should take place
        def parse(conf_item):
            print_passes = []
            if conf_item != "none":
                if conf_item == "all":
                    print_passes = [x.name() for (x, _) in self.passes]
                else:
                    # we don't validate whether the named passes exist in this
                    # pipeline the compiler may be used reentrantly and
                    # different pipelines may contain different passes
                    splitted = conf_item.split(',')
                    print_passes = [x.strip() for x in splitted]
            return print_passes
        # (after, before, wrap) print sets, parsed from the config env vars
        ret = (parse(config.DEBUG_PRINT_AFTER),
               parse(config.DEBUG_PRINT_BEFORE),
               parse(config.DEBUG_PRINT_WRAP),)
        return ret

    def finalize(self):
        """
        Finalize the PassManager, after which no more passes may be added
        without re-finalization.
        """
        self._analysis = self.dependency_analysis()
        self._print_after, self._print_before, self._print_wrap = \
            self._debug_init()
        self._finalized = True

    @property
    def finalized(self):
        # True once finalize() has run and no pass has been added since
        return self._finalized

    def _patch_error(self, desc, exc):
        """
        Patches the error to show the stage that it arose in.
        """
        newmsg = "{desc}\n{exc}".format(desc=desc, exc=exc)
        exc.args = (newmsg,)
        return exc

    @global_compiler_lock  # this needs a lock, likely calls LLVM
    def _runPass(self, index, pss, internal_state):
        # Run a single pass instance (init/run/finalize phases) against the
        # compiler state, recording timings and optional IR dumps.
        mutated = False

        def check(func, compiler_state):
            # Run one phase and validate that it returned a strict boolean.
            mangled = func(compiler_state)
            if mangled not in (True, False):
                msg = ("CompilerPass implementations should return True/False. "
                       "CompilerPass with name '%s' did not.")
                raise ValueError(msg % pss.name())
            return mangled

        def debug_print(pass_name, print_condition, printable_condition):
            # Dump the function IR if this pass is in the given print set.
            if pass_name in print_condition:
                fid = internal_state.func_id
                args = (fid.modname, fid.func_qualname, self.pipeline_name,
                        printable_condition, pass_name)
                print(("%s.%s: %s: %s %s" % args).center(120, '-'))
                if internal_state.func_ir is not None:
                    internal_state.func_ir.dump()
                else:
                    print("func_ir is None")

        # debug print before this pass?
        debug_print(pss.name(), self._print_before + self._print_wrap, "BEFORE")
        # wire in the analysis info so it's accessible
        pss.analysis = self._analysis
        with SimpleTimer() as init_time:
            mutated |= check(pss.run_initialization, internal_state)
        with SimpleTimer() as pass_time:
            mutated |= check(pss.run_pass, internal_state)
        with SimpleTimer() as finalize_time:
            mutated |= check(pss.run_finalizer, internal_state)
        # Check that if the pass is an instance of a FunctionPass that it hasn't
        # emitted ir.Dels.
        if isinstance(pss, FunctionPass):
            enforce_no_dels(internal_state.func_ir)
        if self._ENFORCING:
            # TODO: Add in self consistency enforcement for
            # `func_ir._definitions` etc
            if _pass_registry.get(pss.__class__).mutates_CFG:
                if mutated:  # block level changes, rebuild all
                    PostProcessor(internal_state.func_ir).run()
                else:  # CFG level changes rebuild CFG
                    internal_state.func_ir.blocks = transforms.canonicalize_cfg(
                        internal_state.func_ir.blocks)
            # Check the func_ir has exactly one Scope instance
            if not legalize_single_scope(internal_state.func_ir.blocks):
                raise errors.CompilerError(
                    f"multiple scope in func_ir detected in {pss}",
                )
        # inject runtimes
        pt = pass_timings(init_time.elapsed, pass_time.elapsed,
                          finalize_time.elapsed)
        self.exec_times["%s_%s" % (index, pss.name())] = pt
        # debug print after this pass?
        debug_print(pss.name(), self._print_after + self._print_wrap, "AFTER")

    def run(self, state):
        """
        Run the defined pipelines on the state.
        """
        from numba.core.compiler import _EarlyPipelineCompletion
        if not self.finalized:
            raise RuntimeError("Cannot run non-finalised pipeline")
        # walk the passes and run them
        for idx, (pss, pass_desc) in enumerate(self.passes):
            try:
                event("-- %s" % pass_desc)
                pass_inst = _pass_registry.get(pss).pass_inst
                if isinstance(pass_inst, CompilerPass):
                    self._runPass(idx, pass_inst, state)
                else:
                    raise BaseException("Legacy pass in use")
            except _EarlyPipelineCompletion as e:
                # early-completion requests propagate untouched
                raise e
            except Exception as e:
                if (utils.use_new_style_errors() and not
                        isinstance(e, errors.NumbaError)):
                    raise e
                # annotate the exception with the pipeline/step it arose in
                msg = "Failed in %s mode pipeline (step: %s)" % \
                    (self.pipeline_name, pass_desc)
                patched_exception = self._patch_error(msg, e)
                raise patched_exception

    def dependency_analysis(self):
        """
        Computes dependency analysis
        """
        # collect each pass's declared AnalysisUsage, keyed by pass class
        deps = dict()
        for (pss, _) in self.passes:
            x = _pass_registry.get(pss).pass_inst
            au = AnalysisUsage()
            x.get_analysis_usage(au)
            deps[type(x)] = au

        requires_map = dict()
        for k, v in deps.items():
            requires_map[k] = v.get_required_set()

        def resolve_requires(key, rmap):
            # Transitively expand the requirement sets in `key` using `rmap`.
            def walk(lkey, rmap):
                dep_set = rmap[lkey] if lkey in rmap else set()
                if dep_set:
                    # NOTE(review): dep_set is mutated while being iterated;
                    # if walk() ever returns members not already present this
                    # raises RuntimeError -- confirm intended.
                    for x in dep_set:
                        dep_set |= (walk(x, rmap))
                    return dep_set
                else:
                    return set()
            ret = set()
            for k in key:
                ret |= walk(k, rmap)
            return ret

        # pass class -> full (transitive) set of required passes
        dep_chain = dict()
        for k, v in requires_map.items():
            dep_chain[k] = set(v) | (resolve_requires(v, requires_map))
        return dep_chain
pass_info = namedtuple('pass_info', 'pass_inst mutates_CFG analysis_only')
class PassRegistry(object):
    """
    Pass registry singleton class.

    Maps registered CompilerPass subclasses to a ``pass_info`` record
    holding a singleton instance of the pass and its registration flags.
    """
    _id = 0
    _registry = dict()

    def register(self, mutates_CFG, analysis_only):
        """Decorator factory: register a CompilerPass subclass.

        Assigns the class a unique ``pass_id`` and stores a singleton
        instance of it along with the given flags.
        """
        def make_festive(pass_class):
            assert not self.is_registered(pass_class)
            assert not self._does_pass_name_alias(pass_class.name())
            pass_class.pass_id = self._id
            self._id += 1
            self._registry[pass_class] = pass_info(pass_class(), mutates_CFG,
                                                  analysis_only)
            return pass_class
        return make_festive

    def is_registered(self, clazz):
        """Return True if `clazz` has been registered."""
        return clazz in self._registry

    def get(self, clazz):
        """Return the pass_info record for a registered pass class."""
        assert self.is_registered(clazz)
        return self._registry[clazz]

    def _does_pass_name_alias(self, check):
        """Return True if a registered pass already uses the name `check`."""
        for v in self._registry.values():
            # BUG FIX: name is a classmethod and must be called; comparing
            # the bound method object to a string was always False.
            if v.pass_inst.name() == check:
                return True
        return False

    def find_by_name(self, class_name):
        """Return the pass_info whose pass name is `class_name`.

        Raises:
            ValueError: if no pass with that name is registered.
        """
        assert isinstance(class_name, str)
        # BUG FIX: as above, name() must be called, otherwise no name ever
        # matched and this always raised ValueError.
        for v in self._registry.values():
            if v.pass_inst.name() == class_name:
                return v
        raise ValueError("No pass with name %s is registered" % class_name)

    def dump(self):
        """Print the full registry contents (debugging aid)."""
        for k, v in self._registry.items():
            print("%s: %s" % (k, v))
# Module-level singleton registry of compiler passes.
_pass_registry = PassRegistry()
# Remove the class from the module namespace so only the singleton above
# can be used.
del PassRegistry
"""
register_pass is used to register a compiler pass class for use with PassManager
instances.
"""
register_pass = _pass_registry.register
| |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Parse naming definition files.
Network access control applications use definition files which contain
information about networks and services. This naming class
will provide an easy interface into using these definitions.
Sample usage with definition files contained in ./acl/defs:
defs = Naming('acl/defs/')
services = defs.GetService('DNS')
returns ['53/tcp', '53/udp', ...]
networks = defs.GetNet('INTERNAL')
returns a list of nacaddr.IPv4 objects
The definition files are contained in a single directory and
may consist of multiple files ending in .net or .svc extensions,
indicating network or service definitions respectively. The
format of the files consists of a 'token' value, followed by a
list of values and optional comments, such as:
INTERNAL = 10.0.0.0/8 # RFC-1918
172.16.0.0/12 # RFC-1918
192.168.0.0/16 # RFC-1918
or
DNS = 53/tcp
53/udp
"""
import glob
import os
import re
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import port as portlib
class Error(Exception):
  """Base error class for this module, inherited by all other error classes."""
class NamespaceCollisionError(Error):
  """Used to report on duplicate symbol names found while parsing."""
class BadNetmaskTypeError(Error):
  """Raised when an unknown or invalid netmask type is specified."""
class NoDefinitionsError(Error):
  """Raised if no definitions are found."""
class ParseError(Error):
  """Raised if an error occurs during parsing."""
class UndefinedAddressError(Error):
  """Raised if an address is referenced but not defined."""
class UndefinedServiceError(Error):
  """Raised if a service is referenced but not defined."""
class UndefinedPortError(Error):
  """Raised if a port/protocol pair has not been defined."""
class UnexpectedDefinitionTypeError(Error):
  """An unexpected/unknown definition type was used."""
class NamingSyntaxError(Error):
  """A general syntax error for the definition."""
class _ItemUnit:
"""This class is a container for an index key and a list of associated values.
An ItemUnit will contain the name of either a service or network group,
and a list of the associated values separated by spaces.
Attributes:
name: A string representing a unique token value.
items: a list of strings containing values for the token.
"""
def __init__(self, symbol):
self.name = symbol
self.items = []
class Naming:
"""Object to hold naming objects from NETWORK and SERVICES definition files.
Attributes:
current_symbol: The current token being handled while parsing data.
services: A collection of all of the current service item tokens.
networks: A collection of all the current network item tokens.
unseen_services: Undefined service entries.
unseen_networks: Undefined network entries.
port_re: Regular Expression matching valid port entries.
"""
def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
"""Set the default values for a new Naming object."""
self.current_symbol = None
self.services = {}
self.networks = {}
self.unseen_services = {}
self.unseen_networks = {}
self.port_re = re.compile(r'(^\d+-\d+|^\d+)\/\w+$|^[\w\d-]+$',
re.IGNORECASE | re.DOTALL)
self.token_re = re.compile(r'(^[-_A-Z0-9]+$)', re.IGNORECASE)
if naming_file and naming_type:
filename = os.path.sep.join([naming_dir, naming_file])
with open(filename, 'r') as file_handle:
self._ParseFile(file_handle, naming_type)
elif naming_dir:
self._Parse(naming_dir, 'services')
self._CheckUnseen('services')
self._Parse(naming_dir, 'networks')
self._CheckUnseen('networks')
def _CheckUnseen(self, def_type):
if def_type == 'services':
if self.unseen_services:
raise UndefinedServiceError('%s %s' % (
'The following tokens were nested as a values, but not defined',
self.unseen_services))
if def_type == 'networks':
if self.unseen_networks:
raise UndefinedAddressError('%s %s' % (
'The following tokens were nested as a values, but not defined',
self.unseen_networks))
def GetIpParents(self, query):
"""Return network tokens that contain IP in query.
Args:
query: an ip string ('10.1.1.1') or nacaddr.IP object
Returns:
A sorted list of unique parent tokens.
"""
base_parents = []
recursive_parents = []
# convert string to nacaddr, if arg is ipaddr then convert str() to nacaddr
if (not isinstance(query, nacaddr.IPv4) and
not isinstance(query, nacaddr.IPv6)):
if query[:1].isdigit():
query = nacaddr.IP(query)
# Get parent token for an IP
if isinstance(query, nacaddr.IPv4) or isinstance(query, nacaddr.IPv6):
for token in self.networks:
for item in self.networks[token].items:
item = item.split('#')[0].strip()
if not item[:1].isdigit():
continue
try:
supernet = nacaddr.IP(item, strict=False)
if supernet.supernet_of(query):
base_parents.append(token)
except ValueError:
# item was not an IP
pass
# Get parent token for another token
else:
for token in self.networks:
for item in self.networks[token].items:
item = item.split('#')[0].strip()
if item[:1].isalpha() and item == query:
base_parents.append(token)
# look for nested tokens
for bp in base_parents:
done = False
for token in self.networks:
if bp in [item.split('#')[0].strip() for item in
self.networks[token].items]:
# ignore IPs, only look at token values
if bp[:1].isalpha():
if bp not in recursive_parents:
recursive_parents.append(bp)
recursive_parents.extend(self.GetIpParents(bp))
done = True
# if no nested tokens, just append value
if not done:
if bp[:1].isalpha() and bp not in recursive_parents:
recursive_parents.append(bp)
return sorted(list(set(recursive_parents)))
def GetServiceParents(self, query):
"""Given a query token, return list of services definitions with that token.
Args:
query: a service token name.
Returns:
List of service definitions containing the token.
"""
return self._GetParents(query, self.services)
def GetNetParents(self, query):
"""Given a query token, return list of network definitions with that token.
Args:
query: a network token name.
Returns:
A list of network definitions containing the token.
"""
return self._GetParents(query, self.networks)
def _GetParents(self, query, query_group):
"""Given a naming item dict, return any tokens containing the value.
Args:
query: a service or token name, such as 53/tcp or DNS
query_group: either services or networks dict
Returns:
Returns a list of definitions containing the token in desired group.
"""
base_parents = []
recursive_parents = []
# collect list of tokens containing query
for token in query_group:
if query in [item.split('#')[0].strip() for item in
query_group[token].items]:
base_parents.append(token)
if not base_parents:
return []
# iterate through tokens containing query, doing recursion if necessary
for bp in base_parents:
for token in query_group:
if bp in query_group[token].items and bp not in recursive_parents:
recursive_parents.append(bp)
recursive_parents.extend(self._GetParents(bp, query_group))
if bp not in recursive_parents:
recursive_parents.append(bp)
return recursive_parents
def GetNetChildren(self, query):
"""Given a query token, return list of network definitions tokens within provided token.
This will only return children, not descendants of provided token.
Args:
query: a network token name.
Returns:
A list of network definitions tokens within this token.
"""
return self._GetChildren(query, self.networks)
def _GetChildren(self, query, query_group):
"""Given a naming item dict, return tokens (not IPs) contained within this value.
Args:
query: a token name
query_group: networks dict
Returns:
Returns a list of definitions tokens within (children) target token.
"""
children = []
if query in query_group:
for item in query_group[query].items:
child = item.split('#')[0].strip()
# Determine if item a token, then it's a child
if not self._IsIpFormat(child):
children.append(child)
return children
def _IsIpFormat(self, item):
"""Helper function for _GetChildren to detect if string is IP format.
Args:
item: string either a IP or token.
Returns:
True if string is a IP
False if string is not a IP
"""
try:
item = item.strip()
nacaddr.IP(item, strict=False)
return True
except ValueError:
return False
def GetServiceNames(self):
"""Returns the list of all known service names."""
return list(self.services.keys())
def GetService(self, query):
"""Given a service name, return a list of associated ports and protocols.
Args:
query: Service name symbol or token.
Returns:
A list of service values such as ['80/tcp', '443/tcp', '161/udp', ...]
Raises:
UndefinedServiceError: If the service name isn't defined.
"""
expandset = set()
already_done = set()
data = []
service_name = ''
data = query.split('#') # Get the token keyword and remove any comment
service_name = data[0].split()[0] # strip and cast from list to string
if service_name not in self.services:
raise UndefinedServiceError('\nNo such service: %s' % query)
already_done.add(service_name)
for next_item in self.services[service_name].items:
# Remove any trailing comment.
service = next_item.split('#')[0].strip()
# Recognized token, not a value.
if '/' not in service:
# Make sure we are not descending into recursion hell.
if service not in already_done:
already_done.add(service)
try:
expandset.update(self.GetService(service))
except UndefinedServiceError as e:
# One of the services in query is undefined, refine the error msg.
raise UndefinedServiceError('%s (in %s)' % (e, query))
else:
expandset.add(service)
return sorted(expandset)
def GetPortParents(self, query, proto):
"""Returns a list of all service tokens containing the port/protocol pair.
Args:
query: port number ('22') as str
proto: protocol name ('tcp') as str
Returns:
A list of service tokens: ['SSH', 'HTTPS']
Raises:
UndefinedPortError: If the port/protocol pair isn't used in any
service tokens.
"""
# turn the given port and protocol into a PortProtocolPair object
given_ppp = portlib.PPP(query + '/' + proto)
base_parents = []
matches = set()
# check each service token to see if it's a PPP or a nested group.
# if it's a PPP, see if there's a match with given_ppp
# otherwise, add nested group to a list to recurisvely check later.
# if there's no match, do nothing.
for service_token in self.services:
for port_child in self.services[service_token].items:
ppp = portlib.PPP(port_child)
# check for exact match
if ppp.is_single_port and ppp == given_ppp:
matches.add(service_token)
# check if it's within ppp's port range
elif ppp.is_range and given_ppp in ppp:
matches.add(service_token)
# if it's a nested token, add to a list to recurisvely
# check later.
elif ppp.nested:
if service_token not in base_parents:
base_parents.append(service_token)
# break down the nested service tokens into PPP objects and check
# against given_ppp
for bp in base_parents:
for port_child in self.GetService(bp):
ppp = portlib.PPP(port_child)
# check for exact match
if ppp.is_single_port and ppp == given_ppp:
matches.add(bp)
# check if it's within ppp's port range
elif ppp.is_range and given_ppp in ppp:
matches.add(bp)
# error if the port/protocol pair is not found.
if not matches:
raise UndefinedPortError(
'%s/%s is not found in any service tokens' % (query, proto))
return sorted(matches)
def GetServiceByProto(self, query, proto):
"""Given a service name, return list of ports in the service by protocol.
Args:
query: Service name to lookup.
proto: A particular protocol to restrict results by, such as 'tcp'.
Returns:
A list of service values of type 'proto', such as ['80', '443', ...]
Raises:
UndefinedServiceError: If the service name isn't defined.
"""
services_set = set()
proto = proto.upper()
data = []
servicename = ''
data = query.split('#') # Get the token keyword and remove any comment
servicename = data[0].split()[0] # strip and cast from list to string
if servicename not in self.services:
raise UndefinedServiceError('%s %s' % ('\nNo such service,', servicename))
for service in self.GetService(servicename):
if service and '/' in service:
parts = service.split('/')
if parts[1].upper() == proto:
services_set.add(parts[0])
return sorted(services_set)
def GetNetAddr(self, token):
"""Given a network token, return a list of nacaddr.IPv4 or nacaddr.IPv6 objects.
Args:
token: A name of a network definition, such as 'INTERNAL'
Returns:
A list of nacaddr.IPv4 or nacaddr.IPv6 objects.
Raises:
UndefinedAddressError: if the network name isn't defined.
"""
return self.GetNet(token)
def GetNet(self, query):
"""Expand a network token into a list of nacaddr.IPv4 or nacaddr.IPv6 objects.
Args:
query: Network definition token which may include comment text
Raises:
BadNetmaskTypeError: Results when an unknown netmask_type is
specified. Acceptable values are 'cidr', 'netmask', and 'hostmask'.
Returns:
List of nacaddr.IPv4 or nacaddr.IPv6 objects
Raises:
UndefinedAddressError: for an undefined token value
"""
returnlist = []
data = []
token = ''
data = query.split('#') # Get the token keyword and remove any comment
token = data[0].split()[0] # Remove whitespace and cast from list to string
if token not in self.networks:
raise UndefinedAddressError('%s %s' % ('\nUNDEFINED:', str(token)))
for i in self.networks[token].items:
comment = ''
if i.find('#') > -1:
(net, comment) = i.split('#', 1)
else:
net = i
net = net.strip()
if self.token_re.match(net):
returnlist.extend(self.GetNet(net))
else:
try:
# TODO(robankeny): Fix using error to continue processing.
addr = nacaddr.IP(net, strict=False)
addr.text = comment.lstrip()
addr.token = token
returnlist.append(addr)
except ValueError:
# if net was something like 'FOO', or the name of another token which
# needs to be dereferenced, nacaddr.IP() will return a ValueError
returnlist.extend(self.GetNet(net))
for i in returnlist:
i.parent_token = token
return returnlist
def _Parse(self, defdirectory, def_type):
"""Parse files of a particular type for tokens and values.
Given a directory name and the type (services|networks) to
process, grab all the appropriate files in that directory
and parse them for definitions.
Args:
defdirectory: Path to directory containing definition files.
def_type: Type of definitions to parse
Raises:
NoDefinitionsError: if no definitions are found.
"""
file_names = []
get_files = {'services': lambda: glob.glob(defdirectory + '/*.svc'),
'networks': lambda: glob.glob(defdirectory + '/*.net')}
if def_type in get_files:
file_names = get_files[def_type]()
else:
raise NoDefinitionsError('Definitions type %s is unknown.' % def_type)
if not file_names:
raise NoDefinitionsError('No definition files for %s in %s found.' %
(def_type, defdirectory))
for current_file in file_names:
try:
with open(current_file, 'r') as file_handle:
self._ParseFile(file_handle, def_type)
except IOError as error_info:
raise NoDefinitionsError('%s' % error_info)
def _ParseFile(self, file_handle, def_type):
for line in file_handle:
self._ParseLine(line, def_type)
def ParseServiceList(self, data):
"""Take an array of service data and import into class.
This method allows us to pass an array of data that contains service
definitions that are appended to any definitions read from files.
Args:
data: array of text lines containing service definitions.
"""
for line in data:
self._ParseLine(line, 'services')
def ParseNetworkList(self, data):
"""Take an array of network data and import into class.
This method allows us to pass an array of data that contains network
definitions that are appended to any definitions read from files.
Args:
data: array of text lines containing net definitions.
"""
for line in data:
self._ParseLine(line, 'networks')
def _ParseLine(self, line, definition_type):
"""Parse a single line of a service definition file.
This routine is used to parse a single line of a service
definition file, building a list of 'self.services' objects
as each line of the file is iterated through.
Args:
line: A single line from a service definition files.
definition_type: Either 'networks' or 'services'
Raises:
UnexpectedDefinitionTypeError: called with unexpected type of definitions.
NamespaceCollisionError: when overlapping tokens are found.
ParseError: If errors occur
NamingSyntaxError: Syntax error parsing config.
"""
if definition_type not in ['services', 'networks']:
raise UnexpectedDefinitionTypeError('%s %s' % (
'Received an unexpected definition type:', definition_type))
line = line.strip()
if not line or line.startswith('#'): # Skip comments and blanks.
return
comment = ''
if line.find('#') > -1: # if there is a comment, save it
(line, comment) = line.split('#', 1)
line_parts = line.split('=') # Split on var = val lines.
# the value field still has the comment at this point
# If there was '=', then do var and value
if len(line_parts) > 1:
current_symbol = line_parts[0].strip() # varname left of '='
if not self.token_re.match(current_symbol):
logging.info('\nService name does not match recommended criteria: %s\nOnly A-Z, a-z, 0-9, -, and _ allowed' % current_symbol)
self.current_symbol = current_symbol
if definition_type == 'services':
for port in line_parts[1].strip().split():
if not self.port_re.match(port):
raise NamingSyntaxError('%s: %s' % (
'The following line has a syntax error', line))
if self.current_symbol in self.services:
raise NamespaceCollisionError('%s %s' % (
'\nMultiple definitions found for service: ',
self.current_symbol))
elif definition_type == 'networks':
if self.current_symbol in self.networks:
raise NamespaceCollisionError('%s %s' % (
'\nMultiple definitions found for service: ',
self.current_symbol))
self.unit = _ItemUnit(self.current_symbol)
if definition_type == 'services':
self.services[self.current_symbol] = self.unit
# unseen_services is a list of service TOKENS found in the values
# of newly defined services, but not previously defined themselves.
# When we define a new service, we should remove it (if it exists)
# from the list of unseen_services.
if self.current_symbol in self.unseen_services:
self.unseen_services.pop(self.current_symbol)
elif definition_type == 'networks':
self.networks[self.current_symbol] = self.unit
if self.current_symbol in self.unseen_networks:
self.unseen_networks.pop(self.current_symbol)
else:
raise ParseError('Unknown definitions type.')
values = line_parts[1]
# No '=', so this is a value only line
else:
values = line_parts[0] # values for previous var are continued this line
for value_piece in values.split():
if not value_piece:
continue
if not self.current_symbol:
break
if comment:
self.unit.items.append(value_piece + ' # ' + comment)
else:
self.unit.items.append(value_piece)
# token?
if value_piece[0].isalpha() and ':' not in value_piece:
if definition_type == 'services':
# already in top definitions list?
if value_piece not in self.services:
# already have it as an unused value?
if value_piece not in self.unseen_services:
self.unseen_services[value_piece] = True
if definition_type == 'networks':
if value_piece not in self.networks:
if value_piece not in self.unseen_networks:
self.unseen_networks[value_piece] = True
| |
#!/usr/bin/env python2.7
"""Adds a GTK status-bar icon allowing one-click control of the screensaver."""
LICENSE = """\
Copyright (c) 2012 Ian Good <ian.good@rackspace.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" # NOQA
import os
import sys
import subprocess
import argparse
import pkg_resources
import dbus
import gobject
import gtk
__version__ = pkg_resources.require('screensaver-icon')[0].version
class State(object):
    """Central coordinator wiring together the tray icon, the xscreensaver
    wrapper and the Pidgin status helper."""

    def __init__(self, args):
        self.icon = Icon(self, args)
        self.screensaver = XScreensaver(self, args)
        self.pidgin = Pidgin(self, args)

    def main(self):
        """Run the GTK main loop, polling screensaver status every 2.5s."""
        gobject.timeout_add(2500, self.screensaver.refresh_on_status)
        try:
            gtk.main()
        finally:
            self.screensaver.kill_watch_process()

    def on_status_changed(self, new):
        """Reflect the new screensaver running state in the tray icon."""
        self.icon.set_status(new)

    def refresh_on_status(self, *args):
        """Ask the screensaver wrapper to re-check whether it is running."""
        self.screensaver.refresh_on_status()

    def got_blank_trigger(self):
        """Screen blanked: mark Pidgin away when the option is enabled."""
        if not self.icon.get_away_on_lock():
            return
        self.pidgin.set_away()

    def got_unblank_trigger(self):
        """Screen unblanked: restore Pidgin status when the option is enabled."""
        if not self.icon.get_away_on_lock():
            return
        self.pidgin.remove_away()

    def got_lock_trigger(self):
        """Screen locked: mark Pidgin away when the option is enabled."""
        if not self.icon.get_away_on_lock():
            return
        self.pidgin.set_away()

    def icon_clicked(self):
        """Left-click on the tray icon toggles the screensaver daemon."""
        self.screensaver.toggle_on()
class Pidgin(object):
    """Flips Pidgin's saved status to/from 'away' over D-Bus."""

    def __init__(self, state, args):
        self.state = state
        # Saved-status handle captured when going away, restored afterwards.
        self.prev = None

    def _set_status(self, to_away):
        purple = self._get_purple()
        if not purple:
            return
        away_status = purple.PurpleSavedstatusGetIdleaway()
        current_status = purple.PurpleSavedstatusGetCurrent()
        if to_away:
            self.prev = current_status
            purple.PurpleSavedstatusActivate(away_status)
        else:
            purple.PurpleSavedstatusActivate(self.prev)
            self.prev = None

    def set_away(self):
        """Switch Pidgin to its idle/away saved status."""
        self._set_status(True)

    def remove_away(self):
        """Restore the pre-away status (slightly delayed), if one was saved."""
        if self.prev:
            gobject.timeout_add(1000, self._set_status, False)

    def _get_purple(self):
        """Return the libpurple D-Bus interface, or None if Pidgin is absent."""
        try:
            session = dbus.SessionBus()
            proxy = session.get_object('im.pidgin.purple.PurpleService',
                                       '/im/pidgin/purple/PurpleObject')
            return dbus.Interface(proxy, 'im.pidgin.purple.PurpleInterface')
        except dbus.exceptions.DBusException:
            return None
class XScreensaver(object):
    """Controls and observes the xscreensaver daemon via its CLI tools."""

    def __init__(self, state, args):
        self.state = state
        self._toggling = False  # True while a click-initiated toggle is pending
        self._on_status_process = None  # in-flight '-version' status probe
        self._watch_process = None  # long-running '-watch' follower
        self._start_watch()

    def kill_watch_process(self):
        # Best effort: the watch process may already be gone.
        try:
            self._watch_process.terminate()
        except OSError:
            pass

    def toggle_on(self):
        # Flip the daemon state once the current on/off status is known
        # (acted upon in _on_status_finished).
        self._toggling = True
        self.refresh_on_status()

    def refresh_on_status(self):
        # Probe whether the daemon is running; the exit code of
        # 'xscreensaver-command -version' is interpreted in
        # _on_status_finished (0 == running).
        if self._on_status_process:
            return
        p = subprocess.Popen(['xscreensaver-command', '-version'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        p.stdin.close()
        gobject.io_add_watch(p.stdout, gobject.IO_HUP,
                             self._on_status_finished)
        self._on_status_process = p

    def _start_watch(self, *extras):
        # (Re)spawn 'xscreensaver-command -watch' and follow its stdout;
        # also registered as the IO_HUP handler so the watch restarts when
        # the child exits.
        # NOTE(review): poll() is None while the child is still running, so
        # 'not poll()' is also true then and wait() would block -- in
        # practice this path runs from the IO_HUP callback after the child
        # has exited; confirm.
        if self._watch_process and not self._watch_process.poll():
            self._watch_process.wait()
        p = subprocess.Popen(['xscreensaver-command', '-watch'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        p.stdin.close()
        self._watch_process = p
        gobject.io_add_watch(p.stdout, gobject.IO_IN, self._get_watch_data)
        gobject.io_add_watch(p.stdout, gobject.IO_HUP, self._start_watch)
        return False

    def _on_status_finished(self, f, cond):
        p = self._on_status_process
        if p:
            self._on_status_process = None
            p.wait()
            # exit code 0 is treated as "daemon running"
            self.state.on_status_changed(p.returncode == 0)
            if self._toggling:
                self._toggling = False
                if p.returncode == 0:
                    self._turn_off()
                else:
                    self._turn_on()
        return False  # one-shot: remove this io watch

    def _turn_on(self):
        # Launch the daemon, discarding its output, then re-check status.
        devnull = open('/dev/null', 'w')
        p = subprocess.Popen(['xscreensaver', '-nosplash'],
                             stdin=subprocess.PIPE,
                             stdout=devnull, stderr=devnull)
        p.stdin.close()
        gobject.timeout_add(1000, self.refresh_on_status)

    def _turn_off(self):
        # Ask the daemon to exit, then re-check status shortly after.
        p = subprocess.Popen(['xscreensaver-command', '-exit'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        p.communicate()
        gobject.timeout_add(500, self.refresh_on_status)

    def _get_watch_data(self, f, cond):
        # Dispatch BLANK/UNBLANK/LOCK events read from the watch stream.
        line = f.readline()
        if line.startswith("BLANK"):
            self.state.got_blank_trigger()
        elif line.startswith("UNBLANK"):
            self.state.got_unblank_trigger()
        elif line.startswith("LOCK"):
            self.state.got_lock_trigger()
        return True  # keep watching
class Icon(object):
    """System-tray icon reflecting and controlling the screensaver state."""

    def __init__(self, state, args):
        self.state = state
        self.icon = None           # gtk.StatusIcon, created lazily on first status
        self.status = None         # last known daemon state (True = running)
        self._away_on_lock = True  # mark IM sessions away when screen locks
        self._load_icons(args)

    def _load_icons(self, args):
        # Resolve the "on"/"off" pixbufs, preferring user-supplied files.
        def load_default(name):
            # Fall back to the icons bundled with the package.
            from pkg_resources import resource_filename
            resource_name = 'icons/{0}.png'.format(name)
            fn = resource_filename(__name__, resource_name)
            return gtk.gdk.pixbuf_new_from_file(fn)
        if args.onicon:
            self._on_icon = gtk.gdk.pixbuf_new_from_file(args.onicon)
        else:
            self._on_icon = load_default('on')
        if args.officon:
            self._off_icon = gtk.gdk.pixbuf_new_from_file(args.officon)
        else:
            self._off_icon = load_default('off')

    def _set_icon_pixbuf(self, icon):
        # Apply the pixbuf that matches the current status.
        pixbuf = self._on_icon if self.status else self._off_icon
        icon.set_from_pixbuf(pixbuf)

    def _create_icon(self):
        # Build the StatusIcon and hook up the click handlers.
        self.icon = gtk.StatusIcon()
        self._set_icon_pixbuf(self.icon)
        self.icon.connect('popup-menu', self._right_click)
        self.icon.connect('activate', self._left_click)
        self.icon.set_tooltip("Screensaver Icon")

    def _change_away_on_lock(self, item):
        # Menu checkbox toggled: remember the new preference.
        self._away_on_lock = item.get_active()

    def get_away_on_lock(self):
        """Return True if IM status should be set away on screen lock."""
        return self._away_on_lock

    def set_status(self, status):
        """Record the daemon state and create/update the tray icon."""
        self.status = status
        if not self.icon:
            self._create_icon()
        else:
            self._set_icon_pixbuf(self.icon)

    def _right_click(self, icon, button, timestamp):
        # Build the context menu from scratch on every popup.
        menu = gtk.Menu()
        if self.status:
            status = gtk.MenuItem("Running")
        else:
            status = gtk.MenuItem("Stopped")
        status.set_sensitive(False)  # informational entry only
        aol = gtk.CheckMenuItem("Away On Lock")
        aol.set_active(self._away_on_lock)
        refresh = gtk.ImageMenuItem("Refresh")
        about = gtk.ImageMenuItem("About")
        quit = gtk.ImageMenuItem("Quit")  # NOTE: local name shadows builtin quit
        img = gtk.image_new_from_stock(gtk.STOCK_REFRESH, gtk.ICON_SIZE_MENU)
        img.show()
        refresh.set_image(img)
        img = gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
        img.show()
        about.set_image(img)
        img = gtk.image_new_from_stock(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
        img.show()
        quit.set_image(img)
        aol.connect("toggled", self._change_away_on_lock)
        refresh.connect("activate", self.state.refresh_on_status)
        about.connect("activate", self._show_about_dialog)
        quit.connect("activate", gtk.main_quit)
        menu.append(status)
        menu.append(gtk.SeparatorMenuItem())
        menu.append(aol)
        menu.append(refresh)
        menu.append(about)
        menu.append(gtk.SeparatorMenuItem())
        menu.append(quit)
        menu.show_all()
        menu.popup(None, None, gtk.status_icon_position_menu,
                   button, timestamp, icon)

    def _left_click(self, icon):
        # Single click: let the application state toggle the daemon.
        self.state.icon_clicked()

    def _show_about_dialog(self, widget):
        about = gtk.AboutDialog()
        about.set_destroy_with_parent(True)
        about.set_name("Screensaver Icon")
        about.set_version(__version__)
        about.set_authors(["Ian Good <ian.good@rackspace.com>"])
        about.set_license(LICENSE)
        about.set_comments("""Provides a status-bar icon letting single-click \
enabling a disabling of the xscreensaver daemon. Also, pidgin sessions will \
be marked "Away" while the screensaver is engaged.""")
        about.run()
        about.destroy()
def _parse_args():
    """Build the command-line interface and return the parsed options."""
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument('-v', '--version', action='version',
                     version='%(prog)s '+__version__)
    cli.add_argument('-f', '--foreground', action='store_true',
                     help='Run in the foreground, do not daemonize.')
    cli.add_argument('--on-icon', dest='onicon', metavar='FILE',
                     help='Use FILE icon indicating screensaver is on and '
                          'activated.')
    cli.add_argument('--off-icon', dest='officon', metavar='FILE',
                     help='Use FILE icon indicating screensaver is off and '
                          'disabled.')
    return cli.parse_args()
# Daemonize the current process.
def _daemonize():
    """Detach from the controlling terminal via the classic double fork.

    On any fork failure the function simply returns and the process
    keeps running in the foreground.
    """
    # Fork once; the parent exits, the child continues.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Detach from the terminal and reset filesystem state.
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # Fork again so the daemon can never reacquire a controlling tty.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Find the OS /dev/null equivalent.
    nullfile = getattr(os, 'devnull', '/dev/null')
    # Redirect all standard I/O to /dev/null.  open() replaces the
    # Python-2-only file() builtin; the 0 for stderr keeps it
    # unbuffered as before.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(nullfile, 'r')
    so = open(nullfile, 'a+')
    se = open(nullfile, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def main():
    """Entry point: parse options, build state, optionally daemonize, run."""
    options = _parse_args()
    app_state = State(options)
    if not options.foreground:
        _daemonize()
    app_state.main()
# Standard script entry point.
if __name__ == '__main__':
    main()
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# import htmlentitydefs
import re
import sys
import email
import io
from elementtree import ElementTree
class recollector:
    """Accumulates named regex fragments that may reference each other
    via %(name)s interpolation, compiling each one as it is added."""

    def __init__(self):
        self.res = {}   # name -> compiled pattern
        self.regs = {}  # name -> interpolated pattern text

    def add(self, name, reg, mods=None):
        # Expand %(other)s references against previously added patterns.
        pattern = reg % self.regs
        self.regs[name] = pattern
        # Compile immediately so an invalid pattern fails fast.
        if mods:
            self.res[name] = re.compile(pattern, mods)
        else:
            self.res[name] = re.compile(pattern)
# Build the shared grammar: a shallow XML/SGML lexer in the classic
# "REX" regex style, plus helpers for DOCTYPE and attribute parsing.
collector = recollector()
a = collector.add

a("TextSE", "[^<]+")
a("UntilHyphen", "[^-]*-")
a("Until2Hyphens", "%(UntilHyphen)s(?:[^-]%(UntilHyphen)s)*-")
a("CommentCE", "%(Until2Hyphens)s>?")
a("UntilRSBs", "[^\\]]*](?:[^\\]]+])*]+")
a("CDATA_CE", "%(UntilRSBs)s(?:[^\\]>]%(UntilRSBs)s)*>")
a("S", "[ \\n\\t\\r]+")
a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]")
a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]")
a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*")
a("QuoteSE", "\"[^\"]*\"|'[^']*'")
a("DT_IdentSE", "%(S)s%(Name)s(?:%(S)s(?:%(Name)s|%(QuoteSE)s))*")
# http://bugs.activestate.com/show_bug.cgi?id=28765
# a("MarkupDeclCE" , "(?:[^\\]\"'><]+|%(QuoteSE)s)*>" )
a("MarkupDeclCE", "(?:[^\\]\"'> \\n\\t\\r<]+|%(QuoteSE)s)*>")
a("S1", "[\\n\\r\\t ]")
a("UntilQMs", "[^?]*\\?+")
a("PI_Tail", "\\?>|%(S1)s%(UntilQMs)s(?:[^>?]%(UntilQMs)s)*>")
a("DT_ItemSE",
  "<(?:!(?:--%(Until2Hyphens)s>|[^-]%(MarkupDeclCE)s)|\\?%(Name)s(?:%(PI_Tail)s))|%%%(Name)s;|%(S)s"
  )
a("DocTypeCE",
  "%(DT_IdentSE)s(?:%(S)s)?(?:\\[(?:%(DT_ItemSE)s)*](?:%(S)s)?)?>?")
a("DeclCE",
  "--(?:%(CommentCE)s)?|\\[CDATA\\[(?:%(CDATA_CE)s)?|DOCTYPE(?:%(DocTypeCE)s)?")
a("PI_CE", "%(Name)s(?:%(PI_Tail)s)?")
a("EndTagCE", "(?P<endtag>%(Name)s)(?:%(S)s)?>?")
a("AttValSE", "\"[^<\"]*\"|'[^<']*'")
a("ElemTagCE",
  "(?P<tag>%(Name)s)(?P<attrs>(?:%(S)s%(Name)s(?:%(S)s)?=(?:%(S)s)?(?:%(AttValSE)s))*)(?:%(S)s)?/?>?")
a("MarkupSPE",
  "<(?:!(?:%(DeclCE)s)?|\\?(?:%(PI_CE)s)?|/(?:%(EndTagCE)s)?|(?:%(ElemTagCE)s)?)")
a("XML_SPE", "%(TextSE)s|%(MarkupSPE)s")
a("XML_MARKUP_ONLY_SPE", "%(MarkupSPE)s")
a("DOCTYPE",
  r'<!DOCTYPE\s+(?P<type>\S+)\s+(?P<ident>PUBLIC|SYSTEM)\s+(?P<data1>%(QuoteSE)s)\s*(?P<data2>%(QuoteSE)s)?\s*(?:\[|>)', re.S)
a("attrfinderRE",
  "(?:[\n \t]*)(%(Name)s)(?:%(S)s)?=(?:%(S)s)?(%(AttValSE)s)", re.S | re.U)

attrfinder = collector.res["attrfinderRE"]

# Matches any non-ASCII character.  Written as a plain unicode literal;
# the original wrapped it in eval(r'u"..."') only to hide the u-prefix
# from early Python 3 parsers, which is no longer necessary.
is_not_ascii = re.compile(u"[\u0080-\uffff]").search
def parseiter(data, markuponly=0):
    """Return an iterator of regex matches over *data*.

    With a true *markuponly*, only markup constructs are matched;
    otherwise plain text runs are yielded as well.
    """
    key = "XML_MARKUP_ONLY_SPE" if markuponly else "XML_SPE"
    return collector.res[key].finditer(data)
def strip_quotes(str):
    """Drop one layer of surrounding quotes from *str*.

    Returns None for a falsy input; returns the input unchanged when
    it does not begin with a single or double quote.
    """
    if not str:
        return None
    return str[1:-1] if str[0] in ("'", '"') else str
# Hand-maintained HTML structure tables.  Doing this via DTD/Schema
# would be more precise but a major pain; for general purposes these
# tables work fine and are faster.

# Tags declared EMPTY in the HTML DTD ("ELEMENT NAME - O EMPTY"):
# they never have children and never take an end tag.
html_no_close_tags = {
    "basefont", "br", "area", "link", "img", "param", "hr", "input",
    "col", "frame", "isindex", "base", "meta",
}
# Tags whose end tag is optional ("ELEMENT NAME - O *").
html_optional_close_tags = {
    "p", "dt", "dd", "li", "option", "thead", "tfoot", "colgroup",
    "col", "tr", "th", "td",
}
# Block-level elements.
html_block_tags = {
    "p", "h1", "h2", "h3", "h4", "h5", "h6", "ul", "ol", "pre", "dl",
    "div", "noscript", "blockquote", "form", "hr", "table", "fieldset",
    "address",
}
# Optional-end-tag elements that may not contain the block tags above.
html_cannot_contain_block_tags = {"p", "dt"}
# Any tag for which a missing close tag is legitimate.
html_close_tag_unnecessary = html_no_close_tags.union(html_optional_close_tags)
class HTMLTreeBuilder(ElementTree.TreeBuilder):
    """ElementTree builder that is forgiving about real-world HTML.

    Auto-closes tags whose end tag is optional, records parent links in
    ``nodemap``, and stamps each element with its source location
    (``el.start`` / ``el.end``, (line, col, offset) tuples from the parser).
    """

    def __init__(self, encoding="iso-8859-1"):
        ElementTree.TreeBuilder.__init__(self)
        self.encoding = encoding   # charset used to decode byte data
        self.nodes = []            # every element, in document order
        self.nodemap = {}          # {child_elem: parent_elem, ... }
        self._rootnodes = []       # elements closed at document top level
        self.current = None        # last element opened or closed

    def start(self, tag, attrs, loc_start, loc_end):
        """Open element *tag*; *attrs* is a list of (name, value) pairs."""
        if not tag:
            return
        if tag == "meta":
            # Look for <meta http-equiv="content-type" content="..."> and
            # adopt its charset for subsequent text decoding.
            http_equiv = content = None
            for k, v in attrs:
                if k == "http-equiv":
                    http_equiv = v.lower()
                elif k == "content":
                    content = v
            if http_equiv == "content-type" and content:
                # Parse the pseudo HTTP header with the email package.
                # BUGFIX: email.Message is a *module*, not a callable;
                # the previous email.Message(StringIO(...)) call raised
                # TypeError.  message_from_string + get_param is the
                # supported API.
                header = email.message_from_string(
                    "%s: %s\n\n" % (http_equiv, content)
                )
                encoding = header.get_param("charset")
                if encoding:
                    self.encoding = encoding
        l_tag = tag.lower()
        if self._elem:
            p_tag = self._elem[-1].tag.lower()
            # If the parent and child are the same tag, close the
            # parent when it uses optional close tags (e.g. <p><p>).
            if l_tag in html_optional_close_tags and p_tag == l_tag:
                self.end(tag)
            # Special-case table cells: a new row auto-closes an open cell.
            elif p_tag in ("td", "th") and l_tag == "tr":
                self.end_tag(p_tag)
            # A block tag closes a parent that cannot contain block tags.
            elif p_tag in html_cannot_contain_block_tags and l_tag in html_block_tags:
                self.end_tag(p_tag)
        attrib = {}
        for attr in attrs:
            attrib[attr[0]] = strip_quotes(attr[1])
        ElementTree.TreeBuilder.start(self, tag, attrib)
        el = self._elem[-1]
        self.current = el
        el.ns = None
        el.localName = el.tag
        el.start = loc_start
        el.end = None
        self.nodes.append(el)
        if len(self._elem) > 1:
            self.nodemap[el] = self._elem[-2]
        else:
            self.nodemap[el] = None
        # EMPTY elements (e.g. <br>) are closed immediately.
        if l_tag in html_no_close_tags:
            self.end_tag(tag, loc_end)

    def end(self, tag, loc=None):
        """Close *tag*, auto-closing intervening optional-end-tag elements."""
        if not self._elem:
            return None
        l_tag = tag
        l_lasttag = lasttag = self._elem[-1].tag
        if l_tag:
            l_tag = l_tag.lower()
        if l_lasttag:
            l_lasttag = lasttag.lower()
        # Pop optional-close ancestors that were opened *after* the
        # element being closed (compared by source offset, start[2]).
        while (l_tag != l_lasttag
               and l_lasttag in html_optional_close_tags
               and len(self._elem) > 1
               and self._last.start[2] < self._elem[-1].start[2]):
            self.end_tag(lasttag)
            if self._elem:
                lasttag = self._elem[-1].tag
                l_lasttag = lasttag.lower()
            else:
                self.current = self._last
                return self._last
        # Protect against a previous close of this tag.
        if l_tag in html_close_tag_unnecessary and l_tag != self._elem[-1].tag.lower():
            return None
        return self.end_tag(tag, loc)

    def end_tag(self, tag, loc=None):
        """Unconditionally close *tag*, popping anything opened inside it."""
        if not tag:
            return None
        self._flush()
        # Ignore an end tag that matches no currently open element.
        tags = [e.localName for e in self._elem]
        if tag not in tags:
            return None
        last = self._elem.pop()
        while last.tag != tag:
            last = self._elem.pop()
        self._last = last
        if not self._elem:
            self._rootnodes.append(self._last)
        if loc:
            self._last.end = loc
        self._tail = 1
        self.current = self._last
        return self._last

    def data(self, data):
        # Decode byte strings with the document charset; str data passes
        # through unchanged.  BUGFIX: the old code called
        # str(data, encoding) on *str* input, which raises TypeError on
        # Python 3.
        if isinstance(data, bytes):
            data = data.decode(self.encoding, "ignore")
        ElementTree.TreeBuilder.data(self, data)

    def close(self):
        """Return the root element (tolerates unclosed elements)."""
        if self._elem:
            return self._elem[0]
        return self._last
class Parser:
    """Regex-driven HTML/XML tokenizer that feeds a TreeBuilder."""

    def __init__(self, builder=None):
        if not builder:
            builder = ElementTree.TreeBuilder()
        self._builder = builder
        self.doctype = None    # dict of DOCTYPE match groups, once seen
        self.publicId = None
        self.systemId = None
        self.locator = {}      # offset -> [line, col] cache
        self._lastloc = None   # most recent offset given to getLocation
        self.data = None       # full document text (set by feed)

    def parse_doctype(self, data):
        """Record doctype/publicId/systemId from a <!DOCTYPE ...> string."""
        m = collector.res["DOCTYPE"].match(data)
        if m is None:
            return
        result = m.groupdict()
        self.doctype = result
        self.publicId = None
        if result['ident'] == "PUBLIC":
            self.publicId = strip_quotes(result['data1'])
            self.systemId = strip_quotes(result['data2'])
        else:
            self.systemId = strip_quotes(result['data1'])

    def getLocation(self, loc):
        """Map byte offset *loc* to a (line, col, loc) tuple (1-based line).

        Incremental: counts newlines only since the previous query and
        caches results in self.locator.
        """
        pos = 0
        last_lines = 0
        if self._lastloc:
            pos = self._lastloc
            last_lines = self.locator[pos][0]
        lines = last_lines + self.data.count("\n", pos, loc)
        col = 0
        if lines > last_lines:
            col = loc - self.data.rfind("\n", pos, loc) - 1
        elif pos in self.locator:
            col = loc - pos + self.locator[pos][1]
        self.locator[loc] = [lines, col]
        self._lastloc = loc
        return (lines + 1, col, loc)

    def feed(self, data, markuponly=0):
        """Tokenize *data* and drive the builder callbacks."""
        self.data = data
        for matchObj in parseiter(data, markuponly):
            x = matchObj.group(0)
            m = matchObj.groupdict()
            if x.startswith("<!DOCTYPE"):
                # BUGFIX: must be tested before the generic "<!" skip --
                # the previous ordering made DOCTYPE parsing unreachable.
                self.parse_doctype(x)
            elif x.startswith("<!"):
                # comments / CDATA / other declarations: ignored
                continue
            elif x.startswith("<?"):
                # processing instruction: ignored
                continue
            elif x.startswith("</"):
                self._builder.end(m[
                    "endtag"], self.getLocation(matchObj.end(0)))
            elif x.startswith("<"):
                # Start tag: collect (name, value) attribute pairs.
                attrs = []
                if "attrs" in m and m["attrs"] is not None:
                    attrs = attrfinder.findall(m["attrs"])
                start = self.getLocation(matchObj.start(0))
                end = self.getLocation(matchObj.end(0))
                self._builder.start(m["tag"], attrs, start, end)
                if x.endswith("/>"):
                    # self-closing element
                    self._builder.end(m["tag"], end)
            else:
                # plain character data
                self._builder.data(x)

    def close(self):
        """Finish parsing and return the builder's root element."""
        return self._builder.close()
try:
    import sgmlop
    ReParser = Parser

    class SgmlopParser(ReParser):
        """Faster parser backed by the sgmlop C extension, when installed."""

        def __init__(self, builder=None):
            ReParser.__init__(self, builder)
            self.__parser = sgmlop.XMLParser()
            self.__parser.register(self)

        def finish_starttag(self, tag, attrib, loc_start, loc_end):
            # builder expects a list of (name, value) tuples
            attrs = list(attrib.items())
            self._builder.start(tag, attrs, self.getLocation(
                loc_start), self.getLocation(loc_end))

        def finish_endtag(self, tag, loc):
            self._builder.end(tag, self.getLocation(loc))

        def handle_data(self, data):
            self._builder.data(data)

        def handle_special(self, data, token_type=None):
            # Detect a DOCTYPE; sgmlop hands us everything inside <!...>.
            if (token_type == 0x105 or  # from sgmlop.c
                    data and data.startswith("DOCTYPE")):
                self.parse_doctype("<!%s>" % data)

        def feed(self, data, markuponly=0):
            self.data = data
            return self.__parser.feed(data)

        def close(self):
            if self.__parser:
                self.__parser.close()
                self.__parser = None
            return ReParser.close(self)

    # Prefer the C-accelerated parser when available.
    Parser = SgmlopParser
except ImportError:
    # sgmlop is optional; fall back to the pure-regex Parser.
    # (Narrowed from a bare `except:`, which also hid unrelated errors.)
    pass
def HTML(data, ParserClass=Parser):
    """Parse *data* with an HTMLTreeBuilder and return the root element."""
    parser = ParserClass(HTMLTreeBuilder())
    parser.feed(data)
    return parser.close()
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        # Benchmark mode: parse the given file with both parser backends.
        import time
        # read the file and parse it to get a time.
        f = open(sys.argv[1])
        data = f.read()
        f.close()
        t1 = time.time()
        tree = HTML(data, ReParser)
        t2 = time.time()
        print("RE parsing took %s" % (t2-t1))
        t1 = time.time()
        tree = HTML(data, SgmlopParser)
        t2 = time.time()
        print("sgmlop parsing took %s" % (t2-t1))
        sys.exit(0)
    # Demo 1: small XHTML document with a sloppy <img>...</img> pair.
    data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head> <title>my title</title> </head>
<body>
<p>blah blah...
<img src="somefile.jpg" alt="blah">
</img>
</p>
</body>
</html>"""
    tree = HTML(data)
    print(ElementTree.tostring(tree))
    sys.exit(0)
    # NOTE(review): the sys.exit(0) above makes everything below
    # unreachable; the remaining samples are kept as scratch test data.
    # Demo 2: truncated XHTML 1.1 document.
    data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
"""
    tree = HTML(data)
    print(ElementTree.tostring(tree))
    sys.exit(0)
    # Demo 3: sloppy HTML with unclosed optional tags.
    data = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<HTML lang="en">
<BODY>
<p>
<img>
<p>
<br>
</p>
<hr>
<p>"""
    # <br>
    # <dl>
    # <li>
    # <li>
    # <li>
    # </dl>
    # <p>
    # <hr>
    #</p>
    #</BODY>
    #</HTML>
    #"""
    # Demo 4: larger real-world document (a Komodo help page).
    data = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<!-- Copyright (c) 2000-2006 ActiveState Software Inc. -->
<!-- See the file LICENSE.txt for licensing information. -->
<html>
<head>
<link rel="stylesheet" type="text/css" href="aspn.css">
<script language="JavaScript" src="displayToc.js"></script>
<script language="JavaScript" src="tocParas.js"></script>
<script language="JavaScript" src="tocTab.js"></script>
<link rel="icon" href="favicon.ico" type="image/x-icon"/>
<link rel="shortcut icon" href="favicon.ico" type="image/x-icon"/>
<title>XML Catalogs</title>
</head>
<body>
<table>
<tr>
<td>
<h1><a name="xml_catalogs_top">XML Catalogs</a></h1>
<p>Komodo can add <a href=komodo-doc-editor.html#XML_AutoComplete">XML
autocompletion</a> support for any XML dialect with a DTD or RelaxNG Schema.
This is done by mapping external identifier entries to local copies of the DTD
or RelaxNG Schema for that document type using <a target="_blank"
href="http://www.oasis-open.org/committees/entity/spec.html">XML
Catalogs</a>.</p>
<p><script>writelinks('xml_catalogs_top');</script> </p>
<h2><a name="using_xml_catalogs">Using an Existing XML Catalog</a></h2>
<p>Some toolkits bundle DTDs or RelaxNG Schemas with their own XML
catalogs. As long as the relative path from the catalog to the .dtd or
.rng file is preserved on the local filesystem, you can add support for
the dialect by specifying the catalog file in Preferences under <a
href="komodo-doc-prefs.html#xml_catalogs">SGML/XML Catalogs</a>.</p>
<p><script>writelinks('using_xml_catalogs');</script> </p>
<h2><a name="creating_xml_catalogs">Creating an XML Catalog</a></h2>
<p>If the DTD or RelaxNG Schema for the dialect does not have a catalog
file, you can create one by mapping the external identifiers and URI
references in the document's namespace declaration to a local filesystem
URI. For example, the <a target="_blank"
href="http://www.xspf.org/specs/">
<acronym title="XML Shareable Playlist Format">XSPF</acronym></a>
playlist format uses the following namespace declaration:</p>
<pre>
<playlist version="1" xmlns="http://xspf.org/ns/0/">
</pre>
<p>A simple catalog for this XML dialect would look like this:</p>
<pre>
<?xml version='1.0'?>
<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog"
prefer="public">
<uri name="http://xspf.org/ns/0/" uri="xspf-draft8.rng"/>
</catalog>
</pre>
<p>If your documents use the DOCTYPE declaration, you can add support
for that in the catalog by using the public and system identifier. For
example, <a target="_blank" href="http://www.mozilla.org/projects/xul/">
<acronym title="XML User Interface Language">XUL</acronym></a> uses
DOCTYPE declarations like this one:</p>
<pre>
<!DOCTYPE overlay PUBLIC "-//MOZILLA//DTD XUL V1.0//EN"
"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
</pre>
<p>Komodo's catalog for XUL uses <code>publicId</code> and
<code>systemId</code> in addition to <code>uri</code> for the
mapping.</p>
<pre>
<?xml version='1.0'?>
<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog" prefer="public">
<public publicId="-//MOZILLA//DTD XUL V1.0//EN"
uri="xul.dtd"/>
<system systemId="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
uri="xul.dtd"/>
<uri name="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
uri="xul.dtd"/>
</catalog>
</pre>
<p><script>writelinks('creating_xml_catalogs');</script> </p>
<h2><a name="xml_catalog_resources">XML Catalog Resources</a></h2>
<p>The XML Catalog specification can be found at:</p>
<ul>
<li><a target="_blank"
href="http://www.oasis-open.org/committees/entity/spec.html">
http://www.oasis-open.org/committees/entity/spec.html</a></li>
</ul>
<p>Examples of XML catalog files can be found in the Komodo installation
under:</p>
<ul>
<li><em><komodo-install-directory>\lib\support\catalogs</em>
(Windows)</li>
<li><em>/Applications/Komodo.app/Contents/SharedSupport/catalogs/ (OS
X)</em></li>
<li><em><komodo-install-directory>/lib/support/catalogs</em>
(Linux)</li>
</ul>
<p><script>writelinks('xml_catalog_resources');</script> </p>
<!-- Footer Start -->
<hr>
</td>
</tr>
</table>
</body>
</html>
"""
    tree = HTML(data)
    # print ElementTree.tostring(tree)
    # Demo 5: HTML with embedded PHP and IE conditional comments.
    data = """<html>
<HEAD>
<?php print $javascript->link('calendar') ?>
<?php $othAuth->init($othAuth->data);?>
<!--[if lte IE 6]-->
<?php echo $html->css{'hack'};?>
<!--[endif]-->
<script type="text/javascript">
function fadeTableRow(rowid, opts) {
if (!spts) {
opts = {};
}
}
</script>
</head>
<body>"""
    tree = HTML(data)
    # print ElementTree.tostring(tree)
    # Demo 6: eRuby (RHTML) template fragments.
    data = """<%= error_messages_for 'product' %>
<!--[form:product]-->
<p><label for="product_title">Title</label><br/>
<%= text_field 'product', 'title' %></p>
<p><label for="product_description">Description</label><br/>
<%= text_area 'product', 'description' %></p>
<p><label for="product_image_url">Image url</label><br/>
<%= text_field 'product', 'image_url' %></p>
<p><label for="product_price">Price</label><br/>
<%= text_field 'product', 'price' %></p>
<p><label for="product_date_available">Date available</label><br/>
<%= datetime_select 'product', 'date_available' %></p>
<!--[eoform:product]-->
"""
    tree = HTML(data)
    print(ElementTree.tostring(tree))
    p = Parser(HTMLTreeBuilder())
    p.feed(data)
    p.close()
| |
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG to know that whether this is traning process or not.
FLAG = 'train'
# Number of hidden units in the LSTM / network layers.
N_HIDDEN_CONFIG = 32
# Checkpoints are stored in a directory named after this script.
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
# NOTE(review): this handle is opened at import time and is not closed
# anywhere in this part of the script -- presumably closed after
# training completes; verify.
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
    """Load and stack the inertial-signal files.

    Arguments:
        X_signals_paths: list of text-file paths, one file per signal
            component; each file holds one whitespace-separated row of
            float readings per sample window.

    Returns:
        float32 ndarray of shape [n_samples, n_timesteps, n_signals]
        (np.transpose moves the per-file axis to the end).
    """
    X_signals = []
    for signal_type_path in X_signals_paths:
        # Text mode with a context manager.  (The previous code opened
        # 'rb' and then called str methods on bytes rows, which fails
        # on Python 3, and it relied on manual close.)
        with open(signal_type_path, 'r') as signal_file:
            # Collapse the dataset's double-space padding, then split.
            X_signals.append(
                [np.array(serie, dtype=np.float32) for serie in [
                    row.replace('  ', ' ').strip().split(' ')
                    for row in signal_file
                ]]
            )
    return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
    """Convert dense labels to one-hot rows.

    Argument:
        label: ndarray of shape [sample_num, 1] holding dense class ids.
    Return:
        ndarray of shape [sample_num, n_class] one-hot encodings.
    """
    flat = label.reshape(len(label))  # shape: [sample_num]
    # Classes run 0..max, so max + 1 columns are needed.
    width = np.max(flat) + 1
    return np.eye(width)[np.array(flat, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
    """Load integer class labels and shift them to 0-based.

    Argument:
        y_path: path to a text file with one integer label per row.
    Returns:
        int32 ndarray of shape [n_samples, 1], values shifted down by 1.
    """
    # Text mode with a context manager.  (The previous code opened 'rb'
    # and then called str methods on bytes rows, which fails on
    # Python 3, and it relied on manual close.)
    with open(y_path, 'r') as label_file:
        y_ = np.array(
            [elem for elem in [
                row.replace('  ', ' ').strip().split(' ')
                for row in label_file
            ]],
            dtype=np.int32
        )
    # Subtract 1 to each output class for friendly 0-based indexing
    return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
    """
    define a class to store parameters,
    the input should be feature mat of training and testing
    """

    def __init__(self, X_train, X_test):
        # Input data
        self.train_count = len(X_train)     # 7352 training series
        self.test_data_count = len(X_test)  # 2947 testing series
        self.n_steps = len(X_train[0])      # 128 time_steps per series
        # Training hyper-parameters
        self.learning_rate = 0.0025
        self.lambda_loss_amount = 0.0015
        self.training_epochs = 300
        self.batch_size = 1000
        # LSTM structure
        self.n_inputs = len(X_train[0][0])  # Features count is of 9: three 3D sensors features over time
        self.n_hidden = N_HIDDEN_CONFIG  # nb of neurons inside the neural network
        self.n_classes = 6  # Final output classes
        # NOTE(review): creating tf.Variables here ties this "config"
        # object to the TensorFlow graph current at construction time.
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [9, 32]
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [32, 6]
        }
        self.biases = {
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [32]
            'output': tf.Variable(tf.random_normal([self.n_classes]))  # [6]
        }
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
    """Return a TF Variable initialized from a truncated normal
    (mean 0.0, stddev 0.1, float32)."""
    init = tf.truncated_normal(shape, mean=0.0, stddev=0.1,
                               dtype=tf.float32)
    return tf.Variable(init)
def bias_varibale(shape):
    """Return a bias Variable filled with the constant 0.1.

    (The misspelled name "varibale" is kept: callers use it.)
    """
    return tf.Variable(tf.constant(0.1, shape=shape, name='Bias'))
# Convolution and Pooling
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding.

    Stride layout is [batch, height, width, channels]; the batch and
    channel strides must stay 1.
    """
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1],
                        padding='SAME', name='conv_2d')
def max_pool_2x2(x):
    """2x2 max pooling with stride 1 and SAME padding (spatial size kept)."""
    return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
                          strides=[1, 1, 1, 1], padding='SAME',
                          name='max_pool')
def LSTM_Network(feature_mat, config):
    """Conv front-end + stacked 2-layer LSTM classifier.

    Three 3x3 convolutions (1 -> 64 -> 64 -> 1 channels, stride 1, SAME
    padding) are applied to the input viewed as a 1-channel 32x36 image,
    then the result is fed time-major into a 2-layer LSTM of
    config.n_hidden cells and a final linear output layer.

    argument:
        feature_mat: tensor, shape [batch_size, 32, 36] after the caller's
            reshape (originally [batch_size, time_steps, n_inputs]).
        config: Config instance holding the shared W/biases variables and
            hyper-parameters.
    return:
        logits tensor of shape [batch_size, n_classes].
    """
    W_conv1 = weight_variable([3, 3, 1, 64])
    b_conv1 = bias_varibale([64])
    # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
    # View the feature matrix as a 1-channel 32x36 "image" for conv2d.
    feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
    print("----feature_mat_image-----")
    print(feature_mat_image.get_shape())
    h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
    # No pooling is applied anywhere: the h_pool* names simply alias the
    # conv outputs.
    h_pool1 = h_conv1
    # Second Convolutional Layer
    W_conv2 = weight_variable([3, 3, 64, 64])
    # NOTE(review): this bias uses weight_variable (truncated normal) rather
    # than bias_varibale (constant 0.1) — possibly an oversight; behaviour
    # preserved as-is.
    b_conv2 = weight_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = h_conv2
    # Third Convolutional Layer: collapse back to a single channel.
    W_conv3 = weight_variable([3, 3, 64, 1])
    b_conv3 = weight_variable([1])
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
    h_pool3 = h_conv3
    # Drop the channel dim: back to [batch_size, 32, 36].
    h_pool3 = tf.reshape(h_pool3, shape=[-1, 32, 36])
    feature_mat = h_pool3
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # Dead experiment (fully-connected + dropout head), kept for reference:
    # W_fc1 = weight_variable([8 * 9 * 1, 1024])
    # b_fc1 = bias_varibale([1024])
    # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
    # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # print("----h_fc1_drop-----")
    # print(h_fc1)
    # exit()
    #
    # # keep_prob = tf.placeholder(tf.float32)
    # keep_prob = tf.placeholder(1.0)
    # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
    # print("----h_fc1_drop-----")
    # print(h_fc1_drop)
    # exit()
    #
    # W_fc2 = weight_variable([1024, 10])
    # b_fc2 = bias_varibale([10])
    #
    # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    # print("----y_conv-----")
    # print(y_conv)
    # exit()
    # Exchange dim 1 and dim 0: [batch_size, 32, 36] -> [32, batch_size, 36]
    # (time-major layout expected by the step-list RNN API below).
    feature_mat = tf.transpose(feature_mat, [1, 0, 2])
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # Temporarily crush the feature_mat's dimensions to 2-D.
    feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # 9
    # New feature_mat's shape: [time_steps*batch_size, n_inputs]
    # Linear activation, reshaping inputs to the LSTM's number of hidden:
    hidden = tf.nn.relu(tf.matmul(
        feature_mat, config.W['hidden']
    ) + config.biases['hidden'])
    # New shape: [time_steps*batch_size, n_hidden]
    print("--n_steps--")
    print(config.n_steps)
    print("--hidden--")
    print(hidden)
    # Split into a list of n_steps tensors, one per time step.
    # NOTE(review): tf.split(split_dim, num_split, value) is the pre-1.0
    # TensorFlow argument order, as is tf.nn.rnn below.
    hidden = tf.split(0, config.n_steps, hidden)
    # hidden is now a list of length "time_step" of [batch_size, n_hidden].
    # Define LSTM cell of first hidden layer:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
    # Stack two LSTM layers, both layers have the same shape.
    lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
    # Get LSTM outputs; the internal cell states are discarded here.
    outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
    # outputs: list of length "time_step" of [batch_size, n_hidden] tensors.
    print("------------------list-------------------")
    print(outputs)
    # Get last time step's output feature for a "many to one" classifier.
    lstm_last_output = outputs[-1]  # last element: [batch_size, n_hidden]
    print("------------------last outputs-------------------")
    print (lstm_last_output)
    # Linear activation producing the class logits.
    return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
# Class logits for the batch, shape [batch_size, 6].
pred_Y = LSTM_Network(X, config)  # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
# L2 penalty over every trainable variable, scaled by lambda_loss_amount.
l2 = config.lambda_loss_amount * \
    sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2.
# NOTE(review): pre-1.0 positional order is (logits, labels) — pred_Y are
# the logits, Y the one-hot labels; confirm against the TF version in use.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
    learning_rate=config.learning_rate).minimize(cost)
# Accuracy = fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables (deprecated alias of
# global_variables_initializer in TF >= 0.12).
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if FLAG == 'train':  # training mode
    with tf.Session() as sess:
        sess.run(init)
        f.write("---Save model \n")
        # Train: iterate epochs, then mini-batches of config.batch_size
        # over the training set.
        for i in range(config.training_epochs):
            for start, end in zip(range(0, config.train_count, config.batch_size),
                                  range(config.batch_size, config.train_count + 1,
                                        config.batch_size)):
                print(start)
                print(end)
                sess.run(train_step, feed_dict={X: X_train[start:end],
                                                Y: y_train[start:end]})
            # Evaluate on the full test set after every epoch.
            pred_out, accuracy_out, loss_out = sess.run(
                [pred_Y, accuracy, cost], feed_dict={X: X_test, Y: y_test})
            print("traing iter: {},".format(i) +
                  " test accuracy : {},".format(accuracy_out) +
                  " loss : {}".format(loss_out))
            best_accuracy = max(best_accuracy, accuracy_out)
        # Save the trained model in this session.
        save_path = saver.save(sess, file_name + "/model.ckpt")
        print("Model saved in file: %s" % save_path)

        print("")
        # BUG FIX: was `print("final loss: {}").format(loss_out)`, which
        # calls .format() on print's None return and raises AttributeError.
        print("final loss: {}".format(loss_out))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
        # Write all output to the report file.
        f.write("final loss:" + str(format(loss_out)) + " \n")
        f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
        f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
# Record the finish time in the report file and release the handle.
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close()
| |
# google_custom_search/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
# See also WeVoteServer/import_export_twitter/models.py for the code that interfaces with twitter (or other) servers
import wevote_functions.admin
from django.db import models
from config.base import get_environment_variable
from wevote_functions.functions import positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
GOOGLE_SEARCH_ENGINE_ID = get_environment_variable("GOOGLE_SEARCH_ENGINE_ID")
GOOGLE_SEARCH_API_KEY = get_environment_variable("GOOGLE_SEARCH_API_KEY")
GOOGLE_SEARCH_API_NAME = get_environment_variable("GOOGLE_SEARCH_API_NAME")
GOOGLE_SEARCH_API_VERSION = get_environment_variable("GOOGLE_SEARCH_API_VERSION")
BALLOTPEDIA_LOGO_URL = "ballotpedia-logo-square"
MAXIMUM_GOOGLE_SEARCH_USERS = 10
MAXIMUM_CHARACTERS_LENGTH = 1024
class GoogleSearchUser(models.Model):
    """
    One Google search result that might match a candidate or organization.

    A row is one (candidate, item_link) possibility: the raw search-result
    fields, provenance flags for the source site, a likelihood score for
    ranking, and — when the link is a Facebook page — the scraped Facebook
    profile fields.
    """
    # Identity of the candidate this search result may belong to, plus the
    # query that produced it.
    candidate_campaign_we_vote_id = models.CharField(verbose_name="candidate we vote id", max_length=255, unique=False)
    search_term_used = models.CharField(verbose_name="", max_length=255, unique=False)
    # Raw Google search-result fields.
    item_title = models.CharField(verbose_name="searched item title", max_length=255, null=True, blank=True)
    item_link = models.URLField(verbose_name="url where searched item pointing", null=True, blank=True)
    item_snippet = models.CharField(verbose_name="searched item snippet", max_length=1024, null=True, blank=True)
    item_image = models.URLField(verbose_name='image url for searched item', blank=True, null=True)
    item_formatted_url = models.URLField(verbose_name="item formatted url", null=True, blank=True)
    item_meta_tags_description = models.CharField(verbose_name="searched item meta tags description", max_length=1000,
                                                  null=True, blank=True)
    search_request_url = models.URLField(verbose_name="search request url", max_length=255, null=True, blank=True)
    # Provenance flags: which well-known site the result link points at.
    from_ballotpedia = models.BooleanField(default=False, verbose_name="searched link from ballotpedia")
    from_facebook = models.BooleanField(default=False, verbose_name="searched link from facebook")
    from_linkedin = models.BooleanField(default=False, verbose_name="searched link from linkedin")
    from_twitter = models.BooleanField(default=False, verbose_name="searched link from twitter")
    from_wikipedia = models.BooleanField (default=False, verbose_name="searched link from wikipedia")
    # Review state: manual rejection, ranking score, and whether the chosen
    # result has been copied into the candidate table.
    not_a_match = models.BooleanField(default=False, verbose_name="this candidate does not match")
    likelihood_score = models.IntegerField(verbose_name="score for a match", null=True, unique=False)
    chosen_and_updated = models.BooleanField(default=False,
                                             verbose_name="when search detail updated in candidate table")
    # Facebook-specific fields, populated only when a facebook search found
    # a matching page (see the manager's update_or_create method).
    facebook_search_found = models.BooleanField(default=False, verbose_name="user found from facebook search")
    facebook_name = models.CharField(verbose_name="name from facebook search", max_length=255, null=True, blank=True)
    facebook_emails = models.CharField(verbose_name="emails from facebook", max_length=255, null=True, blank=True)
    facebook_about = models.CharField(verbose_name="about from facebook", max_length=255, null=True, blank=True)
    facebook_location = models.CharField(verbose_name="location from facebook", max_length=255, null=True, blank=True)
    facebook_photos = models.CharField(verbose_name="photos from facebook", max_length=1024, null=True, blank=True)
    facebook_bio = models.CharField(verbose_name="bio from facebook", max_length=1024, null=True, blank=True)
    facebook_general_info = models.CharField(verbose_name="general information from facebook", max_length=1024,
                                             null=True, blank=True)
    facebook_description = models.CharField(verbose_name="description from facebook", max_length=1024,
                                            null=True, blank=True)
    facebook_features = models.CharField(verbose_name="features from facebook", max_length=255, null=True, blank=True)
    facebook_contact_address = models.CharField(verbose_name="contact address from facebook", max_length=255,
                                                null=True, blank=True)
    facebook_mission = models.CharField(verbose_name="mission from facebook", max_length=1024, null=True, blank=True)
    facebook_category = models.CharField(verbose_name="category from facebook", max_length=255, null=True, blank=True)
    facebook_website = models.URLField(verbose_name="website from facebook", null=True, blank=True)
    facebook_personal_info = models.CharField(verbose_name="personal information from facebook", max_length=1024,
                                              null=True, blank=True)
    facebook_personal_interests = models.CharField(verbose_name="personal interests from facebook", max_length=255,
                                                   null=True, blank=True)
    facebook_posts = models.CharField(verbose_name="posts from facebook", max_length=1024, null=True, blank=True)
class GoogleSearchUserManager(models.Manager):
    """Create, fetch and delete GoogleSearchUser possibility records.

    All methods return a results dict with at least 'success' and 'status'
    keys, following the codebase's convention of never raising to callers.
    """

    def __unicode__(self):
        # BUG FIX: previously returned "TwitterUserManager" (copy/paste from
        # import_export_twitter); report this class's own name.
        return "GoogleSearchUserManager"

    def update_or_create_google_search_user_possibility(self, candidate_campaign_we_vote_id, google_json, search_term,
                                                        likelihood_score, facebook_json=None, from_ballotpedia=False,
                                                        from_facebook=False, from_linkedin=False, from_twitter=False,
                                                        from_wikipedia=False):
        """Create or update one search-result possibility for a candidate.

        Records are keyed on (candidate_campaign_we_vote_id, item_link).
        When facebook_json is supplied and flagged found, a second
        update_or_create pass stores the facebook columns, truncating long
        text fields to MAXIMUM_CHARACTERS_LENGTH.
        """
        google_search_user_on_stage = None
        google_search_user_created = False
        try:
            google_search_user_on_stage, google_search_user_created = GoogleSearchUser.objects.update_or_create(
                candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
                item_link=google_json['item_link'],
                defaults={
                    'likelihood_score': likelihood_score,
                    'search_term_used': search_term,
                    'from_ballotpedia': from_ballotpedia,
                    'from_facebook': from_facebook,
                    'from_linkedin': from_linkedin,
                    'from_twitter': from_twitter,
                    'from_wikipedia': from_wikipedia,
                    'item_title': google_json['item_title'],
                    'item_snippet': google_json['item_snippet'],
                    'item_image': google_json['item_image'],
                    'item_formatted_url': google_json['item_formatted_url'],
                    'item_meta_tags_description': google_json['item_meta_tags_description'],
                    'search_request_url': google_json['search_request_url']
                }
            )
            if positive_value_exists(facebook_json):
                if facebook_json['facebook_search_found']:
                    # Second pass on the same row: attach the facebook data.
                    google_search_user_on_stage, google_search_user_updated = GoogleSearchUser.objects.update_or_create(
                        candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
                        item_link=google_json['item_link'],
                        defaults={
                            'facebook_search_found': facebook_json['facebook_search_found'],
                            'facebook_name': facebook_json['name'],
                            'facebook_emails': facebook_json['emails'],
                            'facebook_about': facebook_json['about'],
                            'facebook_location': facebook_json['location'],
                            'facebook_photos': facebook_json['photos'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_bio': facebook_json['bio'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_general_info': facebook_json['general_info'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_description': facebook_json['description'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_features': facebook_json['features'],
                            'facebook_contact_address': facebook_json['contact_address'],
                            'facebook_mission': facebook_json['mission'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_category': facebook_json['category'],
                            'facebook_website': facebook_json['website'],
                            'facebook_personal_info': facebook_json['personal_info'][:MAXIMUM_CHARACTERS_LENGTH],
                            'facebook_personal_interests': facebook_json['personal_interests'],
                            'facebook_posts': facebook_json['posts'][:MAXIMUM_CHARACTERS_LENGTH]
                        }
                    )
            if google_search_user_created:
                status = "GOOGLE_SEARCH_USER_POSSIBILITY_CREATED"
            else:
                status = "GOOGLE_SEARCH_USER_POSSIBILITY_UPDATED"
            success = True
        except Exception as e:
            # Best-effort by design: report failure via the results dict.
            status = "GOOGLE_SEARCH_USER_POSSIBILITY_NOT_CREATED"
            success = False

        results = {
            'success':                      success,
            'status':                       status,
            'google_search_user':           google_search_user_on_stage,
            'google_search_user_created':   google_search_user_created
        }
        return results

    def retrieve_google_search_user_from_item_link(self, candidate_campaign_we_vote_id, item_link):
        """Fetch the single possibility keyed by (candidate id, item_link)."""
        google_search_user = GoogleSearchUser()
        try:
            if positive_value_exists(candidate_campaign_we_vote_id):
                google_search_user = GoogleSearchUser.objects.get(
                    candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
                    item_link=item_link)
                success = True
                google_search_user_found = True
                status = "RETRIEVE_GOOGLE_SEARCH_USER_BY_WE_VOTE_ID"
        except GoogleSearchUser.DoesNotExist:
            # Not found is a normal, successful outcome.
            google_search_user_found = False
            success = True
            status = "RETRIEVE_GOOGLE_SEARCH_USER_NOT_FOUND"
        except Exception as e:
            google_search_user_found = False
            success = False
            status = 'FAILED retrieve_googgle_search_user'
        results = {
            'success':                      success,
            'status':                       status,
            'google_search_user_found':     google_search_user_found,
            'google_search_user':           google_search_user,
        }
        return results

    def retrieve_google_search_users_list(self, candidate_campaign_we_vote_id):
        """Return every stored possibility for one candidate."""
        google_search_users_list = []
        try:
            google_search_users_queryset = GoogleSearchUser.objects.all()
            google_search_users_queryset = google_search_users_queryset.filter(
                candidate_campaign_we_vote_id=candidate_campaign_we_vote_id)
            google_search_users_list = google_search_users_queryset
            if len(google_search_users_list):
                status = "GOOGLE_SEARCH_USERS_LIST_FOUND"
                success = True
                google_search_users_found = True
            else:
                status = "GOOGLE_SEARCH_USERS_LIST_NOT_FOUND"
                success = True
                google_search_users_found = False
        except Exception as e:
            status = "FAILED_RETRIEVE_GOOGLE_SEARCH_USERS_LIST"
            success = False
            google_search_users_found = False
        results = {
            'success':                      success,
            'status':                       status,
            'google_search_users_found':    google_search_users_found,
            'google_search_users_list':     google_search_users_list,
        }
        return results

    def delete_google_search_users_possibilities(self, candidate_campaign_we_vote_id):
        """Delete every stored possibility for one candidate."""
        try:
            GoogleSearchUser.objects.filter(candidate_campaign_we_vote_id=candidate_campaign_we_vote_id).delete()
            status = "GOOGLE_SEARCH_USERS_POSSIBILITY_DELETED"
            success = True
        except Exception as e:
            status = "GOOGLE_SEARCH_USERS_POSSIBILITY_NOT_DELETED"
            success = False
        results = {
            'success':  success,
            'status':   status,
        }
        return results
| |
import csv
import redis
from sqlalchemy.sql import exists, text
from collections import Counter
from .models import Grocery, List
from .app import db
from .decorators import timer
__CSV_FILE__ = '/Users/hannes/Downloads/groceries.csv'
def is_grocery(name):
    """Return True when a grocery item with this name exists in the DB."""
    name_exists = exists().where(Grocery.name == name)
    return db.session.query(name_exists).scalar()
def is_grocery_by_id(id):
    """Return True when a grocery item with this primary key exists."""
    id_exists = exists().where(Grocery.id == id)
    return db.session.query(id_exists).scalar()
def get_grocery(name):
    """Return the first grocery row matching *name*, or None."""
    matches = Grocery.query.filter_by(name=name)
    return matches.first()
def get_grocery_by_id(id):
    """Return the grocery row with primary key *id*, or None."""
    lookup = Grocery.query
    return lookup.get(id)
def get_or_create_grocery(name):
    """Fetch the grocery named *name*, creating and committing it if absent."""
    existing = get_grocery(name)
    if existing:
        return existing
    created = Grocery(name=name)
    db.session.add(created)
    db.session.commit()
    return created
def get_existing_groceries(query_string):
    """Parse a comma-separated grocery string into a list of existing ids.

    Args:
        query_string: raw string such as "milk, eggs,bread" (spaces ignored).

    Returns:
        Ids of the names that exist in the DB, or None for empty input.
    """
    if not query_string:
        return None
    names = query_string.replace(' ', '').split(',')
    # One lookup per name. The original queried twice per name: an
    # is_grocery() existence check followed by a second get_grocery() fetch
    # for the id; get_grocery() returning None carries the same information.
    rows = (get_grocery(name) for name in names)
    return [row.id for row in rows if row is not None]
def get_sql_query_params(grocery_ids):
    """Return (ids_fragment, count) used to fill the list-selection SQL.

    A single id is rendered by hand as "(5)" because a Python 1-tuple would
    stringify as "(5,)", which is not valid SQL.
    """
    count = len(grocery_ids)
    if count == 1:
        ids_fragment = "({})".format(grocery_ids[0])
    else:
        ids_fragment = tuple(grocery_ids)
    return (ids_fragment, count)
def get_grocery_lists(grocery_ids):
    ''' Returns the lists with given grocery ids (no matches are excluded)'''
    # Build the ids fragment and the expected match count for the raw SQL.
    params = get_sql_query_params(grocery_ids)
    # create db connection for raw execution
    connection = db.engine.connect()
    # Select every list containing ALL requested grocery ids: the inner
    # query groups by list and keeps those whose match count equals the
    # number of requested ids.
    # NOTE(review): the SQL is %-interpolated rather than bound as
    # parameters; grocery_ids come from get_existing_groceries() as DB ids,
    # but confirm no user-controlled values can reach this point.
    sql_query = \
        ''' SELECT DISTINCT list_id FROM grocery_to_list
            WHERE list_id IN( SELECT list_id
                              FROM grocery_to_list
                              WHERE grocery_id IN %s
                              GROUP BY list_id
                              HAVING COUNT(*) = %s)''' \
        % (params[0], params[1])
    lists = connection.execute(text(sql_query))
    # Materialise each matching list as an ORM object.
    return [List.query.get(x) for x in lists]
def remove_grocery_item_from_recommendation(grocery_list, existing_groceries):
    """Strip every occurrence of the already-owned groceries from the list.

    Mutates *grocery_list* in place (as the original did) and returns it.

    Perf fix: the original resolved each id and re-scanned the list inside a
    `while item in list: list.remove(item)` loop (quadratic); here each id is
    resolved once and the list is filtered in a single pass. Equality-based
    membership semantics are unchanged.
    """
    owned_items = [get_grocery_by_id(grocery_id)
                   for grocery_id in existing_groceries]
    # Slice assignment keeps the in-place mutation visible to the caller.
    grocery_list[:] = [item for item in grocery_list
                       if item not in owned_items]
    return grocery_list
def get_highest_match(lists, existing_groceries, num_most_common=3):
    """Return the most common groceries across *lists* with their share.

    Args:
        lists: iterable of List rows, each with a .groceries collection.
        existing_groceries: grocery ids to exclude from the recommendation.
        num_most_common: how many (item, relative_frequency) pairs to return.
    """
    pooled = []
    for grocery_list in lists:
        for grocery in grocery_list.groceries:
            pooled.append(grocery)
    # Exclude groceries the shopper already has.
    pooled = remove_grocery_item_from_recommendation(pooled, existing_groceries)
    num_groceries = len(pooled)
    # Robustness fix: the original divided by len(pooled) unconditionally and
    # raised ZeroDivisionError when nothing remained after exclusion.
    if not num_groceries:
        return []
    common_groceries = Counter(pooled).most_common(num_most_common)
    return [(item, count / num_groceries) for item, count in common_groceries]
@timer
def get_highest_match_from_groceries(query_string, num_most_common=4):
    '''
    Given a raw comma-separated grocery string, return the
    *num_most_common* groceries most often bought alongside them but
    missing from the shopping list. Returns None when no known grocery
    names appear in the input.
    '''
    known_ids = get_existing_groceries(query_string)
    if not known_ids:
        return None
    matching_lists = get_grocery_lists(known_ids)
    return get_highest_match(matching_lists, known_ids, num_most_common)
def set_all_grocery_to_db():
    """Create a DB record for every distinct grocery item in the CSV file.

    BUG FIX: the original reused the name `grocery_list` both for the
    accumulator and the per-row loop variable, so `item not in grocery_list`
    tested membership in the very row being iterated (always True) and the
    accumulator was never populated — only the CSV's final row reached the
    DB. Distinct names restore the intended behaviour.
    """
    unique_items = []
    with open(__CSV_FILE__, 'r') as csvfile:
        data = csv.reader(csvfile, delimiter=',')
        for row in data:
            for item in row:
                if item not in unique_items:
                    unique_items.append(item)
    # Collecting first and writing once is faster than a DB round trip per
    # duplicate item (rationale preserved from the original).
    for item in unique_items:
        get_or_create_grocery(item)
def set_all_grocery_lists():
    ''' Build one List row per CSV line and attach its grocery items. '''
    with open(__CSV_FILE__, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            new_list = List()
            for name in row:
                new_list.groceries.append(get_or_create_grocery(name))
            db.session.add(new_list)
            db.session.commit()
            # Progress indicator every 1000 committed lists.
            if new_list.id % 1000 == 0:
                print("Added grocery list %s" % new_list.id)
#######
# REDIS
#######
def set_all_grocery_lists_to_redis():
    ''' Mirror the CSV shopping lists into redis as a bidirectional index:
    list-index -> items and item -> list-indexes. '''
    conn = redis.Redis()
    with open(__CSV_FILE__, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for list_index, row in enumerate(reader):
            for item in row:
                conn.sadd(list_index, item)
                conn.sadd(item, list_index)
            # Progress indicator every 1000 lists (the original printed the
            # post-increment counter, i.e. index + 1).
            if (list_index + 1) % 1000 == 0:
                print("Added grocery list %s" % (list_index + 1))
def get_union_list(sublist, union_list):
    """Return the elements of *union_list* also present in *sublist*,
    preserving *union_list*'s order (duplicates kept)."""
    intersection = []
    for candidate in union_list:
        if candidate in sublist:
            intersection.append(candidate)
    return intersection
def get_grocery_lists_from_redis(grocery_params):
    ''' Return (list_ids, grocery_items): the ids of shopping lists that
    contain every requested grocery, plus the parsed item names.

    grocery_params is a comma-separated string; each name is a redis set
    whose members are the ids of lists containing that item.
    '''
    conn = redis.Redis()
    grocery_items = grocery_params.split(',')
    results = []
    for grocery in grocery_items:
        results.append(list(conn.smembers(grocery)))
    # Sort member sets smallest-first so intersections shrink quickly.
    redis_results = sorted(results, key=len)
    # NOTE(review): the seed is results[0] (the first requested item), not
    # redis_results[0] (the smallest set); the final intersection is the
    # same either way, only the work differs.
    lists = results[0]
    for result in redis_results:
        lists = get_union_list(result, lists)
    return lists, grocery_items
@timer
def get_highest_match_from_redis(lists, existing_groceries, num_most_common=3):
    """Most-common missing groceries computed from the redis index.

    Args:
        lists: iterable of redis keys (shopping-list ids).
        existing_groceries: iterable of grocery-name strings to exclude.
        num_most_common: how many recommendations to return.

    Returns:
        List of (item, relative_frequency) pairs; items are bytes, as
        returned by redis smembers.
    """
    conn = redis.Redis()
    tmp = []
    for grocery_list in lists:
        for groceries in conn.smembers(grocery_list):
            tmp.append(groceries)
    # exclude existing groceries
    # tmp = remove_grocery_item_from_recommendation(tmp, existing_groceries)
    # smembers yields bytes, so each name must be encoded before comparing;
    # the while loop removes every occurrence, not just the first.
    for i in existing_groceries:
        while str.encode(i) in tmp:
            tmp.remove(str.encode(i))
    # compute most common
    # NOTE(review): raises ZeroDivisionError if nothing remains after the
    # exclusion step — confirm callers never hit that case.
    num_groceries = len(tmp)
    common_groceries = (Counter(tmp).most_common(num_most_common))
    return [(i[0], i[1] / num_groceries) for i in common_groceries]
@timer
def get_highest_match_from_groceries_from_redis(
        query_string, num_most_common=4):
    """HTTP-style wrapper: (status_code, recommendations) via the redis
    index; 500 with None when no matching lists were found."""
    matching_lists, requested_items = get_grocery_lists_from_redis(query_string)
    if matching_lists is None:
        return (500, None)
    recommendations = get_highest_match_from_redis(
        matching_lists, requested_items, num_most_common)
    return (200, recommendations)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sckit-learn classification utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import pickle
from absl import flags
from absl import logging
import numpy as np
from sklearn import model_selection
from sklearn.compose import make_column_transformer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
# pylint: disable=invalid-name

# Command-line flags controlling preprocessing and model selection.
flags.DEFINE_boolean(
    "transform_inputs", True,
    "If enabled, will scale the numeric features and convert categorical "
    "features to one-hot encoding.")
flags.DEFINE_list(
    "classifiers", ["LogisticRegression"],
    "Type of the classifier. One of: \"LogisticRegression\", \"SVM\", "
    "\"RidgeRegression\", \"RandomForest\", \"AdaBoost\", \"LDA\", \"QDA\", "
    "\"GaussianProcess\", \"DecisionTree\", \"DNN\", \"GaussianNaiveBayes\", "
    "\"BaggingEnsemble\".")
flags.DEFINE_boolean(
    "use_implicationals", True, "If True, use the implicational features.")
flags.DEFINE_string(
    "best_configurations_file", "",
    "File containing the JSON dictionary from feature names to the "
    "respective best model and data configurations. When `--cross_validate` "
    "is enabled, this is the output file to be generated. In all other modes "
    "this is an input file.")

FLAGS = flags.FLAGS

# List of all supported classifiers (must match _make_classifier below).
ALL_MODELS = [
    "AdaBoost", "DNN", "DecisionTree", "GaussianProcess", "LDA",
    "LogisticRegression", "QDA", "RandomForest", "RidgeRegression", "SVM",
    "GaussianNaiveBayes", "BaggingEnsemble"
]

# Model information keys used in the best-configurations dictionary.
MODEL_INFO_NAME_KEY = "name"
MODEL_INFO_SPARSITY_KEY = "no_cv"  # Not enough data.
MODEL_INFO_SCORE_KEY = "accuracy"
MODEL_INFO_CANDIDATES_KEY = "candidates"

# Random seed shared by all stochastic estimators for reproducibility.
_RANDOM_STATE = 4611170

# WALS language code column name.
_LANGUAGE_CODE = "wals_code"
def _prepare_data(input_df):
  """Split a data frame into a feature matrix X and a label vector y.

  Labels come from the "target_value" column; the label, language-code and
  "target_feature" columns are dropped from the features.
  """
  label_col = "target_value"
  labels = input_df[label_col].copy()
  non_feature_cols = [label_col, _LANGUAGE_CODE, "target_feature"]
  features = input_df.drop(columns=non_feature_cols)
  return features, labels
def _split_into_features_and_labels(feature_name, feature_maker,
                                    training_df, dev_df,
                                    transform_inputs):
  """Preprocesses the data and returns the features and labels.

  Args:
    feature_name: (string) WALS feature being predicted (used for logging).
    feature_maker: object exposing prob_features / count_features /
      categorical_features column-name collections.
    training_df: training data frame including the label columns.
    dev_df: development data frame including the label columns.
    transform_inputs: if True, scale numeric columns and one-hot encode
      categorical columns; otherwise pass the raw values through.

  Returns:
    Tuple (X_train, y_train, X_dev, y_dev, train_class_counts), where the
    X/y values are numpy arrays and train_class_counts is a list of
    (label, count) pairs for the training set.
  """
  # Get the label class counts for the training data.
  train_class_counts = training_df.target_value.value_counts()
  train_class_counts = list(zip(train_class_counts.index,
                                train_class_counts.values))
  logging.info("%s: Class counts: %s", feature_name, train_class_counts)
  # Perform the split into features and labels of the training set.
  X_train, y_train = _prepare_data(training_df)
  logging.info("%s: Input feature dimensions: %s", feature_name,
               X_train.shape[1])
  # Split dev set.
  X_dev, y_dev = _prepare_data(dev_df)
  # Numeric columns are transformed using standard scaler and categorical
  # columns are converted to one-hot.
  if transform_inputs:
    numeric_cols = ["latitude", "longitude"]
    categorical_cols = []
    for col_name in X_train.columns:
      if (col_name in feature_maker.prob_features or
          col_name in feature_maker.count_features):
        numeric_cols.append(col_name)  # Counts, probabilities.
      elif col_name in feature_maker.categorical_features:
        categorical_cols.append(col_name)  # Categorical feature values.
    # Fit the transformer on train only; dev reuses the fitted transform.
    inputs_transformer = make_column_transformer(
        (StandardScaler(), numeric_cols),
        (OneHotEncoder(handle_unknown="ignore"), categorical_cols),
        remainder="passthrough")
    X_train = inputs_transformer.fit_transform(X_train)
    if X_dev.shape[0]:  # Do we have enough samples?
      X_dev = inputs_transformer.transform(X_dev)
    else:
      logging.warning("Feature %s not found in the dev set. This is likely to "
                      "crash the evaluation mode!", feature_name)
  else:
    # Transform data frames to Numpy. The input transformer in the branch above
    # returns Numpy arrays.
    X_train = X_train.to_numpy()
    X_dev = X_dev.to_numpy()
  return (
      X_train, y_train.to_numpy(), X_dev, y_dev.to_numpy(), train_class_counts)
def prepare_data(feature_maker, feature_name, use_implicationals=True,
                 prediction_mode=False):
  """Prepare train/dev features and labels for one WALS feature.

  Returns (X_train, y_train, X_dev, y_dev, dev_language_codes,
  train_class_counts).
  """
  # Materialise the train/dev splits for this feature and remember the WALS
  # language codes of the dev rows before any columns are dropped.
  training_df, dev_df = feature_maker.process_data(
      feature_name, prediction_mode=prediction_mode)
  assert _LANGUAGE_CODE in dev_df.columns
  dev_language_codes = list(dev_df[_LANGUAGE_CODE].values)

  if not use_implicationals:
    logging.info("Discarding implicational features")
    training_df = feature_maker.select_columns(
        training_df, discard_implicationals=True)
    dev_df = feature_maker.select_columns(
        dev_df, discard_implicationals=True)

  # Split into feature matrices and label vectors (transforming inputs as
  # requested on the command line).
  split = _split_into_features_and_labels(
      feature_name, feature_maker, training_df, dev_df,
      FLAGS.transform_inputs)
  X_train, y_train, X_dev, y_dev, train_class_counts = split
  return X_train, y_train, X_dev, y_dev, dev_language_codes, train_class_counts
def _make_classifier(classifier_name):
  """Classifier factory: map a model name to a freshly built estimator.

  Raises:
    ValueError: if classifier_name is not one of the supported models.
  """
  # Class weights: if you set this to None, you'd get much better accuracies,
  # but it's likely that the classifier will be overpredicting the majority
  # class.
  class_weight_strategy = None  # Note: this may set "balanced" as default.
  max_iters = 10000
  # Zero-argument constructors, evaluated lazily so only the requested model
  # is ever instantiated.
  builders = {
      "AdaBoost": lambda: AdaBoostClassifier(n_estimators=100),
      "LogisticRegression": lambda: LogisticRegression(
          max_iter=max_iters, class_weight=class_weight_strategy),
      "LDA": lambda: LinearDiscriminantAnalysis(tol=1E-6),
      "QDA": lambda: QuadraticDiscriminantAnalysis(),
      "DNN": lambda: MLPClassifier(random_state=_RANDOM_STATE,
                                   hidden_layer_sizes=[200]),
      "DecisionTree": lambda: DecisionTreeClassifier(
          random_state=_RANDOM_STATE,
          min_samples_leaf=3,
          criterion="entropy",
          class_weight="balanced"),
      "GaussianProcess": lambda: GaussianProcessClassifier(
          random_state=_RANDOM_STATE, max_iter_predict=200),
      "RandomForest": lambda: RandomForestClassifier(
          n_estimators=200,
          random_state=_RANDOM_STATE,
          min_samples_leaf=3,
          criterion="entropy",
          class_weight="balanced_subsample"),
      "RidgeRegression": lambda: RidgeClassifier(
          normalize=True, tol=1E-5, class_weight=class_weight_strategy),
      "SVM": lambda: LinearSVC(max_iter=max_iters,
                               class_weight=class_weight_strategy),
      "GaussianNaiveBayes": lambda: GaussianNB(),
      "BaggingEnsemble": lambda: BaggingClassifier(
          random_state=_RANDOM_STATE),
  }
  if classifier_name not in builders:
    raise ValueError("Unsupported classifier: %s" % classifier_name)
  return builders[classifier_name]()
def cross_validate(feature_name, classifier_name, X, y,
                   cv_num_folds, cv_num_repeats):
  """Runs repeated stratified $k$-fold cross-validation.

  Returns multiple cross-validation metrics as a dictionary, where for each
  metric mean and variance across multiple repeats and folds is summarized.

  Args:
    feature_name: (string) Name of the WALS feature.
    classifier_name: (string) Classifier name.
    X: (numpy array) Input features.
    y: (numpy array) Labels.
    cv_num_folds: (int) Number of folds ($k$).
    cv_num_repeats: (int) Number of repetitions.

  Returns:
    Dictionary containing cross-validation scores and stats, or None when
    cross-validation fails (an exception is raised or the resulting accuracy
    is NaN). When there is too little data for $k$ folds, the model is fit
    once instead and the result is flagged via MODEL_INFO_SPARSITY_KEY=True.
  """
  model = _make_classifier(classifier_name)
  scoring = ["f1_micro", "precision_micro", "recall_micro", "accuracy"]
  try:
    # Really primitive logic to figure out class distribution.
    _, y_counts = np.unique(y, return_counts=True)
    y_max_freq = np.max(y_counts)
    # Check if the class counts are not reliable to run cross-validation:
    # stratified k-fold needs at least k members in the most frequent class.
    if y_max_freq < cv_num_folds:
      logging.warning("[%s] %s: Not enough data. Fitting the model instead "
                      "of running CV", feature_name, classifier_name)
      # Simply fit the model. Variance is reported as 0.0 since this is a
      # single evaluation (on the training data itself).
      model.fit(X, y)
      cv_scores = {}
      cv_scores["accuracy"] = (model.score(X, y), 0.0)
      cv_scores[MODEL_INFO_SPARSITY_KEY] = True
      return cv_scores
    else:
      logging.info("[%s] Running cross-validation of %s (k=%d, n=%d) ...",
                   feature_name, classifier_name, cv_num_folds, cv_num_repeats)
      # Run cross-validation.
      cv = RepeatedStratifiedKFold(n_splits=cv_num_folds,
                                   n_repeats=cv_num_repeats,
                                   random_state=_RANDOM_STATE)
      cv_scores = model_selection.cross_validate(
          model, X, y, cv=cv, scoring=scoring, n_jobs=cv_num_folds)
      cv_scores[MODEL_INFO_SPARSITY_KEY] = False
  except Exception as e:  # pylint: disable=broad-except
    logging.error("[%s] %s: CV: Exception: %s", feature_name, classifier_name,
                  e)
    return None
  # Timing vectors returned by sklearn are not needed downstream.
  del cv_scores["fit_time"]
  del cv_scores["score_time"]
  # Collapse each per-fold score vector into a (mean, variance) tuple.
  for score_name in scoring:
    scores_vec_key = "test_" + score_name
    cv_scores[score_name] = (np.mean(cv_scores[scores_vec_key]),
                             np.var(cv_scores[scores_vec_key]))
    del cv_scores[scores_vec_key]
  # Sanity check.
  if math.isnan(cv_scores["accuracy"][0]):
    return None
  logging.info("[train] %s: CV scores for %s: %s", feature_name,
               classifier_name, cv_scores)
  return cv_scores
def train_classifier(feature_name, classifier_name, X, y, model_path=None):
  """Trains classifier on the full training set.

  Args:
    feature_name: (string) Name of the WALS feature.
    classifier_name: (string) Classifier name understood by _make_classifier.
    X: (numpy array) Training features.
    y: (numpy array) Training labels.
    model_path: (string) Optional path; when set, the fitted model is
      pickled to this location.

  Returns:
    The fitted sklearn model.
  """
  model = _make_classifier(classifier_name)
  logging.info("%s: Fitting %s model ...",
               feature_name, classifier_name)
  model.fit(X, y)
  # Note: this is the score on the training data itself.
  logging.info("%s: %s: Score: %s", feature_name, classifier_name,
               model.score(X, y))
  if model_path:
    logging.info("Saving model to \"%s\" ...", model_path)
    # Use a context manager so the file handle is closed (and the pickle
    # flushed to disk) even if serialization raises.
    with open(model_path, "wb") as model_file:
      pickle.dump(model, model_file)
  return model
def select_best_model(classifiers, feature_name, X_train, y_train,
                      cv_num_folds, cv_num_repeats):
  """Cross-validates several classifiers for a feature and picks the winner.

  Args:
    classifiers: (list) Names of the classifiers to choose from.
    feature_name: (string) WALS feature name.
    X_train: (numpy array) Training features.
    y_train: (numpy array) Training labels.
    cv_num_folds: (int) Number of folds ($k$).
    cv_num_repeats: (int) Number of repetitions.

  Returns:
    Dictionary describing the best classifier: its name, accuracy mean,
    sparsity marker and the number of candidates it was selected from.
  """
  candidates = []
  for name in classifiers:
    metrics = cross_validate(feature_name, name, X_train, y_train,
                             cv_num_folds, cv_num_repeats)
    # Cross-validation may fail for some settings and return None.
    if metrics:
      candidates.append((name, metrics))
  # Rank by the accuracy mean, best first. For some reason F1 and accuracy
  # are the same (as is the precision and recall). Investigate.
  candidates.sort(key=lambda candidate: candidate[1]["accuracy"][0],
                  reverse=True)
  if len(candidates) < 5:
    raise ValueError("Expected at least five candidate classifiers!")
  winner_name, winner_metrics = candidates[0]
  return {
      MODEL_INFO_NAME_KEY: winner_name,  # Model name.
      # Accuracy mean.
      MODEL_INFO_SCORE_KEY: winner_metrics["accuracy"][0],
      # Boolean sparsity marker.
      MODEL_INFO_SPARSITY_KEY: winner_metrics[MODEL_INFO_SPARSITY_KEY],
      # Overall number of successful evals.
      MODEL_INFO_CANDIDATES_KEY: len(candidates)
  }
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Enable 'with' statements in Python 2.5
from __future__ import with_statement
import os.path
import re
import subprocess
import sys
from buildbot_lib import (
BuildContext, BuildStatus, Command, EnsureDirectoryExists,
ParseStandardCommandLine, RemoveDirectory, RunBuild, SCons, Step, StepLink,
StepText, TryToCleanContents)
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
  """Points PATH and MSVC-related environment variables at a usable toolchain.

  Mutates `context`'s environment in place and stores the discovered MSVC
  install directory in context['msvc'] for later use (e.g. by CommandGypBuild).
  Raises if neither Program Files nor any known MSVC install is found.
  """
  # Blow away path for now if on the bots (to be more hermetic).
  if os.environ.get('BUILDBOT_SLAVENAME'):
    paths = [
        r'c:\b\depot_tools',
        r'c:\b\depot_tools\python_bin',
        r'c:\b\build_internal\tools',
        r'e:\b\depot_tools',
        r'e:\b\depot_tools\python_bin',
        r'e:\b\build_internal\tools',
        r'C:\WINDOWS\system32',
        r'C:\WINDOWS\system32\WBEM',
    ]
    context.SetEnv('PATH', os.pathsep.join(paths))
  # Poke around looking for MSVC. We should do something more principled in
  # the future.
  # The name of Program Files can differ, depending on the bittage of Windows.
  program_files = r'c:\Program Files (x86)'
  if not os.path.exists(program_files):
    program_files = r'c:\Program Files'
    if not os.path.exists(program_files):
      raise Exception('Cannot find the Program Files directory!')
  # The location of MSVC can differ depending on the version.
  msvc_locs = [
      ('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
      ('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
      ('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
  ]
  for dirname, comntools_var, gyp_msvs_version in msvc_locs:
    msvc = os.path.join(program_files, dirname)
    # GYP_MSVS_VERSION is (re)set on every iteration; the value left behind
    # is the one matching the install found by the break below.
    context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
    if os.path.exists(msvc):
      break
  else:
    # The break statement did not execute: no known MSVC install was found.
    raise Exception('Cannot find MSVC!')
  # Put MSVC in the path.
  vc = os.path.join(msvc, 'VC')
  comntools = os.path.join(msvc, 'Common7', 'Tools')
  perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
  context.SetEnv('PATH', os.pathsep.join([
      context.GetEnv('PATH'),
      vc,
      comntools,
      perf]))
  # SCons needs this variable to find vsvars.bat.
  # The end slash is needed because the batch files expect it.
  context.SetEnv(comntools_var, comntools + '\\')
  # This environment variable will cause SCons to print debug info while it
  # searches for MSVC.
  context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
  # Needed for finding devenv.
  context['msvc'] = msvc
  # The context on other systems has GYP_DEFINES set, set it for windows to be
  # able to save and restore without KeyError.
  context.SetEnv('GYP_DEFINES', '')
def SetupGypDefines(context, extra_vars=None):
  """Sets GYP_DEFINES from the context's gyp_vars plus optional extras.

  Args:
    context: build context; must support item access ('gyp_vars') and
        SetEnv().
    extra_vars: optional list of additional 'name=value' define strings.
  """
  # Avoid a mutable default argument; None means "no extra defines".
  if extra_vars is None:
    extra_vars = []
  context.SetEnv('GYP_DEFINES', ' '.join(context['gyp_vars'] + extra_vars))
def SetupLinuxEnvironment(context):
  # Linux builds only need the target architecture passed through to GYP.
  arch_define = 'target_arch=' + context['gyp_arch']
  SetupGypDefines(context, [arch_define])
def SetupMacEnvironment(context):
  # Mac builds use the stock GYP defines but generate ninja files rather
  # than the generator's default output.
  SetupGypDefines(context)
  context.SetEnv('GYP_GENERATORS', 'ninja')
def SetupContextVars(context):
  """Derives branch-related context values from the buildbot environment."""
  # The branch is set to native_client on the main bots, on the trybots it's
  # set to ''. Otherwise, we should assume a particular branch is being used.
  branch = os.environ.get('BUILDBOT_BRANCH', 'native_client')
  context['branch'] = branch
  context['off_trunk'] = branch not in ('native_client', '')
def ValidatorTest(context, architecture, validator, warn_only=False):
  # Run the abi_corpus regression suite against a single (arch, validator)
  # pair; warn_only demotes failures to warnings.
  extra_args = ['--keep-going',
                '--validator', validator,
                '--arch', architecture]
  if warn_only:
    extra_args.append('--warn-only')
  Command(context,
          cmd=[sys.executable,
               'tests/abi_corpus/validator_regression_test.py'] + extra_args)
def CommandGypBuild(context):
  """Invokes the platform's native build tool on the GYP-generated files."""
  if context.Windows():
    # devenv builds the generated solution in the requested configuration.
    devenv = os.path.join(context['msvc'], 'Common7', 'IDE', 'devenv.com')
    Command(context,
            cmd=[devenv, r'build\all.sln', '/build', context['gyp_mode']])
  elif context.Linux():
    make_cmd = ['make', '-C', '..', '-k',
                '-j%d' % context['max_jobs'], 'V=1',
                'BUILDTYPE=' + context['gyp_mode']]
    Command(context, cmd=make_cmd)
  elif context.Mac():
    ninja_cmd = ['ninja', '-k', '0', '-C', '../out/' + context['gyp_mode']]
    Command(context, cmd=ninja_cmd)
  else:
    raise Exception('Unknown platform')
def CommandGypGenerate(context):
  # Regenerate build files from the top-level gyp description. Run from the
  # parent directory so the native_client/... paths resolve.
  gyp_cmd = [
      sys.executable,
      'native_client/build/gyp_nacl',
      'native_client/build/all.gyp',
  ]
  Command(context, cmd=gyp_cmd, cwd='..')
def CommandGclientRunhooks(context):
  """Runs 'gclient runhooks --force' using the platform-appropriate binary."""
  if context.Windows():
    gclient = 'gclient.bat'
  else:
    gclient = 'gclient'
  # Use the function-call form of print: with a single argument it behaves
  # identically under Python 2 and is also valid Python 3 syntax.
  print('Running gclient runhooks...')
  print('GYP_DEFINES=' + context.GetEnv('GYP_DEFINES'))
  Command(context, cmd=[gclient, 'runhooks', '--force'])
def RemoveGypBuildDirectories():
  # Remove all directories on all platforms. Overkill, but it allows for
  # straight-line code.
  directories = [
      # Windows
      'build/Debug',
      'build/Release',
      'build/Debug-Win32',
      'build/Release-Win32',
      'build/Debug-x64',
      'build/Release-x64',
      # Linux and Mac
      'hg',
      '../xcodebuild',
      '../sconsbuild',
      '../out',
      'src/third_party/nacl_sdk/arm-newlib',
  ]
  for directory in directories:
    RemoveDirectory(directory)
def BuildScript(status, context):
  """Runs the full buildbot sequence: clobber, generate, compile and test.

  Steps are strictly ordered; each is wrapped in a Step() context manager
  that reports progress/failure through `status`. Validator-only bots take
  an early-return path after the validator steps.
  """
  inside_toolchain = context['inside_toolchain']
  # Clean out build directories.
  with Step('clobber', status):
    RemoveDirectory(r'scons-out')
    RemoveGypBuildDirectories()
  with Step('cleanup_temp', status):
    # Picking out drive letter on which the build is happening so we can use
    # it for the temp directory.
    if context.Windows():
      build_drive = os.path.splitdrive(os.path.abspath(__file__))[0]
      tmp_dir = os.path.join(build_drive, os.path.sep + 'temp')
      context.SetEnv('TEMP', tmp_dir)
      context.SetEnv('TMP', tmp_dir)
    else:
      tmp_dir = '/tmp'
    print 'Making sure %s exists...' % tmp_dir
    EnsureDirectoryExists(tmp_dir)
    print 'Cleaning up the contents of %s...' % tmp_dir
    # Only delete files and directories like:
    # a) C:\temp\83C4.tmp
    # b) /tmp/.org.chromium.Chromium.EQrEzl
    file_name_re = re.compile(
        r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
    file_name_filter = lambda fn: file_name_re.search(fn) is not None
    TryToCleanContents(tmp_dir, file_name_filter)
    # Mac has an additional temporary directory; clean it up.
    # TODO(bradnelson): Fix Mac Chromium so that these temp files are created
    # with open() + unlink() so that they will not get left behind.
    if context.Mac():
      subprocess.call(
          "find /var/folders -name '.org.chromium.*' -exec rm -rfv '{}' ';'",
          shell=True)
      subprocess.call(
          "find /var/folders -name '.com.google.Chrome*' -exec rm -rfv '{}' ';'",
          shell=True)
  # Skip over hooks when run inside the toolchain build because
  # download_toolchains would overwrite the toolchain build.
  if inside_toolchain:
    with Step('gyp_generate_only', status):
      CommandGypGenerate(context)
  else:
    with Step('gclient_runhooks', status):
      CommandGclientRunhooks(context)
  if context['clang']:
    with Step('update_clang', status):
      Command(context, cmd=['../tools/clang/scripts/update.sh'])
  # Just build both bitages of validator and test for --validator mode.
  # NOTE: this branch returns early; none of the regular compile/test steps
  # below run on validator bots.
  if context['validator']:
    with Step('build ncval-x86-32', status):
      SCons(context, platform='x86-32', parallel=True, args=['ncval'])
    with Step('build ncval-x86-64', status):
      SCons(context, platform='x86-64', parallel=True, args=['ncval'])
    with Step('clobber dfa_validator', status):
      Command(context, cmd=['rm', '-rf', 'dfa_validator'])
    with Step('clone dfa_validator', status):
      # Clone two pinned revisions of the DFA-based validator, one per
      # bitage.
      Command(context, cmd=[
          'git', 'clone',
          'git://github.com/mseaborn/x86-decoder.git', 'dfa_validator32'])
      Command(context, cmd=[
          'git', 'checkout', '1a5963fa48739c586d5bbd3d46d0a8a7f25112f2'],
          cwd='dfa_validator32')
      Command(context, cmd=[
          'git', 'clone',
          'git://github.com/mseaborn/x86-decoder.git', 'dfa_validator64'])
      Command(context, cmd=[
          'git', 'checkout', '6ffa36f44cafd2cdad37e1e27254c498030ff712'],
          cwd='dfa_validator64')
    with Step('build dfa_validator_32', status):
      Command(context, cmd=['make'], cwd='dfa_validator32')
    with Step('build dfa_validator_64', status):
      Command(context, cmd=['make'], cwd='dfa_validator64')
    with Step('build ragel_validator-32', status):
      SCons(context, platform='x86-32', parallel=True, args=['ncval_new'])
    with Step('build ragel_validator-64', status):
      SCons(context, platform='x86-64', parallel=True, args=['ncval_new'])
    with Step('predownload validator corpus', status):
      Command(context,
              cmd=[sys.executable,
                   'tests/abi_corpus/validator_regression_test.py',
                   '--download-only'])
    # Regression-test every validator build; halt_on_fail=False lets the
    # remaining validator steps run even when one suite fails.
    with Step('validator_regression_test current x86-32', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-32', 'scons-out/opt-linux-x86-32/staging/ncval')
    with Step('validator_regression_test current x86-64', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-64', 'scons-out/opt-linux-x86-64/staging/ncval')
    with Step('validator_regression_test dfa x86-32', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-32', 'dfa_validator32/dfa_ncval', warn_only=True)
    with Step('validator_regression_test dfa x86-64', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-64', 'dfa_validator64/dfa_ncval', warn_only=True)
    with Step('validator_regression_test ragel x86-32', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-32',
          'scons-out/opt-linux-x86-32/staging/ncval_new')
    with Step('validator_regression_test ragel x86-64', status,
              halt_on_fail=False):
      ValidatorTest(
          context, 'x86-64',
          'scons-out/opt-linux-x86-64/staging/ncval_new')
    with Step('validator_diff_tests', status, halt_on_fail=False):
      SCons(context, args=['validator_diff_tests'])
    return
  # Run checkdeps script to vet #includes.
  with Step('checkdeps', status):
    Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])
  # Make sure our Gyp build is working.
  if not context['no_gyp']:
    with Step('gyp_compile', status):
      CommandGypBuild(context)
  # The main compile step.
  with Step('scons_compile', status):
    SCons(context, parallel=True, args=[])
  ### BEGIN tests ###
  with Step('small_tests', status, halt_on_fail=False):
    SCons(context, args=['small_tests'])
  with Step('medium_tests', status, halt_on_fail=False):
    SCons(context, args=['medium_tests'])
  with Step('large_tests', status, halt_on_fail=False):
    SCons(context, args=['large_tests'])
  with Step('compile IRT tests', status):
    SCons(context, parallel=True, mode=['nacl_irt_test'])
  with Step('small_tests under IRT', status, halt_on_fail=False):
    SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
          args=['small_tests_irt'])
  with Step('medium_tests under IRT', status, halt_on_fail=False):
    SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
          args=['medium_tests_irt'])
  # TODO(eugenis): reenable this on clang/opt once the LLVM issue is fixed
  # http://code.google.com/p/nativeclient/issues/detail?id=2473
  bug2473 = (context['clang'] or context['asan']) and context['mode'] == 'opt'
  if context.Mac() and context['arch'] != 'arm' and not bug2473:
    # x86-64 is not fully supported on Mac. Not everything works, but we
    # want to stop x86-64 sel_ldr from regressing, so do a minimal test here.
    with Step('minimal x86-64 test', status, halt_on_fail=False):
      SCons(context, parallel=True, platform='x86-64',
            args=['run_hello_world_test'])
  ### END tests ###
  if not context['no_gyp']:
    # Build with ragel-based validator using GYP. GYP_DEFINES is saved and
    # restored around this block so later steps see the original value.
    gyp_defines_save = context.GetEnv('GYP_DEFINES')
    context.SetEnv('GYP_DEFINES',
                   ' '.join([gyp_defines_save, 'nacl_validator_ragel=1']))
    with Step('gyp_compile_ragel', status):
      # Clobber GYP build to recompile necessary files with new preprocessor macro
      # definitions. It is done because some build systems (such as GNU Make,
      # MSBuild etc.) do not consider compiler arguments as a dependency.
      RemoveGypBuildDirectories()
      CommandGypGenerate(context)
      CommandGypBuild(context)
    context.SetEnv('GYP_DEFINES', gyp_defines_save)
    # Build with ragel-based validator using scons.
    with Step('scons_compile_ragel', status):
      SCons(context, parallel=True, args=['validator_ragel=1'])
    # Smoke tests for the R-DFA validator.
    with Step('validator_ragel_tests', status):
      args = ['validator_ragel=1',
              'small_tests',
              'medium_tests',
              'large_tests',
              ]
      # Add nacl_irt_test mode to be able to run run_hello_world_test_irt that
      # tests validation of the IRT.
      SCons(context,
            mode=context['default_scons_mode'] + ['nacl_irt_test'],
            args=args)
def Main():
  # TODO(ncbray) make buildbot scripts composable to support toolchain use case.
  context = BuildContext()
  status = BuildStatus(context)
  ParseStandardCommandLine(context)
  SetupContextVars(context)
  # Pick the environment-setup routine that matches the host OS.
  platform_setups = [
      (context.Windows, SetupWindowsEnvironment),
      (context.Linux, SetupLinuxEnvironment),
      (context.Mac, SetupMacEnvironment),
  ]
  for predicate, setup in platform_setups:
    if predicate():
      setup(context)
      break
  else:
    raise Exception("Unsupported platform.")
  RunBuild(BuildScript, status)
# Script entry point when executed directly by the buildbot.
if __name__ == '__main__':
  Main()
| |
# -*- coding: UTF-8 -*-
import datetime
import os.path
from collections import defaultdict
from conference.models import Conference, AttendeeProfile
from django.conf import settings
from django.core.urlresolvers import reverse
from p3 import models as p3models
def conference_ticket_badge(tickets):
    """
    Prepares per-conference badge data for the given tickets.

    See conference.settings.TICKET_BADGE_PREPARE_FUNCTION.

    Returns the values of a dict keyed by conference code: one entry per
    conference, each holding the badge plugin path and a list of per-ticket
    dicts (name, tagline, days, fare, experience, badge image, staff flag
    and, when available, a public profile link).
    """
    conferences = {}
    for c in Conference.objects.all():
        conferences[c.code] = {
            'obj': c,
            'days': c.days(),
        }
    groups = {}
    # Prefetch the related rows touched in the loop below to avoid
    # per-ticket queries.
    qs = tickets\
        .select_related('fare', 'p3_conference', 'orderitem__order__user__user')
    for t in qs:
        if t.fare.conference not in groups:
            groups[t.fare.conference] = {
                'name': t.fare.conference,
                'plugin': os.path.join(settings.OTHER_STUFF, 'badge', t.fare.conference, 'conf.py'),
                'tickets': [],
            }
        try:
            p3c = t.p3_conference
        except p3models.TicketConference.DoesNotExist:
            p3c = None
        if p3c is None:
            # No P3 conference details attached: use defaults.
            tagline = ''
            days = '1'
            experience = 0
            badge_image = None
        else:
            tagline = p3c.tagline
            experience = p3c.python_experience
            # Translate the stored 'YYYY-MM-DD,...' attendance string into
            # 1-based day indexes within the conference schedule.
            tdays = map(lambda x: datetime.date(*map(int, x.split('-'))), filter(None, p3c.days.split(',')))
            cdays = conferences[t.fare.conference]['days']
            days = ','.join(map(str,[cdays.index(x)+1 for x in tdays]))
            badge_image = p3c.badge_image.path if p3c.badge_image else None
        try:
            if p3c and p3c.assigned_to:
                # Ticket was assigned to someone else: use their profile.
                profile = AttendeeProfile.objects\
                    .select_related('user')\
                    .get(user__email=p3c.assigned_to)
            else:
                profile = t.user.attendeeprofile
        # NOTE(review): this references p3models.AttendeeProfile although
        # AttendeeProfile is imported from conference.models above — confirm
        # p3models actually exposes this exception class.
        except p3models.AttendeeProfile.DoesNotExist:
            profile = None
        name = t.name.strip()
        if not name:
            # No name on the ticket: fall back to the buyer's account data.
            if t.user.first_name or t.user.last_name:
                name = '%s %s' % (t.user.first_name, t.user.last_name)
            else:
                name = t.orderitem.order.user.name()
        if p3c and p3c.assigned_to:
            name = p3c.assigned_to + ' (%s)' % name
        tdata = {
            'name': name,
            'tagline': tagline,
            'days': days,
            'fare': {
                'code': t.fare.code,
                'type': t.fare.recipient_type,
            },
            'experience': experience,
            'badge_image': badge_image,
            'staff': t.ticket_type == 'staff',
        }
        if profile:
            tdata['profile-link'] = settings.DEFAULT_URL_PREFIX + reverse(
                'conference-profile-link', kwargs={'uuid': profile.uuid})
        groups[t.fare.conference]['tickets'].append(tdata)
    return groups.values()
def gravatar(email, size=80, default='identicon', rating='r', protocol='https'):
    """
    Builds the Gravatar avatar URL for *email*.

    Args:
        email: e-mail address; hashed lower-cased per the Gravatar API.
        size: requested image size in pixels.
        default: fallback image style for addresses without an avatar.
        rating: maximum allowed image rating.
        protocol: 'https' selects the secure host, anything else plain http.

    Returns:
        The complete avatar URL as a string.
    """
    import hashlib
    # urlencode moved in Python 3; keep the function importable under both.
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    if protocol == 'https':
        host = 'https://secure.gravatar.com'
    else:
        host = 'http://www.gravatar.com'
    # Gravatar hashes the lower-cased address; md5 needs bytes on Python 3
    # (encoding is a no-op for the ASCII addresses handled on Python 2).
    email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
    gravatar_url = host + "/avatar/" + email_hash + "?"
    gravatar_url += urlencode({
        'default': default,
        'size': size,
        'rating': rating,
    })
    return gravatar_url
def spam_recruiter_by_conf(conf):
    """
    Returns a queryset of the Users who agreed to be contacted by e-mail
    for recruiting purposes.
    """
    from django.contrib.auth.models import User
    tickets = settings.CONFERENCE_TICKETS(conf, ticket_type='conference')
    # A ticket is "owned" when it was never assigned to somebody else.
    owned = tickets.filter(p3_conference__assigned_to='')
    assigned = tickets.exclude(p3_conference__assigned_to='')
    spam_ok = {'attendeeprofile__p3_profile__spam_recruiting': True}
    # Owners are matched by user id, assignees by the e-mail the ticket was
    # assigned to.
    owners = User.objects.filter(id__in=owned.values('user'), **spam_ok)
    assignees = User.objects.filter(
        email__in=assigned.values('p3_conference__assigned_to'), **spam_ok)
    return owners | assignees
from django.core.cache import cache
from django.utils.http import urlquote
from django.utils.hashcompat import md5_constructor
def template_cache_name(fragment_name, *variables):
    """Returns the cache key used for a cached template fragment."""
    digest = md5_constructor(
        u':'.join(urlquote(v) for v in variables)).hexdigest()
    return 'template.cache.%s.%s' % (fragment_name, digest)
def invalidate_template_cache(fragment_name, *variables):
    """
    Removes the cached template fragment identified by name + variables.

    The key is computed via template_cache_name() so the two functions can
    never drift apart.
    """
    cache.delete(template_cache_name(fragment_name, *variables))
def conference2ical(conf, user=None, abstract=False):
    """
    Builds the iCal calendar for a conference.

    Args:
        conf: conference code.
        user: when given, only the events that user marked with positive
            interest are included ("my schedule"); otherwise the full
            schedule is exported.
        abstract: when True, talk abstracts are attached as descriptions.

    Returns:
        The calendar object produced by conference.utils.
    """
    from conference import dataaccess
    from conference import models as cmodels
    from datetime import timedelta
    curr = cmodels.Conference.objects.current()
    # Conference HQ coordinates (if configured) become each event's location.
    try:
        hotel = cmodels.SpecialPlace.objects.get(type='conf-hq')
    except cmodels.SpecialPlace.DoesNotExist:
        hotel = None
    else:
        if not hotel.lat or not hotel.lng:
            hotel = None
    def altf(data, component):
        # Post-processing hook called by the ical serializer for each
        # component ('calendar' or 'event'); must return the (mutated) data.
        if component == 'calendar':
            if user is None:
                url = reverse('p3-schedule', kwargs={'conference': conf})
            else:
                url = reverse('p3-schedule-my-schedule', kwargs={'conference': conf})
            data['uid'] = settings.DEFAULT_URL_PREFIX + url
            # The current conference changes often, so use a short TTL.
            if curr.code == conf:
                data['ttl'] = timedelta(seconds=3600)
            else:
                data['ttl'] = timedelta(days=365)
        elif component == 'event':
            eid = data['uid']
            data['uid'] = settings.DEFAULT_URL_PREFIX + '/p3/event/' + str(data['uid'])
            data['organizer'] = ('mailto:info@pycon.it', {'CN': 'Python Italia'})
            if hotel:
                data['coordinates'] = [hotel.lat, hotel.lng]
            if not isinstance(data['summary'], tuple):
                # this is a custom event, if it starts with an anchor I can
                # extract the reference
                import re
                m = re.match(r'<a href="(.*)">(.*)</a>', data['summary'])
                if m:
                    url = m.group(1)
                    if url.startswith('/'):
                        url = settings.DEFAULT_URL_PREFIX + url
                    data['summary'] = (m.group(2), {'ALTREP': url})
            if abstract:
                e = dataaccess.event_data(eid)
                if e['talk']:
                    # Append abbreviated speaker names to the summary.
                    from conference.templatetags.conference import name_abbrv
                    speakers = [ name_abbrv(s['name']) for s in e['talk']['speakers'] ]
                    speakers = ", ".join(speakers)
                    data['summary'] = (data['summary'][0] + ' by ' + speakers, data['summary'][1])
                ab = e['talk']['abstract'] if e['talk'] else e['abstract']
                data['description'] = ab
        return data
    if user is None:
        from conference.utils import conference2ical as f
        cal = f(conf, altf=altf)
    else:
        # Build one TimeTable per schedule, containing only the events the
        # user flagged with positive interest.
        from conference.utils import TimeTable2
        from conference.utils import timetables2ical as f
        qs = cmodels.Event.objects\
            .filter(eventinterest__user=user, eventinterest__interest__gt=0)\
            .filter(schedule__conference=conf)\
            .values('id', 'schedule')
        events = defaultdict(list)
        for x in qs:
            events[x['schedule']].append(x['id'])
        sids = sorted(events.keys())
        timetables = [ TimeTable2.fromEvents(x, events[x]) for x in sids ]
        cal = f(timetables, altf=altf)
    return cal
class RawSubquery(object):
    """
    Wraps a raw SQL string so it can be used directly as the right-hand
    side of an ``__in`` lookup, letting the database evaluate the subquery
    instead of fetching its rows into Python first.

    Given this raw query (rawq):

        SELECT t1.user_id
        FROM (
            ...
        ) t1 INNER JOIN (
            ...
        ) t2
        ON t1.something = t2.something_else

    you can write:

        MyModel.objects.filter(id__in=RawSubquery(rawq))

    instead of executing rawq on a cursor and feeding the fetched ids back
    into the filter.
    """
    def __init__(self, raw, params=()):
        self.raw, self.params = raw, params

    def prepare(self):
        # The ORM calls prepare() on the right-hand side of a lookup;
        # nothing to precompute here.
        return self

    def as_sql(self):
        # (sql, params) pair interpolated into the outer query.
        return (self.raw, self.params)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.