gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
An interface for the ACE processor.
"""
from typing import (
Any, Iterator, Iterable, Mapping, Dict, List, Tuple, Pattern, IO)
import logging
import os
from pathlib import Path
import argparse
import re
from subprocess import (
check_call,
check_output,
CalledProcessError,
Popen,
PIPE
)
from platform import platform # portable system information
from getpass import getuser # portable way to get username
from socket import gethostname # portable way to get host name
from datetime import datetime
import locale
from delphin import interface
from delphin import util
from delphin.exceptions import PyDelphinException
# Default modules need to import the PyDelphin version
from delphin.__about__ import __version__ # noqa: F401
logger = logging.getLogger(__name__)
# do this right away to avoid some encoding issues
locale.setlocale(locale.LC_ALL, '')
encoding = locale.getpreferredencoding(False)
class ACEProcessError(PyDelphinException):
    """Raised when the ACE process has crashed and cannot be recovered."""
class ACEProcess(interface.Processor):
    """
    The base class for interfacing ACE.

    This manages most subprocess communication with ACE, but does not
    interpret the response returned via ACE's stdout. Subclasses
    override the :meth:`receive` method to interpret the task-specific
    response formats.

    Note that not all arguments to this class are used by every
    subclass; the documentation for each subclass specifies which are
    available.

    Args:
        grm (str): path to a compiled grammar image
        cmdargs (list, optional): a list of command-line arguments
            for ACE; note that arguments and their values should be
            separate entries, e.g. `['-n', '5']`
        executable (str, optional): the path to the ACE binary; if
            `None`, ACE is assumed to be callable via `ace`
        env (dict): environment variables to pass to the ACE
            subprocess
        tsdbinfo (bool): if `True` and ACE's version is compatible,
            all information ACE reports for [incr tsdb()] processing
            is gathered and returned in the response
        full_forest (bool): if `True` and *tsdbinfo* is `True`, output
            the full chart for each parse result
        stderr (file): stream used for ACE's stderr
    """

    # class-level defaults; subclasses override these with task-specific
    # fixed command-line arguments and stdout terminator patterns
    _cmdargs: List[str] = []
    _termini: List[Pattern[str]] = []

    def __init__(self,
                 grm: util.PathLike,
                 cmdargs: List[str] = None,
                 executable: util.PathLike = None,
                 env: Mapping[str, str] = None,
                 tsdbinfo: bool = True,
                 full_forest: bool = False,
                 stderr: IO[Any] = None):
        self.grm = str(Path(grm).expanduser())
        self.cmdargs = cmdargs or []
        # validate the arguments; _ace_argparser raises ValueError on
        # any option ACE does not support
        _ace_argparser.parse_args(self.cmdargs)
        self.executable = 'ace'
        if executable:
            self.executable = str(Path(executable).expanduser())
        # feature-gate the command line on the detected ACE version
        ace_version = self.ace_version
        if ace_version >= (0, 9, 14):
            self.cmdargs.append('--tsdb-notes')
        if tsdbinfo and ace_version >= (0, 9, 24):
            self.cmdargs.extend(['--tsdb-stdout', '--report-labels'])
            # bind the version-appropriate receive() implementation on the
            # instance so subclasses need not branch at call time
            setattr(self, 'receive', self._tsdb_receive)
            if full_forest:
                # NOTE(review): this appends to the class-level _cmdargs
                # list, so the flag would persist for later instances of
                # the same class — confirm this is intended
                self._cmdargs.append('--itsdb-forest')
        else:
            setattr(self, 'receive', self._default_receive)
        self.env = env or os.environ
        self._run_id = -1
        self.run_infos: List[Dict[str, Any]] = []
        self._stderr = stderr
        self._open()

    @property
    def ace_version(self) -> Tuple[int, ...]:
        """The version of the specified ACE binary."""
        return _ace_version(self.executable)

    @property
    def run_info(self) -> Dict[str, Any]:
        """Contextual information about the the running process."""
        # the last entry describes the most recent (re)start of ACE
        return self.run_infos[-1]

    def _open(self) -> None:
        # start (or restart) the ACE subprocess and record a new run-info
        # entry describing this run
        self._p = Popen(
            [self.executable, '-g', self.grm] + self._cmdargs + self.cmdargs,
            stdin=PIPE,
            stdout=PIPE,
            stderr=self._stderr,
            env=self.env,
            universal_newlines=True
        )
        self._run_id += 1
        self.run_infos.append({
            'run-id': self._run_id,
            'application': 'ACE {} via PyDelphin v{}'.format(
                '.'.join(map(str, self.ace_version)), __version__),
            'environment': ' '.join(self.cmdargs),
            'user': getuser(),
            'host': gethostname(),
            'os': platform(),
            'start': datetime.now()
        })
        # a non-zero immediate exit means ACE could not start (e.g. a bad
        # grammar image); surface it as an explicit error
        if self._p.poll() is not None and self._p.returncode != 0:
            raise ACEProcessError("ACE process closed on startup")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False  # don't try to handle any exceptions

    def _result_lines(self, termini: List[Pattern[str]] = None) -> List[str]:
        # Read lines from ACE's stdout until every terminus pattern has
        # been matched once, returning the non-empty result lines.
        poll = self._p.poll
        assert self._p.stdout is not None, 'cannot receive output from ACE'
        next_line = self._p.stdout.readline
        if termini is None:
            termini = self._termini
        i, end = 0, len(termini)
        # NOTE(review): cur_terminus is not re-bound after i advances; the
        # current subclasses use identical patterns per position so this
        # has no visible effect — confirm if patterns ever differ
        cur_terminus = termini[i]
        lines = []
        while i < end:
            s = next_line()
            if s == '' and poll() is not None:
                # EOF plus a set returncode means ACE has died
                logger.info(
                    'Process closed unexpectedly; giving up.'
                )
                self.close()
                break
            # The 'run' note should appear when the process is opened, but
            # handle it here to avoid potential deadlocks if it gets buffered
            elif s.startswith('NOTE: tsdb run:'):
                self._read_run_info(s.rstrip())
            # the rest should be normal result lines
            else:
                lines.append(s.rstrip())
                if cur_terminus.search(s):
                    i += 1
        return [line for line in lines if line != '']

    def _read_run_info(self, line: str) -> None:
        # Merge the s-expression data of a 'NOTE: tsdb run:' line into
        # the current run-info entry.
        assert line.startswith('NOTE: tsdb run:')
        for key, value in _sexpr_data(line[15:].lstrip()):
            if key == ':application':
                continue  # PyDelphin sets 'application'
            self.run_info[key.lstrip(':')] = value

    def send(self, datum: str) -> None:
        """
        Send *datum* (e.g. a sentence or MRS) to ACE.

        Warning:
            Sending data without reading (e.g., via :meth:`receive`) can
            fill the buffer and cause data to be lost. Use the
            :meth:`interact` method for most data-processing tasks with
            ACE.
        """
        assert self._p.stdin is not None, 'cannot send inputs to ACE'
        try:
            self._p.stdin.write((datum.rstrip() + '\n'))
            self._p.stdin.flush()
        except (IOError, OSError):  # ValueError if file was closed manually
            logger.info(
                'Attempted to write to a closed process; attempting to reopen'
            )
            # restart ACE and retry the write once
            self._open()
            self._p.stdin.write((datum.rstrip() + '\n'))
            self._p.stdin.flush()

    def receive(self) -> interface.Response:
        """
        Return the stdout response from ACE.

        Warning:
            Reading beyond the last line of stdout from ACE can cause
            the process to hang while it waits for the next line. Use
            the :meth:`interact` method for most data-processing tasks
            with ACE.
        """
        # replaced at __init__ time by _tsdb_receive or _default_receive
        raise NotImplementedError()

    def _default_receive(self) -> interface.Response:
        # subclasses provide the plain-text output interpretation
        raise NotImplementedError()

    def _tsdb_receive(self) -> interface.Response:
        # interpret --tsdb-stdout output (one s-expression stream per item)
        lines = self._result_lines()
        response, lines = _make_response(lines, self.run_info)
        # now it should be safe to reopen a closed process (if necessary)
        if self._p.poll() is not None:
            logger.info('Attempting to restart ACE.')
            self._open()
        line = ' '.join(lines)  # ACE 0.9.24 on Mac puts superfluous newlines
        response = _tsdb_response(response, line)
        return response

    def interact(self, datum: str) -> interface.Response:
        """
        Send *datum* to ACE and return the response.

        This is the recommended method for sending and receiving data
        to/from an ACE process as it reduces the chances of
        over-filling or reading past the end of the buffer. It also
        performs a simple validation of the input to help ensure that
        one complete item is processed at a time.

        If input item identifiers need to be tracked throughout
        processing, see :meth:`process_item`.

        Args:
            datum (str): the input sentence or MRS
        Returns:
            :class:`~delphin.interface.Response`
        """
        validated = self._validate_input(datum)
        if validated:
            self.send(validated)
            result = self.receive()
        else:
            # don't touch ACE at all; synthesize a SKIP response instead
            result, lines = _make_response(
                [('NOTE: PyDelphin could not validate the input and '
                  'refused to send it to ACE'),
                 f'SKIP: {datum}'],
                self.run_info)
        result['input'] = datum
        return result

    def process_item(self,
                     datum: str,
                     keys: Dict[str, Any] = None) -> interface.Response:
        """
        Send *datum* to ACE and return the response with context.

        The *keys* parameter can be used to track item identifiers
        through an ACE interaction. If the `task` member is set on
        the ACEProcess instance (or one of its subclasses), it is
        kept in the response as well.

        Args:
            datum (str): the input sentence or MRS
            keys (dict): a mapping of item identifier names and values
        Returns:
            :class:`~delphin.interface.Response`
        """
        response = self.interact(datum)
        if keys is not None:
            response['keys'] = keys
        # 'task' is defined on the concrete subclasses (parse/transfer/...)
        if 'task' not in response and self.task is not None:
            response['task'] = self.task
        return response

    def close(self) -> int:
        """
        Close the ACE process and return the process's exit code.
        """
        self.run_info['end'] = datetime.now()
        if self._p.stdin is not None:
            self._p.stdin.close()
        if self._p.stdout is not None:
            # drain remaining stdout so the process can exit cleanly; any
            # trailing run-info notes are still recorded
            for line in self._p.stdout:
                if line.startswith('NOTE: tsdb run:'):
                    self._read_run_info(line)
                else:
                    logger.debug('ACE cleanup: %s', line.rstrip())
        retval = self._p.wait()
        return retval

    def _validate_input(self, datum: str) -> str:
        # subclasses decide what a plausible input looks like
        raise NotImplementedError()
class ACEParser(ACEProcess):
    """
    A class for managing parse requests with ACE.

    See :class:`ACEProcess` for initialization parameters.
    """

    task = 'parse'
    _termini = [re.compile(r'^$'), re.compile(r'^$')]

    def _validate_input(self, datum: str):
        # anything that is a non-empty string is worth sending to ACE
        # (an empty/whitespace string evaluates to False)
        return isinstance(datum, str) and datum.strip()

    def _default_receive(self):
        """Interpret ACE's plain-text parse output (MRS ; derivation)."""
        raw_lines = self._result_lines()
        response, raw_lines = _make_response(raw_lines, self.run_info)
        results = []
        for raw in raw_lines:
            fields = [part.strip() for part in raw.split(' ; ')]
            results.append(dict(zip(('mrs', 'derivation'), fields)))
        response['results'] = results
        return response
class ACETransferer(ACEProcess):
    """
    A class for managing transfer requests with ACE.

    See :class:`ACEProcess` for initialization parameters.
    """

    task = 'transfer'
    _termini = [re.compile(r'^$')]

    def __init__(self,
                 grm: util.PathLike,
                 cmdargs: List[str] = None,
                 executable: util.PathLike = None,
                 env: Mapping[str, str] = None,
                 stderr: IO[Any] = None):
        # transfer does not support --tsdb-stdout, so tsdbinfo is forced off
        super().__init__(grm, cmdargs=cmdargs, executable=executable, env=env,
                         tsdbinfo=False, full_forest=False, stderr=stderr)

    def _validate_input(self, datum):
        # input must at least look like a SimpleMRS string
        return _possible_mrs(datum)

    def _default_receive(self):
        """Interpret ACE's plain-text transfer output (one MRS per line)."""
        raw_lines = self._result_lines()
        response, raw_lines = _make_response(raw_lines, self.run_info)
        response['results'] = [{'mrs': mrs_line.strip()}
                               for mrs_line in raw_lines]
        return response
class ACEGenerator(ACEProcess):
    """
    A class for managing realization requests with ACE.

    See :class:`ACEProcess` for initialization parameters.
    """

    task = 'generate'
    _cmdargs = ['-e', '--tsdb-notes']
    _termini = [re.compile(r'NOTE: tsdb parse: ')]

    def __init__(self,
                 grm: util.PathLike,
                 cmdargs: List[str] = None,
                 executable: util.PathLike = None,
                 env: Mapping[str, str] = None,
                 tsdbinfo: bool = True,
                 stderr: IO[Any] = None):
        # full-forest output is parse-only, so it is always disabled here
        super().__init__(grm, cmdargs=cmdargs, executable=executable, env=env,
                         tsdbinfo=tsdbinfo, full_forest=False, stderr=stderr)

    def _validate_input(self, datum):
        # input must at least look like a SimpleMRS string
        return _possible_mrs(datum)

    def _default_receive(self):
        """Interpret ACE's plain-text realization output.

        Each realization is a sentence line, optionally followed by a
        ``DTREE = ...`` line and/or an ``MRS = ...`` line when the
        corresponding ``--show-realization-*`` options are active.
        """
        show_tree = '--show-realization-trees' in self.cmdargs
        show_mrs = '--show-realization-mrses' in self.cmdargs
        lines = self._result_lines()
        response, lines = _make_response(lines, self.run_info)
        i, numlines = 0, len(lines)
        results = []
        while i < numlines:
            result = {'SENT': lines[i].strip()}
            i += 1
            # bounds-check before reading ahead: the optional DTREE/MRS
            # lines may be absent for the final result, which previously
            # raised IndexError
            if (show_tree and i < numlines
                    and lines[i].startswith('DTREE = ')):
                result['derivation'] = lines[i][8:].strip()
                i += 1
            if (show_mrs and i < numlines
                    and lines[i].startswith('MRS = ')):
                result['mrs'] = lines[i][6:].strip()
                i += 1
            results.append(result)
        response['results'] = results
        return response

    def _tsdb_receive(self):
        # with --tsdb-stdout, the notes line is not printed
        lines = self._result_lines(termini=[re.compile(r'\(:results \.')])
        response, lines = _make_response(lines, self.run_info)
        line = ' '.join(lines)  # ACE 0.9.24 on Mac puts superfluous newlines
        response = _tsdb_response(response, line)
        return response
def compile(cfg_path: util.PathLike,
            out_path: util.PathLike,
            executable: util.PathLike = None,
            env: Mapping[str, str] = None,
            stdout: IO[Any] = None,
            stderr: IO[Any] = None) -> None:
    """
    Use ACE to compile a grammar.

    Args:
        cfg_path (str): the path to the ACE config file
        out_path (str): the path where the compiled grammar will be
            written
        executable (str, optional): the path to the ACE binary; if
            `None`, the `ace` command will be used
        env (dict, optional): environment variables to pass to the ACE
            subprocess
        stdout (file, optional): stream used for ACE's stdout
        stderr (file, optional): stream used for ACE's stderr
    """
    source = str(Path(cfg_path).expanduser())
    target = str(Path(out_path).expanduser())
    command = [(executable or 'ace'), '-g', source, '-G', target]
    try:
        check_call(command,
                   stdout=stdout, stderr=stderr, close_fds=True,
                   env=(env or os.environ))
    except (CalledProcessError, OSError):
        logger.error(
            'Failed to compile grammar with ACE. See %s',
            getattr(stderr, 'name', '<stderr>')
        )
        raise
def parse_from_iterable(
        grm: util.PathLike,
        data: Iterable[str],
        **kwargs: Any) -> Iterator[interface.Response]:
    """
    Parse each sentence in *data* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        data (iterable): the sentences to parse
        **kwargs: additional keyword arguments to pass to the ACEParser
    Yields:
        :class:`~delphin.interface.Response`
    Example:
        >>> sentences = ['Dogs bark.', 'It rained']
        >>> responses = list(ace.parse_from_iterable('erg.dat', sentences))
        NOTE: parsed 2 / 2 sentences, avg 723k, time 0.01026s
    """
    # one ACE process handles the whole iterable, then shuts down
    with ACEParser(grm, **kwargs) as parser:
        for sentence in data:
            yield parser.interact(sentence)
def parse(grm: util.PathLike,
          datum: str,
          **kwargs: Any) -> interface.Response:
    """
    Parse sentence *datum* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        datum (str): the sentence to parse
        **kwargs: additional keyword arguments to pass to the ACEParser
    Returns:
        :class:`~delphin.interface.Response`
    Example:
        >>> response = ace.parse('erg.dat', 'Dogs bark.')
        NOTE: parsed 1 / 1 sentences, avg 797k, time 0.00707s
    """
    # delegate to the iterable form with a single-item list
    responses = parse_from_iterable(grm, [datum], **kwargs)
    return next(responses)
def transfer_from_iterable(
        grm: util.PathLike,
        data: Iterable[str],
        **kwargs: Any) -> Iterator[interface.Response]:
    """
    Transfer from each MRS in *data* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        data (iterable): source MRSs as SimpleMRS strings
        **kwargs: additional keyword arguments to pass to the
            ACETransferer
    Yields:
        :class:`~delphin.interface.Response`
    """
    # one ACE process handles the whole iterable, then shuts down
    with ACETransferer(grm, **kwargs) as transferer:
        for mrs in data:
            yield transferer.interact(mrs)
def transfer(grm: util.PathLike,
             datum: str,
             **kwargs: Any) -> interface.Response:
    """
    Transfer from the MRS *datum* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        datum: source MRS as a SimpleMRS string
        **kwargs: additional keyword arguments to pass to the
            ACETransferer
    Returns:
        :class:`~delphin.interface.Response`
    """
    # delegate to the iterable form with a single-item list
    responses = transfer_from_iterable(grm, [datum], **kwargs)
    return next(responses)
def generate_from_iterable(
        grm: util.PathLike,
        data: Iterable[str],
        **kwargs: Any) -> Iterator[interface.Response]:
    """
    Generate from each MRS in *data* with ACE using grammar *grm*.

    Args:
        grm (str): path to a compiled grammar image
        data (iterable): MRSs as SimpleMRS strings
        **kwargs: additional keyword arguments to pass to the
            ACEGenerator
    Yields:
        :class:`~delphin.interface.Response`
    """
    # one ACE process handles the whole iterable, then shuts down
    with ACEGenerator(grm, **kwargs) as generator:
        for mrs in data:
            yield generator.interact(mrs)
def generate(grm: util.PathLike,
             datum: str,
             **kwargs: Any) -> interface.Response:
    """
    Generate from the MRS *datum* with ACE using *grm*.

    Args:
        grm (str): path to a compiled grammar image
        datum: the SimpleMRS string to generate from
        **kwargs: additional keyword arguments to pass to the
            ACEGenerator
    Returns:
        :class:`~delphin.interface.Response`
    """
    # delegate to the iterable form with a single-item list
    responses = generate_from_iterable(grm, [datum], **kwargs)
    return next(responses)
# The following defines the command-line options available for users to
# specify in ACEProcess tasks. For a description of these options, see:
# http://moin.delph-in.net/AceOptions
# thanks: https://stackoverflow.com/a/14728477/1441112
class _ACEArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError(message)
# single module-level parser used to validate user-supplied ACE
# command-line options before they reach the subprocess; unknown
# options raise ValueError via _ACEArgumentParser.error()
_ace_argparser = _ACEArgumentParser()
# result-count and general options
_ace_argparser.add_argument('-n', type=int)
_ace_argparser.add_argument('-1', action='store_const', const=1, dest='n')
_ace_argparser.add_argument('-r')
_ace_argparser.add_argument('-p', action='store_true')
_ace_argparser.add_argument('-X', action='store_true')
_ace_argparser.add_argument('-L', action='store_true')
_ace_argparser.add_argument('-y', action='store_true')
# resource limits
_ace_argparser.add_argument('--max-chart-megabytes', type=int)
_ace_argparser.add_argument('--max-unpack-megabytes', type=int)
_ace_argparser.add_argument('--timeout', type=int)
# parsing/generation behavior switches
_ace_argparser.add_argument('--disable-subsumption-test', action='store_true')
_ace_argparser.add_argument('--show-realization-trees', action='store_true')
_ace_argparser.add_argument('--show-realization-mrses', action='store_true')
_ace_argparser.add_argument('--show-probability', action='store_true')
_ace_argparser.add_argument('--disable-generalization', action='store_true')
_ace_argparser.add_argument('--ubertagging', nargs='?', type=float)
_ace_argparser.add_argument('--pcfg', type=argparse.FileType())
_ace_argparser.add_argument('--rooted-derivations', action='store_true')
_ace_argparser.add_argument('--udx', nargs='?', choices=('all',))
_ace_argparser.add_argument('--yy-rules', action='store_true')
_ace_argparser.add_argument('--max-words', type=int)
def _ace_version(executable: str) -> Tuple[int, ...]:
# 0.9.0 is the initial public release of ACE
version: Tuple[int, ...] = (0, 9, 0)
try:
out = check_output([executable, '-V'], universal_newlines=True)
except (CalledProcessError, OSError):
logger.error('Failed to get ACE version number.')
raise
else:
match = re.search(r'ACE version ([.0-9]+)', out)
if match is not None:
version = tuple(map(int, match.group(1).split('.')))
return version
def _possible_mrs(s: str) -> str:
start, end = -1, -1
depth = 0
for i, c in enumerate(s):
if c == '[':
if depth == 0:
start = i
depth += 1
elif c == ']':
depth -= 1
if depth == 0:
end = i + 1
break
# only valid if neither start nor end is -1
# note: this ignores any secondary MRSs on the same line
if start != -1 and end != -1:
# only log if taking a substring
if start != 0 and end != len(s):
logger.debug('Possible MRS found at <%d:%d>: %s', start, end, s)
s = s[start:end]
return s
else:
return ''
def _make_response(lines, run) -> Tuple[interface.Response, List[str]]:
    """Build a skeletal Response from raw ACE output lines.

    NOTE/WARNING/ERROR lines are collected into their respective lists,
    SENT/SKIP lines set the surface string, and everything else is
    returned as leftover content for the caller to interpret.
    """
    response = interface.Response({
        'NOTES': [],
        'WARNINGS': [],
        'ERRORS': [],
        'run': run,
        'input': None,
        'surface': None,
        'results': []
    })
    prefix_table = (
        ('NOTE: ', 'NOTES'),
        ('WARNING: ', 'WARNINGS'),
        ('ERROR: ', 'ERRORS'),
    )
    content_lines = []
    for line in lines:
        for prefix, key in prefix_table:
            if line.startswith(prefix):
                response[key].append(line[len(prefix):])
                break
        else:
            if line.startswith(('SENT: ', 'SKIP: ')):
                response['surface'] = line[6:]
            else:
                content_lines.append(line)
    return response, content_lines
def _sexpr_data(line: str) -> Iterator[Tuple[str, Any]]:
    """Yield ``(key, value)`` pairs parsed from s-expressions in *line*.

    ACE's ``--tsdb-stdout`` output is a sequence of two-element
    s-expressions; each is parsed in turn and the rest of the line is
    consumed until it is exhausted or a malformed expression is seen.
    """
    while line:
        try:
            expr = util.SExpr.parse(line)
        except IndexError:
            # truncated output from ACE; substitute an error pair so the
            # consumer still receives a diagnostic entry
            expr = util.SExprResult(
                (':error', 'incomplete output from ACE'),
                '')
        if len(expr.data) != 2:
            # anything other than a (key value) pair is unusable; stop here
            logger.error('Could not read output from ACE: %s', line)
            break
        key, val = expr.data
        assert isinstance(key, str)
        yield key, val
        # continue with whatever follows the parsed expression
        line = expr.remainder.lstrip()
def _tsdb_response(response: interface.Response,
                   line: str) -> interface.Response:
    """Populate *response* with data parsed from a ``--tsdb-stdout`` line.

    Keys with known structure (``:p-input``, ``:p-tokens``, ``:results``,
    ``:chart``) are interpreted specially; any other key is stored on the
    response with its leading ``:`` stripped.
    """
    for key, val in _sexpr_data(line):
        if key == ':p-input':
            # token lattice before tokenization mapping
            response.setdefault('tokens', {})['initial'] = val.strip()
        elif key == ':p-tokens':
            # token lattice after tokenization mapping
            response.setdefault('tokens', {})['internal'] = val.strip()
        elif key == ':results':
            # each result is a list of (key, value) pairs
            for result in val:
                res = {}
                for reskey, resval in result:
                    if reskey == ':derivation':
                        res['derivation'] = resval.strip()
                    elif reskey == ':mrs':
                        res['mrs'] = resval.strip()
                    elif reskey == ':surface':
                        res['surface'] = resval.strip()
                    elif isinstance(resval, str):
                        res[reskey[1:]] = resval.strip()
                    else:
                        res[reskey[1:]] = resval
                response['results'].append(res)
        elif key == ':chart':
            # full-forest mode: each edge is a list of (key, value) pairs
            response['chart'] = chart = []
            for edge in val:
                chart.append({edgekey[1:]: edgeval
                              for edgekey, edgeval in edge})
        elif isinstance(val, str):
            response[key[1:]] = val.strip()
        else:
            response[key[1:]] = val
    return response
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.gaming_v1.services.realms_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import realms
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import RealmsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import RealmsServiceGrpcTransport
from .transports.grpc_asyncio import RealmsServiceGrpcAsyncIOTransport
class RealmsServiceClientMeta(type):
    """Metaclass for the RealmsService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # registry of supported transports keyed by label; insertion order
    # determines the default (the first entry)
    _transport_registry = OrderedDict()  # type: Dict[str, Type[RealmsServiceTransport]]
    _transport_registry["grpc"] = RealmsServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = RealmsServiceGrpcAsyncIOTransport

    def get_transport_class(cls, label: Optional[str] = None,) -> Type[RealmsServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class RealmsServiceClient(metaclass=RealmsServiceClientMeta):
"""A realm is a grouping of game server clusters that are
considered interchangeable.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "gameservices.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> RealmsServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            RealmsServiceTransport: The transport used by the client
                instance.
        """
        # _transport is assigned during __init__ (either a user-supplied
        # instance or one constructed from the resolved transport class)
        return self._transport
@staticmethod
def realm_path(project: str, location: str, realm: str,) -> str:
"""Returns a fully-qualified realm string."""
return "projects/{project}/locations/{location}/realms/{realm}".format(
project=project, location=location, realm=realm,
)
@staticmethod
def parse_realm_path(path: str) -> Dict[str, str]:
"""Parses a realm path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/realms/(?P<realm>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # validate the environment switches before acting on them
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            # an explicitly-provided source wins over the platform default
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, RealmsServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the realms service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, RealmsServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict for convenience and normalize it to a
        # ClientOptions instance so the attribute accesses below are uniform.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Resolve the API endpoint and mTLS client-certificate source from
        # the options plus the GOOGLE_API_USE_* environment variables.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        # `api_key` may be absent on older ClientOptions versions, hence getattr.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, RealmsServiceTransport):
            # transport is a RealmsServiceTransport instance.
            # A pre-built transport already carries its own credentials/scopes,
            # so conflicting per-client settings are rejected.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore
            # Convert an API key into credentials when the installed
            # google-auth version supports it.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def list_realms(
self,
request: Union[realms.ListRealmsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRealmsPager:
r"""Lists realms in a given project and location.
.. code-block:: python
from google.cloud import gaming_v1
def sample_list_realms():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
request = gaming_v1.ListRealmsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_realms(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.ListRealmsRequest, dict]):
The request object. Request message for
RealmsService.ListRealms.
parent (str):
Required. The parent resource name, in the following
form: ``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.services.realms_service.pagers.ListRealmsPager:
Response message for
RealmsService.ListRealms.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a realms.ListRealmsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.ListRealmsRequest):
request = realms.ListRealmsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_realms]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListRealmsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_realm(
self,
request: Union[realms.GetRealmRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.Realm:
r"""Gets details of a single realm.
.. code-block:: python
from google.cloud import gaming_v1
def sample_get_realm():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
request = gaming_v1.GetRealmRequest(
name="name_value",
)
# Make the request
response = client.get_realm(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.GetRealmRequest, dict]):
The request object. Request message for
RealmsService.GetRealm.
name (str):
Required. The name of the realm to retrieve, in the
following form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.Realm:
A realm resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a realms.GetRealmRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.GetRealmRequest):
request = realms.GetRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_realm]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_realm(
self,
request: Union[realms.CreateRealmRequest, dict] = None,
*,
parent: str = None,
realm: realms.Realm = None,
realm_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new realm in a given project and location.
.. code-block:: python
from google.cloud import gaming_v1
def sample_create_realm():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
realm = gaming_v1.Realm()
realm.time_zone = "time_zone_value"
request = gaming_v1.CreateRealmRequest(
parent="parent_value",
realm_id="realm_id_value",
realm=realm,
)
# Make the request
operation = client.create_realm(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.CreateRealmRequest, dict]):
The request object. Request message for
RealmsService.CreateRealm.
parent (str):
Required. The parent resource name, in the following
form: ``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm (google.cloud.gaming_v1.types.Realm):
Required. The realm resource to be
created.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm_id (str):
Required. The ID of the realm
resource to be created.
This corresponds to the ``realm_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, realm, realm_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a realms.CreateRealmRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.CreateRealmRequest):
request = realms.CreateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if realm is not None:
request.realm = realm
if realm_id is not None:
request.realm_id = realm_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_realm]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def delete_realm(
self,
request: Union[realms.DeleteRealmRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a single realm.
.. code-block:: python
from google.cloud import gaming_v1
def sample_delete_realm():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
request = gaming_v1.DeleteRealmRequest(
name="name_value",
)
# Make the request
operation = client.delete_realm(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.DeleteRealmRequest, dict]):
The request object. Request message for
RealmsService.DeleteRealm.
name (str):
Required. The name of the realm to delete, in the
following form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a realms.DeleteRealmRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.DeleteRealmRequest):
request = realms.DeleteRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_realm]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def update_realm(
self,
request: Union[realms.UpdateRealmRequest, dict] = None,
*,
realm: realms.Realm = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Patches a single realm.
.. code-block:: python
from google.cloud import gaming_v1
def sample_update_realm():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
realm = gaming_v1.Realm()
realm.time_zone = "time_zone_value"
request = gaming_v1.UpdateRealmRequest(
realm=realm,
)
# Make the request
operation = client.update_realm(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.UpdateRealmRequest, dict]):
The request object. Request message for
RealmsService.UpdateRealm.
realm (google.cloud.gaming_v1.types.Realm):
Required. The realm to be updated. Only fields specified
in update_mask are updated.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([realm, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a realms.UpdateRealmRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.UpdateRealmRequest):
request = realms.UpdateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if realm is not None:
request.realm = realm
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_realm]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def preview_realm_update(
self,
request: Union[realms.PreviewRealmUpdateRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.PreviewRealmUpdateResponse:
r"""Previews patches to a single realm.
.. code-block:: python
from google.cloud import gaming_v1
def sample_preview_realm_update():
# Create a client
client = gaming_v1.RealmsServiceClient()
# Initialize request argument(s)
realm = gaming_v1.Realm()
realm.time_zone = "time_zone_value"
request = gaming_v1.PreviewRealmUpdateRequest(
realm=realm,
)
# Make the request
response = client.preview_realm_update(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gaming_v1.types.PreviewRealmUpdateRequest, dict]):
The request object. Request message for
RealmsService.PreviewRealmUpdate.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.PreviewRealmUpdateResponse:
Response message for
RealmsService.PreviewRealmUpdate.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a realms.PreviewRealmUpdateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, realms.PreviewRealmUpdateRequest):
request = realms.PreviewRealmUpdateRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.preview_realm_update]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
    def __enter__(self):
        """Return this client for use as a context manager."""
        return self
    # NOTE: the parameter name `type` shadows the builtin; it is kept because
    # the context-manager protocol passes it positionally.
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()
# Build the default ClientInfo with the installed package's version so the
# user-agent header reports it.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-game-servers",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout):
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = ("RealmsServiceClient",)
| |
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.server.PacketHandler: Code to process mixminion packets"""
import binascii
import logging
import threading
import types
from mixminion.Common import encodeBase64, formatBase64
import mixminion.Crypto as Crypto
import mixminion.Packet as Packet
import mixminion.BuildMessage
from mixminion.ServerInfo import PACKET_KEY_BYTES
from mixminion.Common import MixError, MixFatalError, isPrintingAscii
__all__ = [ 'PacketHandler', 'ContentError', 'DeliveryPacket', 'RelayedPacket']
log = logging.getLogger(__name__)
class ContentError(MixError):
    """Exception raised when a packet is malformatted or unacceptable."""
    pass
class PacketHandler:
    """Class to handle processing packets.  Given an incoming packet,
       it removes one layer of encryption, does all necessary integrity
       checks, swaps headers if necessary, re-pads, and decides whether
       to drop the packet, relay the packet, or send the packet to
       an exit handler."""
    ## Fields:
    # privatekeys: a list of 2-tuples of
    #     (1) a RSA private key that we accept
    #     (2) a HashLog objects corresponding to the given key
    # lock: protects privatekeys against concurrent setKeys/processPacket.
    def __init__(self, privatekeys=(), hashlogs=()):
        """Constructs a new packet handler, given a sequence of
           private key object for header encryption, and a sequence of
           corresponding hashlog object to prevent replays.
           The lists must be equally long.  When a new packet is
           processed, we try each of the private keys in sequence.  If
           the packet is decodeable with one of the keys, we log it in
           the corresponding entry of the hashlog list.
        """
        self.privatekeys = []
        self.lock = threading.Lock()
        # Python 2 type objects; this module predates Python 3.
        assert type(privatekeys) in (types.ListType, types.TupleType)
        assert type(hashlogs) in (types.ListType, types.TupleType)
        self.setKeys(privatekeys, hashlogs)
    def setKeys(self, keys, hashlogs):
        """Change the keys and hashlogs used by this PacketHandler.
           Arguments are as to PacketHandler.__init__
        """
        self.lock.acquire()
        newKeys = {}
        try:
            # Build a set of asn.1-encoded public keys in *new* set.
            for k in keys:
                newKeys[k.encode_key(1)] = 1
                if k.get_modulus_bytes() != PACKET_KEY_BYTES:
                    raise MixFatalError("Incorrect packet key length")
            # For all old public keys, if they aren't in the new set, close
            # their hashlogs.
            for k, h in self.privatekeys:
                if not newKeys.get(k.encode_key(1)):
                    h.close()
            # Now, set the keys.
            # NOTE(review): zip() must yield a list here (true on Python 2);
            # under Python 3 this would need list(zip(...)).
            self.privatekeys = zip(keys, hashlogs)
        finally:
            self.lock.release()
    def syncLogs(self):
        """Sync all this PacketHandler's hashlogs."""
        try:
            self.lock.acquire()
            for _, h in self.privatekeys:
                h.sync()
        finally:
            self.lock.release()
    def close(self):
        """Close all this PacketHandler's hashlogs."""
        try:
            self.lock.acquire()
            for _, h in self.privatekeys:
                h.close()
        finally:
            self.lock.release()
    def processPacket(self, msg):
        """Given a 32K mixminion packet, processes it completely.
           Return one of:
                    None [if the packet should be dropped.]
                    a DeliveryPacket object
                    a RelayedPacket object
           May raise CryptoError, ParseError, or ContentError if the packet
           is malformatted, misencrypted, unparseable, repeated, or otherwise
           unhandleable.
           WARNING: This implementation does nothing to prevent timing
           attacks: dropped packets, packets with bad digests, replayed
           packets, and exit packets are all processed faster than
           forwarded packets.  You must prevent timing attacks elsewhere."""
        # Break into headers and payload
        pkt = Packet.parsePacket(msg)
        header1 = Packet.parseHeader(pkt.header1)
        encSubh = header1[:Packet.ENC_SUBHEADER_LEN]
        header1 = header1[Packet.ENC_SUBHEADER_LEN:]
        assert len(header1) == Packet.HEADER_LEN - Packet.ENC_SUBHEADER_LEN
        assert len(header1) == (128*16) - 256 == 1792
        # Try to decrypt the first subheader.  Try each private key in
        # order.  Only fail if all private keys fail.
        subh = None
        e = None
        self.lock.acquire()
        try:
            for pk, hashlog in self.privatekeys:
                try:
                    subh = Crypto.pk_decrypt(encSubh, pk)
                    break
                except Crypto.CryptoError, err:
                    e = err
        finally:
            self.lock.release()
        if not subh:
            # Nobody managed to get us the first subheader.  Raise the
            # most-recently-received error.
            raise e
        if len(subh) != Packet.MAX_SUBHEADER_LEN:
            raise ContentError("Bad length in RSA-encrypted part of subheader")
        subh = Packet.parseSubheader(subh) #may raise ParseError
        # Check the version: can we read it?
        if subh.major != Packet.MAJOR_NO or subh.minor != Packet.MINOR_NO:
            raise ContentError("Invalid protocol version")
        # Check the digest of all of header1 but the first subheader.
        if subh.digest != Crypto.sha1(header1):
            raise ContentError("Invalid digest")
        # Get ready to generate packet keys.
        keys = Crypto.Keyset(subh.secret)
        # Replay prevention
        # NOTE: `hashlog` here is the loop variable left over from the key
        # that successfully decrypted the subheader above.
        replayhash = keys.get(Crypto.REPLAY_PREVENTION_MODE, Crypto.DIGEST_LEN)
        if hashlog.seenHash(replayhash):
            raise ContentError("Duplicate packet detected.")
        else:
            hashlog.logHash(replayhash)
        # If we're meant to drop, drop now.
        rt = subh.routingtype
        if rt == Packet.DROP_TYPE:
            return None
        # Prepare the key to decrypt the header in counter mode.  We'll be
        # using this more than once.
        header_sec_key = Crypto.aes_key(keys.get(Crypto.HEADER_SECRET_MODE))
        # Prepare key to generate padding
        junk_key = Crypto.aes_key(keys.get(Crypto.RANDOM_JUNK_MODE))
        # Pad the rest of header 1
        header1 += Crypto.prng(junk_key,
                               Packet.OAEP_OVERHEAD + Packet.MIN_SUBHEADER_LEN
                               + subh.routinglen)
        assert len(header1) == (Packet.HEADER_LEN - Packet.ENC_SUBHEADER_LEN
                                + Packet.OAEP_OVERHEAD+Packet.MIN_SUBHEADER_LEN
                                + subh.routinglen)
        assert len(header1) == 1792 + 42 + 42 + subh.routinglen == \
               1876 + subh.routinglen
        # Decrypt the rest of header 1, encrypting the padding.
        header1 = Crypto.ctr_crypt(header1, header_sec_key)
        # If the subheader says that we have extra routing info that didn't
        # fit in the RSA-encrypted part, get it now.
        overflowLength = subh.getOverflowLength()
        if overflowLength:
            subh.appendOverflow(header1[:overflowLength])
            header1 = header1[overflowLength:]
        assert len(header1) == (
            1876 + subh.routinglen
            - max(0,subh.routinglen-Packet.MAX_ROUTING_INFO_LEN))
        header1 = subh.underflow + header1
        assert len(header1) == Packet.HEADER_LEN
        # Decrypt the payload.
        payload = Crypto.lioness_decrypt(pkt.payload,
                              keys.getLionessKeys(Crypto.PAYLOAD_ENCRYPT_MODE))
        # If we're an exit node, there's no need to process the headers
        # further.
        if rt >= Packet.MIN_EXIT_TYPE:
            return DeliveryPacket(rt, subh.getExitAddress(0),
                                  keys.get(Crypto.APPLICATION_KEY_MODE),
                                  payload)
        # If we're not an exit node, make sure that what we recognize our
        # routing type.
        if rt not in (Packet.SWAP_FWD_IPV4_TYPE, Packet.FWD_IPV4_TYPE,
                      Packet.SWAP_FWD_HOST_TYPE, Packet.FWD_HOST_TYPE):
            raise ContentError("Unrecognized Mixminion routing type")
        # Decrypt header 2.
        header2 = Crypto.lioness_decrypt(pkt.header2,
                           keys.getLionessKeys(Crypto.HEADER_ENCRYPT_MODE))
        # If we're the swap node, (1) decrypt the payload with a hash of
        # header2... (2) decrypt header2 with a hash of the payload...
        # (3) and swap the headers.
        if Packet.typeIsSwap(rt):
            hkey = Crypto.lioness_keys_from_header(header2)
            payload = Crypto.lioness_decrypt(payload, hkey)
            hkey = Crypto.lioness_keys_from_payload(payload)
            header2 = Crypto.lioness_decrypt(header2, hkey)
            header1, header2 = header2, header1
        # Build the address object for the next hop
        address = Packet.parseRelayInfoByType(rt, subh.routinginfo)
        # Construct the packet for the next hop.
        pkt = Packet.Packet(header1, header2, payload).pack()
        return RelayedPacket(address, pkt)
class RelayedPacket:
    """A packet destined for another mix server; produced by
       PacketHandler.processPacket."""
    ## Fields:
    # address -- an instance of IPV4Info or MMTPHostInfo for the next hop
    # msg -- the 32KB packet body
    def __init__(self, address, msg):
        """Create a new packet, given an instance of IPV4Info or
           MMTPHostInfo and a 32K packet."""
        assert isinstance(address, (Packet.IPV4Info, Packet.MMTPHostInfo))
        assert len(msg) == 32768
        self.address = address
        self.msg = msg
    def isDelivery(self):
        """Return true iff this packet is a delivery (non-relay) packet."""
        return 0
    def getAddress(self):
        """Return an instance of IPV4Info or MMTPHostInfo indicating
           the address where this packet is to be delivered."""
        return self.address
    def getPacket(self):
        """Returns the 32K contents of this packet."""
        return self.msg
class DeliveryPacket:
"""A packet that is to be delivered via some exit module; returned by
PacketHandler.processPacket"""
##Fields:
# exitType -- a 2-byte integer indicating which exit module to use.
# address -- a string encoding the address to deliver to.
# key -- the 16-byte application key
# tag -- the 20-byte delivery handle
# payload -- the unencoded 28K payload
# contents -- until decode is called, None. After decode is called,
# the actual contents of this message as delivered.
# type -- until decode is called, None. After decode is called,
# one of 'plain' (plaintext message), 'long' (overcompressed message),
# 'enc' (encrypted message), or 'err' (malformed message).
# headers -- a map from key to value for the delivery headers in
# this message's payload. In the case of a fragment, or a
# non-plaintext message, the map is empty.
# isfrag -- Is this packet a fragment of a complete message? If so, the
# type must be 'plain'.
# dPayload -- An instance of mixminion.Packet.Payload for this object.
# error -- None, or a string containing an error encountered while trying
# to decode the payload.
    def __init__(self, routingType, routingInfo, applicationKey, payload):
        """Construct a new DeliveryPacket.
           routingType -- a 2-byte exit-module selector (0..0xFFFF).
           routingInfo -- the encoded delivery address.
           applicationKey -- the 16-byte application key.
           payload -- the 28K encoded payload.
        """
        assert 0 <= routingType <= 0xFFFF
        assert len(applicationKey) == 16
        assert len(payload) == 28*1024
        self.exitType = routingType
        self.address = routingInfo
        self.key = applicationKey
        self.tag = ""
        self.payload = payload
        # The fields below are populated lazily by decode().
        self.contents = None
        self.type = None
        self.headers = None
        self.isfrag = 0
        self.dPayload = None
        self.error = None
    def setTagged(self,tagged=1):
        """Re-frame the routingInfo in this packet.  If 'tagged' is true,
           then the routingInfo starts with TAG_LEN bytes of decoding
           handle, and the rest is address.  If 'tagged' is false, then
           it's all address.
        """
        # Rejoin tag+address before re-splitting so the call is idempotent.
        x = self.tag+self.address
        if tagged:
            if len(x)<Packet.TAG_LEN:
                raise Packet.ParseError("Missing decoding handle for exit type")
            self.tag = x[:Packet.TAG_LEN]
            self.address = x[Packet.TAG_LEN:]
        else:
            self.tag = ""
            self.address = x
    def __getstate__(self):
        """Pickle support: version-tag the instance dict for forward
           compatibility with __setstate__."""
        return "V0", self.__dict__
    def __setstate__(self, state):
        """Pickle support: restore from a ("V0", dict) tuple produced by
           __getstate__; reject unknown versions or state shapes."""
        if type(state) == types.TupleType:
            if state[0] == 'V0':
                self.__dict__.update(state[1])
            else:
                raise MixError("Unrecognized state version %s" % state[0])
        else:
            raise MixError("Unrecognized state type %s"% type(state))
    def isDelivery(self):
        """Return true iff this packet is a delivery (non-relay) packet."""
        return 1
    # Simple accessors for the constructor-supplied fields.
    def getExitType(self): return self.exitType
    def getAddress(self): return self.address
    def getTag(self): return self.tag
    def getApplicationKey(self): return self.key
    def getPayload(self): return self.payload
    def getContents(self):
        """Return the decoded contents of this packet."""
        # Decode lazily on first access; decode() is a no-op afterwards.
        if self.type is None: self.decode()
        return self.contents
    def getDecodedPayload(self):
        """Return an instance of mixminion.Packet.Payload for this packet."""
        if self.type is None: self.decode()
        return self.dPayload
    # Each predicate below triggers a lazy decode() on first use, then
    # tests the resulting 'type' / fragment flag.
    def isPlaintext(self):
        """Return true iff this packet is a plaintext, forward packet."""
        if self.type is None: self.decode()
        return self.type == 'plain'
    def isOvercompressed(self):
        """Return true iff this packet is an overcompressed, plaintext, forward
           packet."""
        if self.type is None: self.decode()
        return self.type == 'long'
    def isFragment(self):
        """Return true iff this packet is part of a fragmented message."""
        if self.type is None: self.decode()
        return self.isfrag
    def isEncrypted(self):
        """Return true iff this packet may be an encrypted forward or
           reply packet."""
        if self.type is None: self.decode()
        return self.type == 'enc'
    def isPrintingAscii(self):
        """Return true iff this packets contents are printing characters
           suitable for inclusion in a text transport medium."""
        if self.type is None: self.decode()
        return isPrintingAscii(self.contents, allowISO=1)
    def isError(self):
        """Return true iff this packet is malformed."""
        if self.type is None: self.decode()
        return self.type == 'err'
    def decode(self):
        """Helper method: Determines this message's type and contents."""
        # Idempotent: the raw payload is cleared at the end, so repeated
        # calls return immediately.
        if self.payload is None:
            return
        message = self.payload
        self.contents = None
        try:
            # Try to decode with an empty key; failure modes distinguish
            # encrypted, fragment, overcompressed and malformed payloads.
            self.dPayload = mixminion.BuildMessage.decodePayload(message, "")
            if self.dPayload is None:
                # encrypted message
                self.type = 'enc'
                self.contents = message
                self.headers = {}
            elif self.dPayload.isSingleton():
                # forward message, singleton.
                self.type = 'plain'
                body = self.dPayload.getUncompressedContents()
                self.contents, self.headers = \
                Packet.parseMessageAndHeaders(body)
            else:
                # forward message, fragment.
                self.isfrag = 1
                self.type = 'plain'
                self.contents = message
                self.headers = {}
        except Packet.CompressedDataTooLong, _:
            # Overcompressed: deliver the still-compressed payload.
            self.contents = Packet.parsePayload(message).getContents()
            self.type = 'long'
            self.headers = {}
        except MixError, e:
            # Malformed payload: keep the raw bytes and record the error.
            self.contents = message
            self.error = str(e)
            self.type = 'err'
            self.headers = {}
        # Drop the raw payload so future decode() calls are no-ops.
        self.payload = None
    def getAsciiContents(self):
        """Return the contents of this message, encoded in base64 if they are
           not already printable."""
        if self.type is None:
            self.decode()
        # Only plaintext that is already printing ASCII passes through
        # unencoded; everything else is base64-armored.
        if self.type == 'plain' and isPrintingAscii(self.contents, allowISO=1):
            return self.contents
        else:
            return encodeBase64(self.contents)
    def getHeaders(self):
        """Return a dict containing the headers for this message."""
        if self.type is None:
            self.decode()
        # decode() always sets headers; a None here means something went
        # wrong upstream, so warn and degrade to an empty dict.
        if self.headers is None:
            log.warn("getHeaders found no decoded headers")
            return {}
        return self.headers
    def getAsciiTag(self):
        """Return a base64-representation of this message's decoding handle."""
        return formatBase64(self.tag)
    def getTextEncodedMessage(self):
        """Return a Packet.TextEncodedMessage object for this packet."""
        tag = None
        # Classification precedence: LONG (overcompressed), ENC (keeps
        # its decoding tag), printable TXT, fragment FRAG, binary BIN.
        if self.isOvercompressed():
            tp = 'LONG'
        elif self.isEncrypted():
            tp = 'ENC'
            tag = self.tag
        elif self.isPrintingAscii():
            assert self.isPlaintext()
            tp = 'TXT'
        elif self.isFragment():
            assert self.isPlaintext()
            tp = 'FRAG'
        else:
            assert self.isPlaintext()
            tp = 'BIN'
        return Packet.TextEncodedMessage(self.contents, tp, tag)
| |
import xmlrpclib
import urllib
import logging
import logging.config
import errors
import response
import request
# Module logger with a console handler.
# NOTE(review): the logger itself is capped at INFO while the handler is
# DEBUG, so LOG.debug(...) calls below are filtered out before the handler.
LOG = logging.getLogger(__name__)
LOG.setLevel(level=logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
LOG.addHandler(ch)
class VCL(object):
    """Thin client for the VCL (Virtual Computing Lab) XML-RPC API.

    All calls are routed through a single VCLServerProxy that injects the
    username/password as custom HTTP headers (see VCLTransport).
    """
    def __init__(self, url, username, password):
        # url -- XML-RPC endpoint; credentials are sent per request.
        self.url = url
        self.username = username
        self.password = password
        self.verbose = 0
        self.client = VCLServerProxy(
            self.url, self.username, self.password, verbose=0)
    def test(self, test_string):
        """Echo-style connectivity check against the VCL server."""
        # client = VCLServerProxy(self.url, self.username, self.password, verbose=0)
        rc = self.client.XMLRPCtest(test_string)
        return rc
    def get_images(self):
        """Return the images available to this user."""
        # NOTE(review): this local proxy is created but never used; the
        # call below goes through self.client.
        client = VCLServerProxy(
            self.url, self.username, self.password, verbose=0)
        rc = self.client.XMLRPCgetImages()
        return rc
    def add_request(self, image_id=None, start="now", length=60, count=1):
        """Create `count` reservations of image `image_id`.

        start -- "now" or an integer UNIX timestamp; length -- minutes.
        Returns a list of VCLRequestResponse / VCLErrorResponse objects,
        or None when count <= 0.  Raises ValueError on bad arguments.
        """
        if image_id is None or not isinstance(image_id, int):
            raise ValueError("image_id expected to be integer")
        if start != "now" and not isinstance(start, int):
            raise ValueError(
                "start time should be 'now' or integer UNIX timestamp")
        if length < 0:
            raise ValueError("reservation length cannot be negative")
        if count <= 0:
            return
        responses = []
        for i in range(count):
            try:
                rc = self.client.XMLRPCaddRequest(image_id, start, length)
                LOG.debug(msg=rc)
                if rc['status'] == "success":
                    responses.append(response.VCLRequestResponse(
                        status=rc['status'],
                        request_id=rc['requestid']))
                elif rc['status'] == "error":
                    # Converted to an error response entry below.
                    raise errors.VCLError(message=rc['errormsg'],
                                          error_code=rc['errorcode'])
            except errors.VCLError, e:
                LOG.error(
                    "Error Code: {1} Message: {0} ".format(e, e.error_code))
                responses.append(response.VCLErrorResponse(status="error",
                                                           error_code=e.error_code,
                                                           error_message=e.message))
        return responses
    def end_request(self, request_id):
        """Terminate the reservation `request_id`.

        Returns a VCLResponse on success, VCLErrorResponse on failure.
        """
        ret = None
        if request_id < 0:
            raise ValueError("request id should be positive")
        try:
            rc = self.client.XMLRPCendRequest(request_id)
            if rc['status'] == "success":
                ret = response.VCLResponse(rc['status'])
            else:
                raise errors.VCLError(message=rc['errormsg'],
                                      error_code=rc['errorcode'])
        except errors.VCLError, e:
            LOG.error(
                msg="Error Code: {1} Message: {0}".format(e, e.error_code))
            ret = response.VCLErrorResponse(status="error",
                                            error_code=e.error_code,
                                            error_message=e.message)
        # NOTE(review): returning inside finally swallows any exception
        # not caught above (including the ValueError path being safe only
        # because it is raised before the try block).
        finally:
            return ret
    def get_request_ids(self):
        """List this user's active requests as VCLRequest objects."""
        res = []
        try:
            rc = self.client.XMLRPCgetRequestIds()
            LOG.debug(msg=rc)
            if rc['status'] == "success":
                LOG.debug(msg="success")
                for req in rc['requests']:
                    LOG.debug(msg="request: {}".format(req))
                    res.append(request.VCLRequest(
                        request_id=req['requestid'],
                        image_id=req['imageid'],
                        image_name=req['imagename'],
                        start=req['start'],
                        end=req['end'],
                        os=req['OS'],
                        is_server=True if req['isserver'] == 1 else False,
                        state=req['state'],
                        server_name=req['servername'] if req['isserver'] else None))
        except errors.VCLError, e:
            LOG.error(
                msg="Error Code: {1} Message: {0}".format(e, e.error_code))
            res = response.VCLErrorResponse(status="error",
                                            error_code=e.error_code,
                                            error_message=e.message)
        except Exception, e:
            raise e
        # NOTE(review): returning inside finally also swallows the
        # re-raised Exception above.
        finally:
            return res
    def get_request_status(self, request_id):
        """Poll the provisioning status of `request_id`."""
        res = None
        try:
            rc = self.client.XMLRPCgetRequestStatus(request_id)
            if rc['status'] == "loading":
                # Still provisioning; 'time' is the estimated wait.
                res = response.VCLResponse(status=rc['status'],
                                           time=rc['time'])
            elif rc['status'] == "error":
                raise errors.VCLError(message=rc['errormsg'],
                                      error_code=rc['errorcode'])
            else:
                res = response.VCLResponse(status=rc['status'])
        except errors.VCLError, e:
            LOG.error(msg="{0} {1}".format(request_id, e.message))
            # NOTE(review): VCLError is raised with error_code above but
            # read back as e.faultCode here -- verify the attribute name.
            res = response.VCLErrorResponse(status="error",
                                            error_code=e.faultCode,
                                            error_message=e.message)
        finally:
            return res
    def get_request_connect_data(self, request_id, remote_ip):
        """Return connection info (host/port/user) for a ready request."""
        return self.client.XMLRPCgetRequestConnectData(request_id, remote_ip)
class VCLServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy variant that authenticates with custom X-User/X-Pass
    headers instead of HTTP basic auth.

    NOTE(review): __init__ intentionally does not call
    xmlrpclib.ServerProxy.__init__; it re-parses the URI itself so the
    credentials can be threaded through to VCLTransport.request().
    """
    __userid = ''
    __passwd = ''
    def __init__(self, uri, userid, passwd, transport=None, encoding=None,
                 verbose=0, allow_none=0, use_datetime=0):
        self.__userid = userid
        self.__passwd = passwd
        # establish a "logical" server connection
        # get the url
        protocol_type, uri = urllib.splittype(uri)
        if protocol_type not in ("http", "https"):
            # NOTE(review): `uri` was already stripped of its scheme by
            # splittype, so this logs the remainder, not the original input.
            LOG.error(msg="input URL: {0}".format(uri))
            LOG.error(
                msg="{0}: unsupported XML-RPC protocol".format(protocol_type))
            raise IOError, "unsupported XML-RPC protocol"
        self.__host, self.__handler = urllib.splithost(uri)
        if not self.__handler:
            # Default XML-RPC handler path.
            self.__handler = "/RPC2"
        if transport is None:
            transport = VCLTransport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose
        self.__allow_none = allow_none
    def __request(self, method_name, params):
        # Serialize the call, send it through the transport (which injects
        # the auth headers), and unwrap single-element responses.
        request = xmlrpclib.dumps(params, method_name, encoding=self.__encoding,
                                  allow_none=self.__allow_none)
        response = self.__transport.request(
            self.__host,
            self.__userid,
            self.__passwd,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        if len(response) == 1:
            response = response[0]
        return response
    def __getattr__(self, name):
        # Magic dispatch: attribute access becomes a remote method call,
        # same trick xmlrpclib.ServerProxy uses internally.
        return xmlrpclib._Method(self.__request, name)
class VCLTransport(xmlrpclib.SafeTransport):
    """SafeTransport that adds VCL's X-APIVERSION / X-User / X-Pass
    authentication headers to every request."""
    ##
    # Send a complete request, and parse the response.
    #
    # @param host Target host.
    # @param userid VCL user name (sent as X-User).
    # @param passwd VCL password (sent as X-Pass).
    # @param handler Target RPC handler.
    # @param request_body XML-RPC request body.
    # @param verbose Debugging flag.
    # @return Parsed response.
    def request(self, host, userid, passwd, handler, request_body, verbose=0):
        # issue XML-RPC request
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        self.send_request(h, handler, request_body)
        # VCL-specific auth headers; must go out before the body.
        h.putheader('X-APIVERSION', '2')
        h.putheader('X-User', userid)
        h.putheader('X-Pass', passwd)
        self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)
        response = h.getresponse()
        errcode, errmsg, headers = response.status, response.msg, response.getheaders()
        if errcode != 200:
            raise xmlrpclib.ProtocolError(
                host + handler,
                errcode, errmsg,
                headers
            )
        self.verbose = verbose
        resp = response.read()
        try:
            resp = xmlrpclib.loads(resp)[0]
        except xmlrpclib.Fault, err:
            # Server-side faults are converted into VCLError so callers
            # only deal with one exception type.
            # if err.faultCode == 3:
            #     raise errors.VCLError(
            #         err.faultString, err.faultCode)
            # elif err.faultCode == 4:
            #     LOG.error("%s" % err.faultString)
            # elif err.faultCode == 5:
            #     LOG.error("Received '%s' error. "
            #               "The VCL site could not establish a connection with your authentication server." % err.faultString)
            # elif err.faultCode == 6:
            #     LOG.error("Received '%s' error. "
            #               "The VCL site could not determine a method to use to authenticate the supplied user."
            #               % err.faultString)
            # else:
            #     LOG.error("ERROR: Received '%s' error from VCL site." %
            #               err.faultString)
            raise errors.VCLError(message=err.faultString,
                                  error_code=err.faultCode)
        return resp
| |
import time
import os
import random
class Chip8:
    """A small CHIP-8 interpreter.

    Loads a ROM at construction time; each call to cycle() fetches and
    executes one 2-byte instruction and ticks the 60Hz timers.  The
    64x32 monochrome display is exposed via `graphic` plus `drawFlag`.

    Fixes over the original version:
      * all mutable machine state is now per-instance (it used to be
        class attributes, silently shared between Chip8 instances);
      * the display is created with 32 rows (was 36; the clear-screen
        opcode and the real hardware both use 64x32);
      * the index register `I` is initialized (the class previously
        defined lowercase `i` instead, so opcodes reading `self.I`
        before an ANNN raised AttributeError);
      * sprite drawing wraps at the screen edges instead of raising
        IndexError;
      * the ROM file is closed via a context manager.
    """
    # The CHIP-8 font set: sixteen 4x5-pixel glyphs (0-F), 5 bytes per
    # glyph, loaded into memory[0x00:0x50].
    fontSet = [0xF0, 0x90, 0x90, 0x90, 0xF0,
               0x20, 0x60, 0x20, 0x20, 0x70,
               0xF0, 0x10, 0xF0, 0x80, 0xF0,
               0xF0, 0x10, 0xF0, 0x10, 0xF0,
               0x90, 0x90, 0xF0, 0x10, 0x10,
               0xF0, 0x80, 0xF0, 0x10, 0xF0,
               0xF0, 0x80, 0xF0, 0x90, 0xF0,
               0xF0, 0x10, 0x20, 0x40, 0x40,
               0xF0, 0x90, 0xF0, 0x90, 0xF0,
               0xF0, 0x90, 0xF0, 0x10, 0xF0,
               0xF0, 0x90, 0xF0, 0x90, 0x90,
               0xE0, 0x90, 0xE0, 0x90, 0xE0,
               0xF0, 0x80, 0x80, 0x80, 0xF0,
               0xE0, 0x90, 0x90, 0x90, 0xE0,
               0xF0, 0x80, 0xF0, 0x80, 0xF0,
               0xF0, 0x80, 0xF0, 0x80, 0x80]

    def __init__(self, fileName):
        self.fileName = fileName
        # Machine state (per-instance).
        self.memory = [0] * 4096
        self.V = [0] * 16            # 8-bit registers V0..VF
        self.I = 0                   # index register
        self.i = 0                   # legacy alias of the old class attr; unused
        self.pc = 0x200              # program counter starts at ROM space
        self.graphic = [[0] * 64 for _ in range(32)]  # 64x32 display
        self.delay = 0               # delay timer (decrements at 60Hz)
        self.sound = 0               # sound timer (decrements at 60Hz)
        self.stack = [0] * 16
        self.stackp = 0              # stack pointer
        self.key = [False] * 16      # key states
        self.drawFlag = False        # True when the screen needs redrawing
        self.cycleCount = 0          # mostly for debugging
        random.seed()                # for the CXNN random opcode
        # Load the font set into low memory.
        for i in range(0, 80):
            self.memory[i] = self.fontSet[i]
        # Load the ROM into memory starting at 0x200.
        with open(fileName, 'rb') as rom:
            for i in range(0, os.path.getsize(fileName)):
                self.memory[512 + i] = ord(rom.read(1))

    def cycle(self, key):
        """Execute one instruction with the given key states and tick
        the delay/sound timers."""
        if self.cycleCount == 60:
            self.cycleCount = 0
        self.cycleCount += 1
        self.key = key
        # Opcodes are always 2 bytes, big-endian.
        opcode = (self.memory[self.pc] << 8) | self.memory[self.pc+1]
        self.executeInst(opcode)
        if self.delay > 0:
            self.delay -= 1
        if self.sound > 0:
            self.sound -= 1

    def executeInst(self, opcode):
        """Decode and execute a single 2-byte opcode."""
        # Common opcode fields: X/Y register indices and the top nibble.
        VX = (opcode & 0xF00) >> 8
        VY = (opcode & 0xF0) >> 4
        first = opcode >> 12
        if first == 0x0:
            if (opcode & 0xFF) == 0xE0:
                # 00E0: clear the screen.
                self.graphic = [[0] * 64 for i in range(32)]
                self.drawFlag = True
            elif (opcode & 0xFF) == 0xEE:
                # 00EE: return from subroutine (pc+2 below skips past
                # the original 2NNN call instruction).
                self.stackp -= 1
                self.pc = self.stack[self.stackp]
            self.pc = self.pc + 2
        if first == 0x1:
            # 1NNN: jump to address NNN.
            self.pc = opcode & 0xfff
        if first == 0x2:
            # 2NNN: call subroutine at NNN.
            self.stack[self.stackp] = self.pc
            self.stackp += 1
            self.pc = opcode & 0xfff
        if first == 0x3:
            # 3XNN: skip next instruction if VX == NN.
            if self.V[VX] == (opcode & 0xFF):
                self.pc = self.pc + 4
            else:
                self.pc = self.pc + 2
        if first == 0x4:
            # 4XNN: skip next instruction if VX != NN.
            if self.V[VX] != (opcode & 0xFF):
                self.pc = self.pc + 4
            else:
                self.pc = self.pc + 2
        if first == 0x5:
            # 5XY0: skip next instruction if VX == VY.
            if self.V[VY] == self.V[VX]:
                self.pc = self.pc + 4
            else:
                self.pc = self.pc + 2
        if first == 0x6:
            # 6XNN: VX = NN.
            self.V[VX] = (opcode & 0xFF)
            self.pc = self.pc + 2
        if first == 0x7:
            # 7XNN: add NN to VX (no carry flag).
            self.V[VX] = (self.V[VX] + (opcode & 0xFF)) & 0xFF
            self.pc = self.pc + 2
        if first == 0x8:
            # 8XYn: ALU operations between VX and VY.
            if (opcode & 0xF) == 0x0:
                self.V[VX] = self.V[VY]
            if (opcode & 0xF) == 0x1:
                self.V[VX] = self.V[VY] | self.V[VX]
            if (opcode & 0xF) == 0x2:
                self.V[VX] = self.V[VY] & self.V[VX]
            if (opcode & 0xF) == 0x3:
                self.V[VX] = self.V[VY] ^ self.V[VX]
            if (opcode & 0xF) == 0x4:
                # 8XY4: add with carry flag in VF.
                self.V[VX] = self.V[VX] + self.V[VY]
                if self.V[VX] > 0xff:
                    self.V[0xf] = 1
                    self.V[VX] = self.V[VX] & 0xff
                else:
                    self.V[0xf] = 0
            if (opcode & 0xF) == 0x5:
                # 8XY5: subtract; VF = 1 when there was NO borrow.
                self.V[VX] = self.V[VX] - self.V[VY]
                if self.V[VX] < 0x0:
                    self.V[0xf] = 0
                    self.V[VX] = self.V[VX] & 0xff
                else:
                    self.V[0xf] = 1
            if (opcode & 0xF) == 0x6:
                # 8XY6: shift right; VF = shifted-out bit.
                self.V[0xf] = self.V[VX] & 0b1
                self.V[VX] = self.V[VX] >> 1
            if (opcode & 0xF) == 0x7:
                # 8XY7: VX = VY - VX; VF = 1 when there was NO borrow.
                self.V[VX] = self.V[VY] - self.V[VX]
                if self.V[VX] < 0x0:
                    self.V[0xf] = 0
                    self.V[VX] = self.V[VX] & 0xff
                else:
                    self.V[0xf] = 1
            if (opcode & 0xF) == 0xE:
                # 8XYE: shift left; VF = shifted-out bit.
                self.V[0xf] = self.V[VX] >> 7
                self.V[VX] = self.V[VX] << 1
                self.V[VX] = self.V[VX] & 0xff
            self.pc = self.pc + 2
        if first == 0x9:
            # 9XY0: skip next instruction if VX != VY.
            if self.V[VX] != self.V[VY]:
                self.pc = self.pc + 4
            else:
                self.pc = self.pc + 2
        if first == 0xa:
            # ANNN: I = NNN.
            self.I = opcode & 0xFFF
            self.pc = self.pc + 2
        if first == 0xb:
            # BNNN: jump to NNN + V0.
            self.pc = (opcode & 0xfff) + self.V[0x0]
        if first == 0xc:
            # CXNN: VX = random byte AND NN.
            self.V[VX] = (random.randrange(0,255)) & (opcode & 0xFF)
            self.pc = self.pc + 2
        if first == 0xd:
            # DXYN: draw an 8xN sprite from memory[I] at (VX, VY);
            # pixels XOR onto the screen and VF reports any collision.
            x = self.V[VX]
            y = self.V[VY]
            height = opcode & 0xF
            self.V[0xF] = 0
            for yline in range(0, height):
                pixelState = self.memory[self.I + yline]
                for xline in range(0, 8):
                    if (pixelState & (0x80 >> xline)) != 0:
                        # Wrap at the screen edges (standard CHIP-8
                        # behavior; also prevents IndexError).
                        py = (y + yline) % 32
                        px = (x + xline) % 64
                        if self.graphic[py][px] == 1:
                            self.V[0xF] = 1
                        self.graphic[py][px] ^= 1
            self.drawFlag = True
            self.pc = self.pc + 2
        if first == 0xe:
            # EX9E / EXA1: skip depending on key VX state.
            if (opcode & 0xFF) == 0x9E:
                if self.key[self.V[VX]] == 1:
                    self.pc += 4
                else:
                    self.pc += 2
            if (opcode & 0xFF) == 0xA1:
                if self.key[self.V[VX]] != 1:
                    self.pc += 4
                else:
                    self.pc += 2
        if first == 0xf:
            if (opcode & 0xFF) == 0x07:
                # FX07: VX = delay timer.
                self.V[VX] = self.delay
            if (opcode & 0xFF) == 0x0A:
                # FX0A: wait for a key press; returning without
                # advancing pc makes the instruction retry next cycle.
                keyPress = False
                for i in range(0, 16):
                    if self.key[i] != 0:
                        self.V[VX] = i
                        keyPress = True
                if not keyPress:
                    return
            if (opcode & 0xFF) == 0x15:
                # FX15: delay timer = VX.
                self.delay = self.V[VX]
            if (opcode & 0xFF) == 0x18:
                # FX18: sound timer = VX.
                self.sound = self.V[VX]
            if (opcode & 0xFF) == 0x1E:
                # FX1E: I += VX; VF flags 16-bit overflow.
                self.I = self.I + self.V[VX]
                if self.I > 0xffff:
                    self.I = self.I & 0xffff
                    self.V[0xf] = 1
                else:
                    self.V[0xf] = 0
            if (opcode & 0xFF) == 0x29:
                # FX29: I = address of the font glyph for digit VX.
                self.I = (self.V[VX] * 0x5) & 0xffff
            if (opcode & 0xFF) == 0x33:
                # FX33: store BCD of VX at memory[I..I+2].
                self.memory[self.I] = int(self.V[VX] / 100) & 0xff
                self.memory[self.I+1] = int(self.V[VX] / 10 % 10) & 0xff
                self.memory[self.I+2] = int(self.V[VX] % 10) & 0xff
            if (opcode & 0xFF) == 0x55:
                # FX55: dump V0..VX to memory[I..].
                for i in range(0, VX+1):
                    self.memory[self.I + i] = self.V[i]
            if (opcode & 0xFF) == 0x65:
                # FX65: load V0..VX from memory[I..].
                # NOTE: I is advanced here (classic interpreter quirk),
                # preserved from the original implementation.
                for i in range(0, VX+1):
                    self.V[i] = self.memory[self.I + i]
                self.I = (self.I + VX + 1) & 0xFFFF
            self.pc = self.pc + 2
| |
import linear_env_small as linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
    """Ornstein-Uhlenbeck exploration noise increment.

    Returns theta * (mu - x) plus zero-mean Gaussian noise scaled by
    sigma, with one independent noise sample per element of x.
    """
    drift = theta * (mu - x)
    diffusion = sigma * np.random.randn(np.shape(x)[0])
    return drift + diffusion
def simulate(control, swmm ,flows):
    """Train a DDPG controller on the linear drainage environment
    (control truthy) or simulate and plot the free dynamics (control
    falsy).  swmm is unsupported here; flows toggles whether flow
    information is used by the actor/environment step.
    """
    best_reward = -1*np.inf
    BUFFER_SIZE = 100000
    BATCH_SIZE = 120
    GAMMA = 0.99
    TAU = 0.01     #Target Network HyperParameters
    LRA = 0.0001    #Learning rate for Actor
    LRC = 0.001     #Lerning rate for Critic
    action_dim = 4
    state_dim = 5
    max_steps = 6000
    np.random.seed(9501)
    EXPLORE = 100000.
    episode_count = 1000
    done = False
    # NOTE(review): `step` is never incremented; "Total Step" below
    # always prints 0.
    step = 0
    epsilon = 1
    if swmm:
        print("No support")
    else:
        # Constants for the linear environment
        Hs = 2400
        A1 = 0.0020
        mu1 = 250
        sigma1 = 70
        A2 = 0.0048
        mu2 = 250
        sigma2 = 70
        dt = 1
        x = np.arange(Hs)
        # Two Gaussian inflow disturbances (one per node), randomized
        # slightly each episode below.
        d = np.zeros((2,Hs))
        if control:
            #Tensorflow GPU optimization
            # NOTE(review): TF1-style session API (tf.ConfigProto/Session).
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            kbck.set_session(sess)
            # Actor, critic and replay buffer creation
            actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
            critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
            buff = ReplayBuffer(BUFFER_SIZE)
            # Get the linear environment
            reward_hist = []
            for i in range(episode_count):
                print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
                # Perturb the disturbance parameters each episode so the
                # policy sees varied storm profiles.
                A1 += 0.0004*np.random.rand()
                mu1 += 50*np.random.rand()
                sigma1 += 14*np.random.rand()
                A2 += 0.00096*np.random.rand()
                mu2 += 50*np.random.rand()
                sigma2 += 14*np.random.rand()
                d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
                d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
                vref = np.zeros((state_dim,))
                env = linear_env.env(dt,d,vref)
                # States are normalized by the tank capacities.
                s_t = np.divide(env.reset(),env.vmax)
                total_reward = 0.
                for j in range(max_steps):
                    ## Noise addition for exploration
                    ## Ornstein-Uhlenbeck process
                    loss = 0
                    epsilon -= 1.0 / EXPLORE
                    a_t = np.zeros([1,action_dim])
                    noise_t = np.zeros([1,action_dim])
                    a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
                    noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
                    #noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
                    a_t[0] = a_t_original[0] + noise_t[0]
                    #Act over the system and get info of the next states
                    s_t1 , r_t, done, _ = env.step(a_t[0],flows=flows)
                    s_t1 = np.divide(s_t1,env.vmax)
                    #Add replay buffer
                    buff.add(s_t, a_t[0], r_t, s_t1, done)
                    #Do the batch update
                    batch = buff.getBatch(BATCH_SIZE)
                    states = np.asarray([e[0] for e in batch])
                    actions = np.asarray([e[1] for e in batch])
                    rewards = np.asarray([e[2] for e in batch])
                    next_states = np.asarray([e[3] for e in batch])
                    dones = np.asarray([e[4] for e in batch])
                    # Get estimated q-values of the pair (next_state,mu(next_state))
                    actions_next = actor.target_munet.predict(next_states)
                    target_q_values = critic.target_qnet.predict([next_states, actions_next])
                    # Bellman targets: terminal transitions use the bare
                    # reward, otherwise bootstrap with the target critic.
                    y_t = np.zeros(np.shape(actions))
                    for k in range(len(batch)):
                        if dones[k]:
                            y_t[k] = rewards[k]
                        else:
                            y_t[k] = rewards[k] + GAMMA*target_q_values[k]
                    loss += critic.qnet.train_on_batch([states,actions], y_t)
                    # Deterministic policy gradient: dQ/da drives the actor.
                    a_for_grad = actor.munet.predict(states)
                    grads = critic.gradients(states, a_for_grad)
                    actor.train(states, grads)
                    # Soft (Polyak) updates of both target networks.
                    actor.target_train()
                    critic.target_train()
                    total_reward = total_reward + GAMMA*r_t
                    s_t = s_t1
                    if j%100==0:
                        print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
                    if done:
                        break
                reward_hist.append(total_reward)
                np.save("reward_small_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
                # Checkpoint every 20 episodes and whenever a new best
                # episode reward is reached.
                if i%20 == 0:
                    print("Saving the networks...")
                    actor.munet.save_weights("./actors_small/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
                    critic.qnet.save_weights("./critics_small/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
                if total_reward > best_reward:
                    print("Saving Best Actor...")
                    np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
                    actor.munet.save_weights("./actors_small/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
                    critic.qnet.save_weights("./critics_small/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
                    best_reward = total_reward
                print("TOTAL REWARD @ " + str(i) +"-th Episode  : Reward " + str(total_reward))
                print("Total Step: " + str(step))
                print("")
            print("Finish.")
        else:
            # Free (uncontrolled) simulation and plotting.
            d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
            d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
            vref = np.zeros((state_dim,))
            env = linear_env.env(dt,d,vref)
            resv, resf, resu = env.free_sim()
            f , axarr = plt.subplots(nrows=2, ncols=2 )
            # NOTE(review): np.matlib requires an explicit
            # `import numpy.matlib`; plain `import numpy as np` does not
            # make np.matlib available -- verify at runtime.
            resv_norm = np.divide(np.transpose(resv),np.matlib.repmat(env.vmax,Hs,1))
            resu = np.transpose(np.asarray(resu))
            ## Plot Volume Results
            lines = axarr[0,0].plot(x,resv_norm[:,:5])
            axarr[0,0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))))
            axarr[0,0].set_title("Volumes - Tanks 1 to 5")
            axarr[0,0].set_xlabel("Times(s)")
            axarr[0,0].set_ylabel("Volume(%vmax)")
            lines = axarr[0,1].plot(x,resv_norm[:,5:10])
            axarr[0,1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vS",range(5,10))))
            axarr[0,1].set_title("Volumes - Tanks 6 to 9 and Storm Tank")
            axarr[0,1].set_xlabel("Times(s)")
            axarr[0,1].set_ylabel("Volume(%vmax)")
            lines = axarr[1,0].plot(x,resu[:,:4])
            axarr[1,0].legend(lines , list(map(lambda x: "u"+str(x+1),range(4))))
            axarr[1,0].set_title("Actions - Apertures")
            axarr[1,0].set_xlabel("Times(s)")
            axarr[1,0].set_ylabel("% Aperture")
            lines = axarr[1,1].plot(x,resu[:,4:8])
            axarr[1,1].legend(lines , list(map(lambda x: "u"+str(x+1),range(4,8))))
            axarr[1,1].set_title("Actions - Apertures")
            axarr[1,1].set_xlabel("Times(s)")
            axarr[1,1].set_ylabel("% Aperture")
            plt.suptitle("DDPG performance",y=1.05)
            #sns.despine()
            plt.tight_layout()
            plt.show()
def rainfile():
    """Generate two randomized Gaussian rain profiles and write them as
    SWMM runoff files (swmm/runoff1.dat and swmm/runoff2.dat).

    Each output line is "HH:MM value", one sample per simulated minute.
    Removed dead code from the original: an unused seconds component in
    the time formatter and unused local imports (math.exp, pylab).
    """
    import numpy as np
    # Gaussian storm profiles with ~10% randomized amplitude, center
    # and width.
    A1 = 0.008 + 0.0008*np.random.rand(); mu1 = 500+50*np.random.rand(); sigma1 = 250+25*np.random.rand()
    A2 = 0.0063 + 0.00063*np.random.rand() ; mu2 = 500+50*np.random.rand(); sigma2 = 250+25*np.random.rand()
    dt = 1
    Hs = 1800
    x = np.arange(0,Hs,dt)
    d = [[],[]]
    d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2))  # Node 1 - left
    d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2))  # Node 2 - right
    def secs_to_hour(secs_convert):
        # Only hours and minutes appear in the output format.
        hour = secs_convert//3600
        mins = (secs_convert%3600)//60
        return '{h:02d}:{m:02d}'.format(h=hour,m=mins)
    secs_hour_vec = np.vectorize(secs_to_hour)
    for k in (1,2):
        with open('swmm/runoff%d.dat' % k, 'w') as f:
            # Downsample to one line per minute (every 60th sample).
            for i, (t,val) in enumerate(zip(secs_hour_vec(x), d[k-1])):
                if i%60 == 0:
                    f.write(t+" "+str(val)+"\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c","--control", type=int, choices = [0,1], help = "Choose between control(1) or free dynamics(0)")
parser.add_argument("-s","--swmm", type=int, choices = [0,1], help = "Choose between a simulation with swmm(1) or not(0)")
parser.add_argument("-f","--flow", type=int, choices = [0,1], help = "Choose between a simulation with flows(1) or not(0)")
args = parser.parse_args()
if args.flow == 1 and args.swmm == 1:
print("Conflicting option flow 1 and swmm 1")
else:
t0 = time.process_time()
simulate(control=args.control, swmm=args.swmm, flows = args.flow)
tf = time.process_time()
print("Elapsed time: ",tf-t0)
| |
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
import six
from six import moves
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('heat.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memorized matchmaker object
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        serialized = jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        # Log, then re-raise the original TypeError for the caller.
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
    else:
        return serialized
def _deserialize(data):
    """Deserialization wrapper."""
    LOG.debug(_("Deserializing: %s"), data)
    decoded = jsonutils.loads(data)
    return decoded
class ZmqSocket(object):
    """A tiny wrapper around ZeroMQ.

    Simplifies the send/recv protocol and connection management.
    Can be used as a Context (supports the 'with' statement).
    """

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        # addr -- zmq endpoint; zmq_type -- e.g. zmq.PUSH/PULL/PUB/SUB;
        # bind -- bind (server side) vs connect (client side);
        # subscribe -- None, a single filter, or a list of filters
        # (SUB sockets only).
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe."""
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            # Filter is only recorded when the socket accepted it.
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Unsubscribe."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        # Idempotent: safe to call on an already-closed socket.
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, lets log, and later evaluate
            # if we can safely raise here.
            LOG.error(_("ZeroMQ socket could not be closed."))
        self.sock = None

    def recv(self, **kwargs):
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart(**kwargs)

    def send(self, data, **kwargs):
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr):
        # One outbound PUSH socket, connecting (not binding) to addr.
        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

    def cast(self, msg_id, topic, data, envelope):
        """Fire-and-forget send of `data` on `topic`.

        With envelope=False the legacy 4-part wire format is used;
        otherwise the message is wrapped with rpc_common.serialize_msg
        and sent as the 'impl_zmq_v2' format.
        NOTE(review): map(bytes, ...) returns a lazy iterator on
        Python 3 -- this wire code assumes Python 2 semantics.
        """
        msg_id = msg_id or 0

        if not envelope:
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return

        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
        # Flatten the envelope dict into alternating key/value parts.
        zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
        self.outq.send(map(bytes,
                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

    def close(self):
        self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""

    def __init__(self, **kwargs):
        # Replies accumulated by reply(); consumed when the response is
        # normalized and cast back to the caller.
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return an independent copy of this context."""
        values = self.to_dict()
        # Copy the list so the clone does not alias (and mutate) this
        # context's reply buffer -- the previous code shared the list.
        values['replies'] = list(self.replies)
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        """Buffer one reply; 'ending' markers are not buffered."""
        if ending:
            return
        self.replies.append(reply)

    @classmethod
    def marshal(cls, ctx):
        """Serialize *ctx* to a JSON string."""
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(cls, data):
        """Rebuild an RpcContext from a JSON string."""
        return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
    """Used by ConsumerBase as a private context for - methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        # Returns either a normalized reply list or, on failure, a dict
        # with a serialized exception under 'exc'.
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            # Expected (client-declared) exceptions are not logged as
            # failures.
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
        #                  this may be able to be removed earlier than
        #                  'I' if ConsumerBase.process were refactored.
        if type(msg) is list:
            payload = msg[-1]
        else:
            payload = msg

        response = ConsumerBase.normalize_reply(
            self._get_response(ctx, proxy, topic, payload),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        # _multi_send/_cast are module-level helpers defined elsewhere
        # in this file.
        _multi_send(_cast, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,  # Include for Folsom compat.
                'response': response
            }
        }, _msg_id=msg_id)
class ConsumerBase(object):
    """Base Consumer."""

    def __init__(self):
        # Private context used for internally-routed ('-' prefixed)
        # methods.
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(cls, result, replies):
        """Normalize a dispatch result into a list of replies.

        Generators are drained into a list; explicit replies collected
        on the context take precedence over the return value; otherwise
        the single result is wrapped in a one-element list.
        (Fixed: the classmethod's first parameter was named `self`.)
        """
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, proxy, ctx, data):
        """Dispatch one incoming message dict to *proxy*."""
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Method starting with - are
        # processed internally. (non-valid method name)
        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a centralized casting broker
    (PULL-PUSH).  Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}
        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):
        """Attach `proxy` to a new inbound socket on `in_addr`."""
        LOG.info(_("Registering reactor"))

        # Only pull-style inbound socket types are supported.
        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        in_sock = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                            subscribe=subscribe)

        self.proxies[in_sock] = proxy
        self.sockets.append(in_sock)

        LOG.info(_("In reactor registered"))

    def consume_in_thread(self):
        """Spawn one greenthread per registered socket, each consuming
        forever (with automatic retry on uncaught exceptions).
        """
        @excutils.forever_retry_uncaught_exceptions
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for sock in self.proxies:
            self.threads.append(self.pool.spawn(_consume, sock))

    def wait(self):
        for thread in self.threads:
            thread.wait()

    def close(self):
        for sock in self.sockets:
            sock.close()

        for thread in self.threads:
            thread.kill()
class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)
        # Characters that may never appear in a topic: the topic is later
        # embedded into an on-disk IPC socket path, so path separators
        # would allow escaping the IPC directory.
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

        # topic -> LightQueue feeding the per-topic publisher greenthread.
        self.topic_proxy = {}

    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        # Fanout and reply topics are published (PUB) to every listener;
        # everything else is pushed (PUSH) to a single consumer.
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                # Runs in its own greenthread; owns the outbound socket
                # for this topic and drains its queue forever.
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    # Report the failure back to the waiting caller.
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                # Unblock the caller waiting in consume() below.
                waiter.send(True)

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            # Non-blocking: drop the message rather than stall the proxy
            # when the per-topic backlog is full.
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            # The directory may already exist; anything else is fatal.
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            # Distinguish a permission problem from address-in-use.
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.

    Takes a flat [k1, v1, k2, v2, ...] sequence and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}

    A trailing unpaired key is silently dropped.
    """
    i = iter(packenv)
    h = {}
    try:
        while True:
            # Builtin next() (Python 2.6+/3) replaces six.next — the six
            # dependency was unnecessary for this call.
            k = next(i)
            h[k] = next(i)
    except StopIteration:
        return h
class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        """Receive one multipart message from `sock`, decode it according
        to its envelope version, and dispatch it on a greenthread.
        """
        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        # Frame layout is positional: data[2] names the envelope version.
        if data[2] == 'cast':  # Legacy protocol
            # Legacy: context and message are packed together in data[3].
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            # v2: context in data[3], flattened k/v envelope from data[4:].
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        # Each request is processed on its own greenthread.
        self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        """Subscribe `proxy` to `topic`.

        `fanout` may be False (point-to-point), True (subscribe to all
        messages on the fanout topic), or a string used verbatim as the
        ZMQ subscription filter.
        """
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            # Replaces the obscure ('', fanout)[type(fanout) == str]
            # tuple-indexing trick with an explicit conditional;
            # isinstance additionally tolerates str subclasses.
            subscribe = fanout if isinstance(fanout, str) else ''
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)

        self.reactor.close()
        self.topics = []

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    """Send a one-way message to `addr`; no reply is expected."""
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    # Track the client explicitly instead of the original fragile
    # "'conn' in vars()" introspection hack.
    conn = None
    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            if conn is not None:
                conn.close()
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    """Send a message to `addr` and block for the reply.

    Returns the last response payload; raises the deserialized remote
    exception if any response carries one.
    """
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    # Track the socket explicitly instead of the original fragile
    # "'msg_waiter' in vars()" introspection hack.
    msg_waiter = None
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if msg_waiter is not None:
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        # isinstance(resp, dict) works on Python 2 and 3 alike;
        # the original types.DictType alias is Python-2-only.
        if isinstance(resp, dict) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    # NOTE(review): both branches below return inside the loop, so only
    # the first matchmaker queue is ever used — this looks inconsistent
    # with the fanout comment above; confirm intent before relying on it.
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            # Casts are fire-and-forget: run on a greenthread.
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
def create_connection(conf, new=True):
    """Create a Connection; `new` is accepted for driver-API compatibility
    but is not read by this implementation."""
    return Connection(conf)
def multicall(conf, *args, **kwargs):
    """Multiple calls."""
    responses = _multi_send(_call, *args, **kwargs)
    return responses
def call(conf, *args, **kwargs):
    """Send a message, expect a response."""
    responses = _multi_send(_call, *args, **kwargs)
    return responses[-1]
def cast(conf, *args, **kwargs):
    """Send a message expecting no reply."""
    # Result intentionally discarded: casts are fire-and-forget.
    _multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoid splitting on .
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    fanout_topic = 'fanout~' + str(topic)
    _multi_send(_cast, context, fanout_topic, msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
    """Send notification event.

    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    dashed_topic = topic.replace('.', '-')
    cast(conf, context, dashed_topic, msg, envelope=envelope)
def cleanup():
    """Clean up resources in use by implementation."""
    global ZMQ_CTX
    global matchmaker

    # Terminate the shared ZMQ context (if any) and forget both globals.
    if ZMQ_CTX:
        ZMQ_CTX.term()
    ZMQ_CTX = None

    matchmaker = None
def _get_ctxt():
    """Return the process-wide ZeroMQ context, creating it on first use."""
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    # Lazily create the shared context with the configured io-thread count.
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker instance, constructing it on first use."""
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so the deprecated class path was never
            # actually rewritten to the matchmaker_ring module.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
| |
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# builtin
import collections
# external
import lxml.etree
import lxml.isoschematron
# internal
from sdv import utils, xmlconst
# relative
from . import base
# SVRL error tags: the two element types in an SVRL report that signal a
# rule violation.
ERROR_TAGS = (
    xmlconst.TAG_SVRL_FAILED_ASSERT,
    xmlconst.TAG_SVRL_SUCCESSFUL_REPORT
)

# Tuple for recording schematron validation errors.
# 'node' is the etree failed-assert or successful-report node.
# 'context' is the associated schematron rule context for the error.
SVRLError = collections.namedtuple(
    typename="SVRLError",
    field_names=["context", "node"]
)
def make_rule(ctx):
    """Returns a Schematron rule etree.Element for the given context."""
    qname = lxml.etree.QName(xmlconst.NS_SCHEMATRON, "rule")
    return lxml.etree.Element(
        qname,
        attrib={"context": ctx},
        nsmap={None: xmlconst.NS_SCHEMATRON}
    )
def make_pattern(id=None):
    """Returns a Schematron pattern etree.Element."""
    # NOTE: `id` shadows the builtin but is kept — callers may pass it
    # by keyword.
    qname = lxml.etree.QName(xmlconst.NS_SCHEMATRON, "pattern")
    attrs = {"id": id} if id else {}
    return lxml.etree.Element(
        qname,
        attrib=attrs,
        nsmap={None: xmlconst.NS_SCHEMATRON}
    )
def make_schema():
    """Returns a Schematron schema etree.Element."""
    qname = lxml.etree.QName(xmlconst.NS_SCHEMATRON, "schema")
    return lxml.etree.Element(qname, nsmap={None: xmlconst.NS_SCHEMATRON})
def make_ns(prefix, uri):
    """Returns a Schematron ns etree.Element for the given ns prefix
    and uri.
    """
    qname = lxml.etree.QName(xmlconst.NS_SCHEMATRON, "ns")
    return lxml.etree.Element(
        qname,
        attrib={"prefix": prefix, "uri": uri},
        nsmap={None: xmlconst.NS_SCHEMATRON}
    )
class SchematronError(base.ValidationError):
    """Represents an error found in a SVRL report.

    Args:
        doc: The instance document which was validated and produced this error.
        error: An :class:`SVRLError` whose ``node`` is the
            ``svrl:failed-assert`` or ``svrl:successful-report``
            ``etree._Element`` and whose ``context`` is the associated
            schematron rule context.

    Attributes:
        message: The validation error message.
        context: The schematron rule context for the error.

    """
    def __init__(self, doc, error):
        super(SchematronError, self).__init__()
        node = error.node
        self._doc = doc
        self._error = node
        # XPath (taken from the SVRL report) locating the offending node
        # in the source document; used for lazy line-number resolution.
        self._xpath_location = node.attrib.get('location')
        self._test = node.attrib.get('test')
        self._line = None  # computed lazily via the `line` property
        self.context = error.context
        self.message = self._parse_message(node)

    def __unicode__(self):
        # NOTE: Python 2 API; `unicode` does not exist on Python 3.
        return unicode(self.message)

    def __str__(self):
        return unicode(self).encode("utf-8")

    def _get_line(self):
        """Returns the line number in the input document associated with this
        error.
        """
        root = utils.get_etree_root(self._doc)
        xpath = self._xpath_location
        nsmap = self._error.nsmap
        # Assumes the SVRL location XPath matches at least one node in the
        # source document; raises IndexError otherwise.
        node = root.xpath(xpath, namespaces=nsmap)[0]
        return node.sourceline

    @property
    def line(self):
        """Returns the line number in the input document associated with this
        error.

        This property is lazily evaluated, meaning the line number isn't known
        until the first time this property is accessed. Each subsequent call
        will return the cached line number.
        """
        if not self._line:
            self._line = self._get_line()
        return self._line

    def _parse_message(self, error):
        # The svrl:text child of the error element holds the
        # human-readable message; empty string when absent.
        message = error.find("{%s}text" % xmlconst.NS_SVRL)

        if message is None:
            return ""

        return message.text

    def as_dict(self):
        """Returns a dictionary representation.

        Keys:
            * ``'message'``: The error message
            * ``'line'``: The line number associated with the error
        """
        return dict(message=self.message, line=self.line)
class SchematronValidationResults(base.ValidationResults):
    """Used to hold results of a Schematron validation process.

    Args:
        is_valid: The boolean validation result.
        doc: The document which produced these validation results.
        svrl_report: The etree._ElementTree SVRL report produced during the
            validation run.

    Attributes:
        errors: A list of :class:`SchematronError` instances representing
            errors found in the `svrl_report`.
        is_valid: Returns ``True`` if the validation was successful and
            ``False`` otherwise.

    """
    def __init__(self, is_valid, doc=None, svrl_report=None):
        super(SchematronValidationResults, self).__init__(is_valid)
        self._svrl_report = svrl_report
        self._doc = doc
        self.errors = self._parse_errors(svrl_report)

    def _get_errors(self, svrl_report):
        """Parses errors from the SVRL report document.

        Args:
            svrl_report: An etree SVRL document.

        Returns:
            A list of :class:`SVRLError` objects.

        """
        errors = []

        if not svrl_report:
            return errors

        # Each svrl:fired-rule element announces the rule context for the
        # failed-assert / successful-report elements that follow it.
        # BUG FIX: initialize `context`; previously it was unbound
        # (NameError) if an error element appeared before any fired-rule.
        context = None
        root = svrl_report.getroot()

        for element in utils.descendants(root):
            if element.tag == xmlconst.TAG_SVRL_FIRED_RULE:
                context = element.attrib['context']
                continue

            if element.tag not in ERROR_TAGS:
                continue

            error = SVRLError(context=context, node=element)
            errors.append(error)

        return errors

    def _parse_errors(self, svrl_report):
        """Wrap each raw SVRL error in a :class:`SchematronError`."""
        errors = self._get_errors(svrl_report)
        return [SchematronError(self._doc, x) for x in errors]

    def as_dict(self):
        """A dictionary representation of the
        :class:`.SchematronValidationResults` instance.

        Keys:
            * ``'result'``: The validation results. Values can be
              ``True`` or ``False``.
            * ``'errors'``: A list of validation error dictionaries. The keys
              are ``'message'`` and ``'line'``.

        Returns:
            A dictionary representation of an instance of this class.

        """
        d = super(SchematronValidationResults, self).as_dict()

        if self.errors:
            d['errors'] = [x.as_dict() for x in self.errors]

        return d
class SchematronValidator(object):
    """Performs schematron validation against an XML instance document.

    Args:
        schematron: A Schematron document. This can be a filename, file-like
            object, ``etree._Element``, or ``etree._ElementTree`` instance.
        phase: An optional Schematron validation phase id.

    """
    def __init__(self, schematron, phase=None):
        self._schematron = self._build_schematron(schematron, phase)
        self._phase_id = phase

    def _build_schematron(self, sch, phase=None):
        """Attempts to build an ``lxml.isoschematron.Schematron`` instance
        from `sch`.

        Args:
            sch: A Schematron document filename, file-like object,
                etree._Element, or etree._ElementTree.
            phase: An optional phase id restricting which patterns run.

        Returns:
            A ``lxml.isoschematron.Schematron`` instance for `sch`.

        Raises:
            ValueError: If `sch` is ``None``.

        """
        if sch is None:
            raise ValueError("Input schematron document cannot be None")

        root = utils.get_etree_root(sch)
        # store_* flags keep the compiled XSLT, the source schematron and
        # the SVRL report accessible after each validation run.
        schematron = lxml.isoschematron.Schematron(
            root,
            phase=phase,
            store_report=True,
            store_xslt=True,
            store_schematron=True
        )

        return schematron

    @property
    def xslt(self):
        """Returns an etree._ElementTree representation of the XSLT
        transform of the Schematron document.
        """
        return self._schematron.validator_xslt

    @property
    def schematron(self):
        """Returns an etree._ElementTree representation of the Schematron
        document.
        """
        return self._schematron.schematron

    def validate(self, doc):
        """Validates an XML instance document `doc` using Schematron rules.

        Args:
            doc: An XML instance document. This can be a filename, file-like
                object, ``etree._Element`` or ``etree._ElementTree`` instance.

        Returns:
            An instance of
            :class:`.SchematronValidationResults`.

        Raises:
            .ValidationError: If there are any issues parsing `doc`.

        """
        root = utils.get_etree_root(doc)
        is_valid = self._schematron.validate(root)
        # The SVRL report for the run just performed (store_report=True).
        svrl_report = self._schematron.validation_report

        return SchematronValidationResults(
            is_valid=is_valid,
            doc=root,
            svrl_report=svrl_report
        )
# Explicit public API for ``from <module> import *``.
__all__ = [
    'SchematronValidator',
    'SchematronValidationResults',
    'SchematronError'
]
| |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from direct.fsm import FSM
from direct.task import Task
smileyDoId = 1
class DistCogdoCraneObject(DistributedSmoothNode.DistributedSmoothNode, FSM.FSM):
    """Client-side distributed object that can be grabbed, carried,
    dropped and slid by a Cogdo crane.

    This is Python 2 code (print statements, old-style raise).

    FSM states: Off, LocalGrabbed, Grabbed, LocalDropped, Dropped,
    SlidingFloor, Free.  'Local*' states are optimistic transitions made
    immediately by the local client while awaiting AI confirmation; the
    AI's authoritative state arrives via setObjectState().
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistCogdoCraneObject')
    # When true, a task watches a sliding object and requests the 'Free'
    # state from the AI once it has essentially stopped moving.
    wantsWatchDrift = 1

    def __init__(self, cr):
        DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
        FSM.FSM.__init__(self, 'DistCogdoCraneObject')
        self.craneGame = None
        self.avId = 0
        self.craneId = 0
        self.cleanedUp = 0
        # Collision solids: collide *into* pies/walls/camera/other objects,
        # collide *from* pies and the floor.
        self.collisionNode = CollisionNode('object')
        self.collisionNode.setIntoCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.WallBitmask | ToontownGlobals.CashbotBossObjectBitmask | OTPGlobals.CameraBitmask)
        self.collisionNode.setFromCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.FloorBitmask)
        self.collisionNodePath = NodePath(self.collisionNode)
        self.physicsActivated = 0
        self.toMagnetSoundInterval = Sequence()
        self.hitFloorSoundInterval = Sequence()
        self.hitBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_safe_miss.ogg')
        self.hitBossSoundInterval = SoundInterval(self.hitBossSfx)
        self.touchedBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_sandbag.ogg')
        self.touchedBossSoundInterval = SoundInterval(self.touchedBossSfx, duration=0.8)
        self.lerpInterval = None
        return

    def disable(self):
        self.cleanup()
        self.stopSmooth()
        DistributedSmoothNode.DistributedSmoothNode.disable(self)

    def cleanup(self):
        # Idempotent: guard so intervals are finished/deleted only once.
        if self.cleanedUp:
            return
        else:
            self.cleanedUp = 1
        self.demand('Off')
        self.detachNode()
        self.toMagnetSoundInterval.finish()
        self.hitFloorSoundInterval.finish()
        self.hitBossSoundInterval.finish()
        self.touchedBossSoundInterval.finish()
        del self.toMagnetSoundInterval
        del self.hitFloorSoundInterval
        del self.hitBossSoundInterval
        del self.touchedBossSoundInterval
        self.craneGame = None
        return

    def setupPhysics(self, name):
        # Re-root this node under an ActorNode so the physics engine can
        # move it; the NodePath handle is re-assigned in place.
        an = ActorNode('%s-%s' % (name, self.doId))
        anp = NodePath(an)
        if not self.isEmpty():
            self.reparentTo(anp)
        NodePath.assign(self, anp)
        self.physicsObject = an.getPhysicsObject()
        self.setTag('object', str(self.doId))
        self.collisionNodePath.reparentTo(self)
        self.handler = PhysicsCollisionHandler()
        self.handler.addCollider(self.collisionNodePath, self)
        self.collideName = self.uniqueName('collide')
        self.handler.addInPattern(self.collideName + '-%in')
        self.handler.addAgainPattern(self.collideName + '-%in')
        self.watchDriftName = self.uniqueName('watchDrift')

    def activatePhysics(self):
        # Attach to the game's physics manager and start listening for
        # collision events; safe to call when already active.
        if not self.physicsActivated:
            self.craneGame.physicsMgr.attachPhysicalNode(self.node())
            base.cTrav.addCollider(self.collisionNodePath, self.handler)
            self.physicsActivated = 1
            self.accept(self.collideName + '-floor', self.__hitFloor)
            self.accept(self.collideName + '-goon', self.__hitGoon)
            self.acceptOnce(self.collideName + '-headTarget', self.__hitBoss)
            self.accept(self.collideName + '-dropPlane', self.__hitDropPlane)

    def deactivatePhysics(self):
        if self.physicsActivated:
            self.craneGame.physicsMgr.removePhysicalNode(self.node())
            base.cTrav.removeCollider(self.collisionNodePath)
            self.physicsActivated = 0
            self.ignore(self.collideName + '-floor')
            self.ignore(self.collideName + '-goon')
            self.ignore(self.collideName + '-headTarget')
            self.ignore(self.collideName + '-dropPlane')

    def hideShadows(self):
        # Hook for subclasses with drop shadows.
        pass

    def showShadows(self):
        pass

    def stashCollisions(self):
        self.collisionNodePath.stash()

    def unstashCollisions(self):
        self.collisionNodePath.unstash()

    def __hitFloor(self, entry):
        # Only a falling object reacts to the floor.
        if self.state == 'Dropped' or self.state == 'LocalDropped':
            self.d_hitFloor()
            self.demand('SlidingFloor', localAvatar.doId)

    def __hitGoon(self, entry):
        if self.state == 'Dropped' or self.state == 'LocalDropped':
            goonId = int(entry.getIntoNodePath().getNetTag('doId'))
            goon = self.cr.doId2do.get(goonId)
            if goon:
                self.doHitGoon(goon)

    def doHitGoon(self, goon):
        # Hook for subclasses.
        pass

    def __hitBoss(self, entry):
        # NOTE(review): comparing self.craneId to craneGame.doId looks
        # adapted from the CashbotBoss code (where the target was the
        # boss doId); confirm the intended comparison.
        if (self.state == 'Dropped' or self.state == 'LocalDropped') and self.craneId != self.craneGame.doId:
            # Impact strength is the forward (Y) component of the
            # velocity, measured in the crane's coordinate space.
            vel = self.physicsObject.getVelocity()
            vel = self.crane.root.getRelativeVector(render, vel)
            vel.normalize()
            impact = vel[1]
            if impact >= self.getMinImpact():
                print 'hit! %s' % impact
                self.hitBossSoundInterval.start()
                self.doHitBoss(impact)
            else:
                self.touchedBossSoundInterval.start()
                print '--not hard enough: %s' % impact

    def doHitBoss(self, impact):
        self.d_hitBoss(impact)

    def __hitDropPlane(self, entry):
        self.notify.info('%s fell out of the world.' % self.doId)
        self.fellOut()

    def fellOut(self):
        # Subclasses must override to respawn/recover the object.
        raise StandardError, 'fellOut unimplented'

    def getMinImpact(self):
        # Minimum normalized forward speed that counts as a boss hit;
        # subclasses override.
        return 0

    def __watchDrift(self, task):
        # Once horizontal motion has effectively stopped, ask the AI to
        # mark the object Free (and transition locally).
        v = self.physicsObject.getVelocity()
        if abs(v[0]) < 0.0001 and abs(v[1]) < 0.0001:
            self.d_requestFree()
            self.demand('Free')
        return Task.cont

    def prepareGrab(self):
        # Hook invoked just before a crane picks this object up.
        pass

    def prepareRelease(self):
        # Hook invoked just after a crane lets this object go.
        pass

    def setCraneGameId(self, craneGameId):
        self.craneGameId = craneGameId
        self.craneGame = base.cr.doId2do[craneGameId]

    def setObjectState(self, state, avId, craneId):
        # Distributed update from the AI: single-letter state codes.
        if state == 'G':
            self.demand('Grabbed', avId, craneId)
        elif state == 'D':
            if self.state != 'Dropped':
                self.demand('Dropped', avId, craneId)
        elif state == 's':
            if self.state != 'SlidingFloor':
                self.demand('SlidingFloor', avId)
        elif state == 'F':
            self.demand('Free')
        else:
            self.notify.error('Invalid state from AI: %s' % state)

    def d_requestGrab(self):
        self.sendUpdate('requestGrab')

    def rejectGrab(self):
        # AI denied our optimistic grab; fall back to a local drop.
        if self.state == 'LocalGrabbed':
            self.demand('LocalDropped', self.avId, self.craneId)

    def d_requestDrop(self):
        self.sendUpdate('requestDrop')

    def d_hitFloor(self):
        self.sendUpdate('hitFloor')

    def d_requestFree(self):
        # Report the final resting pose so the AI can place the object.
        self.sendUpdate('requestFree', [self.getX(),
                                        self.getY(),
                                        self.getZ(),
                                        self.getH()])

    def d_hitBoss(self, impact):
        self.sendUpdate('hitBoss', [impact])

    def defaultFilter(self, request, args):
        # All transitions are invalid until the crane game reference has
        # been set via setCraneGameId().
        if self.craneGame == None:
            raise FSM.RequestDenied, request
        return FSM.FSM.defaultFilter(self, request, args)

    def enterOff(self):
        self.detachNode()
        if self.lerpInterval:
            self.lerpInterval.finish()
            self.lerpInterval = None
        return

    def exitOff(self):
        self.reparentTo(render)

    def enterLocalGrabbed(self, avId, craneId):
        # Optimistic grab by the local avatar, pending AI confirmation.
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.hideShadows()
        self.prepareGrab()
        self.crane.grabObject(self)

    def exitLocalGrabbed(self):
        # If the AI confirmed (-> Grabbed) keep holding; otherwise undo.
        if self.newState != 'Grabbed':
            self.crane.dropObject(self)
            self.prepareRelease()
            del self.crane
            self.showShadows()

    def enterGrabbed(self, avId, craneId):
        if self.oldState == 'LocalGrabbed':
            if craneId == self.craneId:
                # AI confirmed our own optimistic grab; nothing to redo.
                return
            else:
                # A different crane won; release our local hold first.
                self.crane.dropObject(self)
                self.prepareRelease()
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.hideShadows()
        self.prepareGrab()
        self.crane.grabObject(self)

    def exitGrabbed(self):
        self.crane.dropObject(self)
        self.prepareRelease()
        self.showShadows()
        del self.crane

    def enterLocalDropped(self, avId, craneId):
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.activatePhysics()
        self.startPosHprBroadcast()
        self.hideShadows()
        # Frictionless while airborne.
        self.handler.setStaticFrictionCoef(0)
        self.handler.setDynamicFrictionCoef(0)

    def exitLocalDropped(self):
        # Keep physics running if we continue into a falling/sliding state.
        if self.newState != 'SlidingFloor' and self.newState != 'Dropped':
            self.deactivatePhysics()
            self.stopPosHprBroadcast()
        del self.crane
        self.showShadows()

    def enterDropped(self, avId, craneId):
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        if self.avId == base.localAvatar.doId:
            # We own the simulation: run physics and broadcast position.
            self.activatePhysics()
            self.startPosHprBroadcast()
            self.handler.setStaticFrictionCoef(0)
            self.handler.setDynamicFrictionCoef(0)
        else:
            # Another client simulates; smooth their broadcasts.
            self.startSmooth()
        self.hideShadows()

    def exitDropped(self):
        if self.avId == base.localAvatar.doId:
            if self.newState != 'SlidingFloor':
                self.deactivatePhysics()
                self.stopPosHprBroadcast()
        else:
            self.stopSmooth()
        del self.crane
        self.showShadows()

    def enterSlidingFloor(self, avId):
        self.avId = avId
        if self.lerpInterval:
            self.lerpInterval.finish()
            self.lerpInterval = None
        if self.avId == base.localAvatar.doId:
            self.activatePhysics()
            self.startPosHprBroadcast()
            # Restore friction so the object skids to a stop.
            self.handler.setStaticFrictionCoef(0.9)
            self.handler.setDynamicFrictionCoef(0.5)
            if self.wantsWatchDrift:
                taskMgr.add(self.__watchDrift, self.watchDriftName)
        else:
            self.startSmooth()
        self.hitFloorSoundInterval.start()
        return

    def exitSlidingFloor(self):
        if self.avId == base.localAvatar.doId:
            taskMgr.remove(self.watchDriftName)
            self.deactivatePhysics()
            self.stopPosHprBroadcast()
        else:
            self.stopSmooth()

    def enterFree(self):
        self.avId = 0
        self.craneId = 0

    def exitFree(self):
        pass
| |
import os
import urlparse
import yaml
import pytest
import matplotlib.pyplot as plt
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import download_file
from tardis.atomic import AtomData
from tardis.simulation.base import run_radial1d
from tardis.model import Radial1DModel
from tardis.io.config_reader import Configuration
@pytest.mark.skipif(not pytest.config.getvalue("integration-tests"),
reason="integration tests are not included in this run")
class TestIntegration(object):
"""Slow integration test for various setups present in subdirectories of
``tardis/tests/integration_tests``.
"""
@classmethod
@pytest.fixture(scope="class", autouse=True)
def setup(self, request, reference, data_path):
    """
    This method does initial setup of creating configuration and performing
    a single run of integration test.
    """
    # The last component in dirpath can be extracted as name of setup.
    self.name = data_path['setup_name']

    self.config_file = os.path.join(data_path['config_dirpath'], "config.yml")

    # A quick hack to use atom data per setup. Atom data is ingested from
    # local HDF or downloaded and cached from a url, depending on data_path
    # keys.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input — confirm these config files are trusted.
    atom_data_name = yaml.load(open(self.config_file))['atom_data']

    # Get the path to HDF file:
    if 'atom_data_url' in data_path:
        # If the atom data is to be ingested from url:
        atom_data_filepath = download_file(urlparse.urljoin(
            base=data_path['atom_data_url'], url=atom_data_name), cache=True
        )
    else:
        # If the atom data is to be ingested from local file:
        atom_data_filepath = os.path.join(
            data_path['atom_data_dirpath'], atom_data_name
        )

    # Load atom data file separately, pass it for forming tardis config.
    self.atom_data = AtomData.from_hdf5(atom_data_filepath)

    # Check whether the atom data file in current run and the atom data
    # file used in obtaining the reference data are same.
    # TODO: hard coded UUID for kurucz atom data file, generalize it later.
    # kurucz_data_file_uuid1 = "5ca3035ca8b311e3bb684437e69d75d7"
    # assert self.atom_data.uuid1 == kurucz_data_file_uuid1

    # Create a Configuration through yaml file and atom data.
    tardis_config = Configuration.from_yaml(
        self.config_file, atom_data=self.atom_data)

    # Check whether current run is with less packets.
    if request.config.getoption("--less-packets"):
        less_packets = request.config.integration_tests_config['less_packets']
        tardis_config['montecarlo']['no_of_packets'] = (
            less_packets['no_of_packets']
        )
        tardis_config['montecarlo']['last_no_of_packets'] = (
            less_packets['last_no_of_packets']
        )

    # We now do a run with prepared config and get radial1d model.
    self.result = Radial1DModel(tardis_config)

    # If current test run is just for collecting reference data, store the
    # output model to HDF file, save it at specified path. Skip all tests.
    # Else simply perform the run and move further for performing
    # assertions.
    if request.config.getoption("--generate-reference"):
        run_radial1d(self.result, hdf_path_or_buf=os.path.join(
            data_path['gen_ref_dirpath'], "{0}.h5".format(self.name)
        ))
        pytest.skip("Reference data saved at {0}".format(
            data_path['gen_ref_dirpath']
        ))
    else:
        run_radial1d(self.result)

    # Get the reference data through the fixture.
    self.reference = reference
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_j_estimators(self):
    """Compare j estimators against the stored reference values."""
    expected = self.reference['j_estimators']
    actual = self.result.j_estimators
    assert_allclose(expected, actual)
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_j_blue_estimators(self):
    """Compare j_blue estimators and their normalization factor."""
    ref = self.reference
    res = self.result
    assert_allclose(ref['j_blue_estimators'], res.j_blue_estimators)
    assert_quantity_allclose(ref['j_blues_norm_factor'],
                             res.j_blues_norm_factor)
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_last_line_interactions(self):
    # The id/shell arrays are plain numbers (assert_allclose); the
    # wavelength carries astropy units, so it needs the quantity-aware
    # comparison.
    assert_allclose(
        self.reference['last_line_interaction_in_id'],
        self.result.last_line_interaction_in_id)

    assert_allclose(
        self.reference['last_line_interaction_out_id'],
        self.result.last_line_interaction_out_id)

    assert_allclose(
        self.reference['last_line_interaction_shell_id'],
        self.result.last_line_interaction_shell_id)

    assert_quantity_allclose(
        self.reference['last_line_interaction_angstrom'],
        self.result.last_line_interaction_angstrom)
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_nubar_estimators(self):
    """Compare nubar estimators against the stored reference values."""
    expected = self.reference['nubar_estimators']
    actual = self.result.nubar_estimators
    assert_allclose(expected, actual)
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_ws(self):
    """Compare dilution factors (ws) against the reference values."""
    expected = self.reference['ws']
    actual = self.result.ws
    assert_allclose(expected, actual)
@pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
def test_luminosity_inner(self):
    """Compare the inner-boundary luminosity with the reference value."""
    expected = self.reference['luminosity_inner']
    actual = self.result.luminosity_inner
    assert_quantity_allclose(expected, actual)
    def test_spectrum(self, plot_object):
        """Check spectrum quantities against the reference HDF data.

        Also attaches a deviation plot for luminosity_density_lambda to
        the report via the plot_object fixture.
        """
        plot_object.add(self.plot_spectrum(), "{0}_spectrum".format(self.name))
        # All three arrays are compared in plain CGS values (no units).
        assert_allclose(
            self.reference['/simulation/runner/spectrum/luminosity_density_nu'],
            self.result.runner.spectrum.luminosity_density_nu.cgs.value)
        assert_allclose(
            self.reference['/simulation/runner/spectrum/wavelength'],
            self.result.runner.spectrum.wavelength.cgs.value)
        assert_allclose(
            self.reference['/simulation/runner/spectrum/luminosity_density_lambda'],
            self.result.runner.spectrum.luminosity_density_lambda.cgs.value)
def plot_spectrum(self):
plt.suptitle("Deviation in spectrum_quantities", fontweight="bold")
figure = plt.figure()
# `ldl_` prefixed variables associated with `luminosity_density_lambda`.
# Axes of subplot are extracted, if we wish to make multiple plots
# for different spectrum quantities all in one figure.
ldl_ax = figure.add_subplot(111)
ldl_ax.set_title("Deviation in luminosity_density_lambda")
ldl_ax.set_xlabel("Wavelength")
ldl_ax.set_ylabel("Relative error (1 - result / reference)")
deviation = 1 - (
self.result.runner.spectrum.luminosity_density_lambda.cgs.value /
self.reference['/simulation/runner/spectrum/luminosity_density_lambda']
)
ldl_ax.plot(
self.reference['/simulation/runner/spectrum/wavelength'], deviation,
color="blue", marker="."
)
return figure
    @pytest.mark.skipif(True, reason="Introduction of HDF mechanism.")
    def test_montecarlo_properties(self):
        """Compare Monte Carlo luminosities and frequencies (with units)."""
        assert_quantity_allclose(
            self.reference['montecarlo_luminosity'],
            self.result.montecarlo_luminosity)
        # NOTE(review): virtual luminosity is read from result.runner while
        # the other two come from result directly — confirm this asymmetry
        # is intentional.
        assert_quantity_allclose(
            self.reference['montecarlo_virtual_luminosity'],
            self.result.runner.montecarlo_virtual_luminosity)
        assert_quantity_allclose(
            self.reference['montecarlo_nu'],
            self.result.montecarlo_nu)
    def test_shell_temperature(self, plot_object):
        """Check per-shell radiation temperatures against the reference.

        Also attaches a comparison plot to the report via plot_object.
        """
        plot_object.add(self.plot_t_rads(), "{0}_t_rads".format(self.name))
        assert_allclose(
            self.reference['/simulation/model/t_rads'],
            self.result.t_rads.cgs.value)
def plot_t_rads(self):
plt.suptitle("Shell temperature for packets", fontweight="bold")
figure = plt.figure()
ax = figure.add_subplot(111)
ax.set_xlabel("Shell id")
ax.set_ylabel("t_rads")
result_line = ax.plot(
self.result.t_rads.cgs, color="blue", marker=".", label="Result"
)
reference_line = ax.plot(
self.reference['/simulation/model/t_rads'],
color="green", marker=".", label="Reference"
)
error_ax = ax.twinx()
error_line = error_ax.plot(
(1 - self.result.t_rads.cgs.value / self.reference['/simulation/model/t_rads']),
color="red", marker=".", label="Rel. Error"
)
error_ax.set_ylabel("Relative error (1 - result / reference)")
lines = result_line + reference_line + error_line
labels = [l.get_label() for l in lines]
ax.legend(lines, labels, loc="lower left")
return figure
| |
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/__init__.py 4043 2009/02/23 09:06:45 scons"
import imp
import sys
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
# Extra directories searched (before the standard locations) for tool modules.
DefaultToolpath=[]
# One scanner instance per source language, shared module-wide.
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
# Dispatching scanner: picks one of the scanners above by file suffix.
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
# Suffixes routed to the C scanner: C/C++/Objective-C sources and headers,
# plus preprocessed Fortran (.F/.fpp) and assembler (.S/.spp) files.
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
             ".h", ".H", ".hxx", ".hpp", ".hh",
             ".F", ".fpp", ".FPP",
             ".m", ".mm",
             ".S", ".spp", ".SPP"]
DSuffixes = ['.d']
# NOTE(review): IDLSuffixes is defined but not registered with any scanner
# here — presumably consumed elsewhere; confirm before removing.
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
for suffix in CSuffixes:
    SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
    SourceFileScanner.add_scanner(suffix, DScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
    SourceFileScanner.add_scanner(suffix, LaTeXScanner)
    SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool:
    """Load and apply a tool-specification module.

    The module is looked up by tool name, first on the instance toolpath
    (plus DefaultToolpath), then as a SCons.Tool submodule, in both
    cases with a zipimport fallback.  The loaded module must provide
    ``generate(env)`` and ``exists(env)``; it may optionally provide
    ``options(opts)`` to contribute construction variables.
    """
    def __init__(self, name, toolpath=[], **kw):
        # NOTE(review): the mutable default [] is shared across calls, but
        # it is only read (concatenated below), never mutated, so it is
        # harmless as written.
        self.name = name
        self.toolpath = toolpath + DefaultToolpath
        # remember these so we can merge them into the call
        self.init_kw = kw
        # Resolve the tool module eagerly; a missing tool raises
        # SCons.Errors.EnvironmentError from _tool_module().
        module = self._tool_module()
        self.generate = module.generate
        self.exists = module.exists
        if hasattr(module, 'options'):
            self.options = module.options
    def _tool_module(self):
        """Locate and import this tool's module.

        Search order: (1) plain module on self.toolpath, (2) zipimport
        from each toolpath entry, (3) already-imported SCons.Tool.<name>,
        (4) module on the SCons.Tool package path, (5) zipimport from the
        SCons.Tool package directory.
        """
        # TODO: Interchange zipimport with normal initilization for better error reporting
        oldpythonpath = sys.path
        sys.path = self.toolpath + sys.path
        try:
            try:
                file, path, desc = imp.find_module(self.name, self.toolpath)
                try:
                    return imp.load_module(self.name, file, path, desc)
                finally:
                    # find_module may return an open file handle; close it.
                    if file:
                        file.close()
            except ImportError, e:
                # Re-raise anything that is not a plain "module not found"
                # for *this* tool (e.g. a broken tool module).
                if str(e)!="No module named %s"%self.name:
                    raise SCons.Errors.EnvironmentError, e
                try:
                    import zipimport
                except ImportError:
                    pass
                else:
                    # Each toolpath entry may itself be a zip archive.
                    for aPath in self.toolpath:
                        try:
                            importer = zipimport.zipimporter(aPath)
                            return importer.load_module(self.name)
                        except ImportError, e:
                            pass
        finally:
            # Always restore the import path, even on failure.
            sys.path = oldpythonpath
        full_name = 'SCons.Tool.' + self.name
        try:
            # Already imported as a SCons.Tool submodule?
            return sys.modules[full_name]
        except KeyError:
            try:
                smpath = sys.modules['SCons.Tool'].__path__
                try:
                    file, path, desc = imp.find_module(self.name, smpath)
                    module = imp.load_module(full_name, file, path, desc)
                    # Cache on the package so later lookups hit sys.modules.
                    setattr(SCons.Tool, self.name, module)
                    if file:
                        file.close()
                    return module
                except ImportError, e:
                    if str(e)!="No module named %s"%self.name:
                        raise SCons.Errors.EnvironmentError, e
                    try:
                        # Last resort: the SCons.Tool package itself may live
                        # inside a zip archive.
                        import zipimport
                        importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
                        module = importer.load_module(full_name)
                        setattr(SCons.Tool, self.name, module)
                        return module
                    except ImportError, e:
                        m = "No tool named '%s': %s" % (self.name, e)
                        raise SCons.Errors.EnvironmentError, m
            except ImportError, e:
                m = "No tool named '%s': %s" % (self.name, e)
                raise SCons.Errors.EnvironmentError, m
    def __call__(self, env, *args, **kw):
        """Apply the tool's generate() to env, merging call-time keyword
        arguments over those given at construction time."""
        if self.init_kw is not None:
            # Merge call kws into init kws;
            # but don't bash self.init_kw.
            if kw is not None:
                call_kw = kw
                kw = self.init_kw.copy()
                kw.update(call_kw)
            else:
                kw = self.init_kw
        env.Append(TOOLS = [ self.name ])
        if hasattr(self, 'options'):
            # The tool contributes construction variables; feed them through
            # the environment's Variables object (created here on demand).
            import SCons.Variables
            if not env.has_key('options'):
                from SCons.Script import ARGUMENTS
                env['options']=SCons.Variables.Variables(args=ARGUMENTS)
            opts=env['options']
            self.options(opts)
            opts.Update(env)
        # apply() is the Python 2 spelling of self.generate(env, *args, **kw).
        apply(self.generate, ( env, ) + args, kw)
    def __str__(self):
        return self.name
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
    """Return the Program Builder for *env*.

    The Builder is created and registered on first use; later calls
    hand back the one already present in env['BUILDERS'].
    """
    if 'Program' not in env['BUILDERS']:
        import SCons.Defaults
        env['BUILDERS']['Program'] = SCons.Builder.Builder(
            action=SCons.Defaults.LinkAction,
            emitter='$PROGEMITTER',
            prefix='$PROGPREFIX',
            suffix='$PROGSUFFIX',
            src_suffix='$OBJSUFFIX',
            src_builder='Object',
            target_scanner=ProgramScanner)
    return env['BUILDERS']['Program']
def createStaticLibBuilder(env):
    """Return the StaticLibrary Builder for *env*.

    Created (and also registered under the 'Library' alias) on first
    use; later calls return the Builder already registered.
    """
    if 'StaticLibrary' not in env['BUILDERS']:
        actions = [SCons.Action.Action("$ARCOM", "$ARCOMSTR")]
        # Index the archive with ranlib when the tool is available.
        if env.Detect('ranlib'):
            actions.append(SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR"))
        builder = SCons.Builder.Builder(action=actions,
                                        emitter='$LIBEMITTER',
                                        prefix='$LIBPREFIX',
                                        suffix='$LIBSUFFIX',
                                        src_suffix='$OBJSUFFIX',
                                        src_builder='StaticObject')
        env['BUILDERS']['StaticLibrary'] = builder
        env['BUILDERS']['Library'] = builder
    return env['BUILDERS']['StaticLibrary']
def createSharedLibBuilder(env):
    """Return the SharedLibrary Builder for *env*, creating and
    registering it on first use."""
    if 'SharedLibrary' not in env['BUILDERS']:
        import SCons.Defaults
        # SharedCheck validates the sources before the actual link step.
        actions = [SCons.Defaults.SharedCheck,
                   SCons.Defaults.ShLinkAction]
        env['BUILDERS']['SharedLibrary'] = SCons.Builder.Builder(
            action=actions,
            emitter="$SHLIBEMITTER",
            prefix='$SHLIBPREFIX',
            suffix='$SHLIBSUFFIX',
            target_scanner=ProgramScanner,
            src_suffix='$SHOBJSUFFIX',
            src_builder='SharedObject')
    return env['BUILDERS']['SharedLibrary']
def createLoadableModuleBuilder(env):
    """Return the LoadableModule Builder for *env*, creating and
    registering it on first use."""
    if 'LoadableModule' not in env['BUILDERS']:
        import SCons.Defaults
        # SharedCheck validates the sources before the module link step.
        actions = [SCons.Defaults.SharedCheck,
                   SCons.Defaults.LdModuleLinkAction]
        env['BUILDERS']['LoadableModule'] = SCons.Builder.Builder(
            action=actions,
            emitter="$LDMODULEEMITTER",
            prefix='$LDMODULEPREFIX',
            suffix='$LDMODULESUFFIX',
            target_scanner=ProgramScanner,
            src_suffix='$SHOBJSUFFIX',
            src_builder='SharedObject')
    return env['BUILDERS']['LoadableModule']
def createObjBuilders(env):
    """Return the (StaticObject, SharedObject) Builder pair for *env*.

    Whichever of the two is missing is created and registered;
    StaticObject is additionally registered under the 'Object' alias.
    This lives in one shared function because soooo many Tools need
    exactly this pair.
    """
    if 'StaticObject' not in env['BUILDERS']:
        static_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$OBJPREFIX',
                                           suffix='$OBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        env['BUILDERS']['StaticObject'] = static_obj
        env['BUILDERS']['Object'] = static_obj
    if 'SharedObject' not in env['BUILDERS']:
        env['BUILDERS']['SharedObject'] = SCons.Builder.Builder(
            action={},
            emitter={},
            prefix='$SHOBJPREFIX',
            suffix='$SHOBJSUFFIX',
            src_builder=['CFile', 'CXXFile'],
            source_scanner=SourceFileScanner,
            single_source=1)
    return (env['BUILDERS']['StaticObject'], env['BUILDERS']['SharedObject'])
def createCFileBuilders(env):
    """Return the (CFile, CXXFile) Builder pair for *env*.

    Whichever of the two is missing is created and registered, together
    with its default output suffix ('.c' / '.cc').  Shared by the many
    Tools that generate C/C++ sources.
    """
    if 'CFile' not in env['BUILDERS']:
        env['BUILDERS']['CFile'] = SCons.Builder.Builder(
            action={},
            emitter={},
            suffix={None: '$CFILESUFFIX'})
        env.SetDefault(CFILESUFFIX='.c')
    if 'CXXFile' not in env['BUILDERS']:
        env['BUILDERS']['CXXFile'] = SCons.Builder.Builder(
            action={},
            emitter={},
            suffix={None: '$CXXFILESUFFIX'})
        env.SetDefault(CXXFILESUFFIX='.cc')
    return (env['BUILDERS']['CFile'], env['BUILDERS']['CXXFile'])
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
    """Return the Jar Builder for *env*, creating and registering it if
    the environment does not have one yet.

    Sources are Java .class files (built by the JavaClassFile builder
    when needed); the target suffix comes from $JARSUFFIX.
    """
    try:
        java_jar = env['BUILDERS']['Jar']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
        # Bug fix: src_suffix was misspelled '$JAVACLASSSUFIX' (missing an
        # F), so it expanded to the empty string instead of the '.class'
        # suffix that the other Java builders use ($JAVACLASSSUFFIX).
        java_jar = SCons.Builder.Builder(action = jar_com,
                                         suffix = '$JARSUFFIX',
                                         src_suffix = '$JAVACLASSSUFFIX',
                                         src_builder = 'JavaClassFile',
                                         source_factory = fs.Entry)
        env['BUILDERS']['Jar'] = java_jar
    return java_jar
def CreateJavaHBuilder(env):
    """Return the JavaH Builder for *env*, creating and registering it
    on first use."""
    if 'JavaH' not in env['BUILDERS']:
        fs = SCons.Node.FS.get_default_fs()
        javah_action = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
        env['BUILDERS']['JavaH'] = SCons.Builder.Builder(
            action=javah_action,
            src_suffix='$JAVACLASSSUFFIX',
            target_factory=fs.Entry,
            source_factory=fs.File,
            src_builder='JavaClassFile')
    return env['BUILDERS']['JavaH']
def CreateJavaClassFileBuilder(env):
    """Return the JavaClassFile Builder for *env*, creating and
    registering it on first use."""
    if 'JavaClassFile' not in env['BUILDERS']:
        fs = SCons.Node.FS.get_default_fs()
        javac_action = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        env['BUILDERS']['JavaClassFile'] = SCons.Builder.Builder(
            action=javac_action,
            emitter={},
            src_suffix='$JAVASUFFIX',
            src_builder=['JavaFile'],
            target_factory=fs.Entry,
            source_factory=fs.File)
    return env['BUILDERS']['JavaClassFile']
def CreateJavaClassDirBuilder(env):
    """Return the JavaClassDir Builder for *env*, creating and
    registering it on first use.  Both target and source are
    directories."""
    if 'JavaClassDir' not in env['BUILDERS']:
        fs = SCons.Node.FS.get_default_fs()
        javac_action = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
        env['BUILDERS']['JavaClassDir'] = SCons.Builder.Builder(
            action=javac_action,
            emitter={},
            target_factory=fs.Dir,
            source_factory=fs.Dir)
    return env['BUILDERS']['JavaClassDir']
def CreateJavaFileBuilder(env):
    """Return the JavaFile Builder for *env*, creating and registering
    it (plus the '.java' source suffix) on first use."""
    if 'JavaFile' not in env['BUILDERS']:
        env['BUILDERS']['JavaFile'] = SCons.Builder.Builder(
            action={},
            emitter={},
            suffix={None: '$JAVASUFFIX'})
        env['JAVASUFFIX'] = '.java'
    return env['BUILDERS']['JavaFile']
class ToolInitializerMethod:
    """
    This is added to a construction environment in place of a
    method(s) normally called for a Builder (env.Object, env.StaticObject,
    etc.). When called, it has its associated ToolInitializer
    object search the specified list of tools and apply the first
    one that exists to the construction environment. It then calls
    whatever builder was (presumably) added to the construction
    environment in place of this particular instance.
    """
    def __init__(self, name, initializer):
        """
        Note: we store the tool name as __name__ so it can be used by
        the class that attaches this to a construction environment.
        """
        self.__name__ = name
        self.initializer = initializer
    def get_builder(self, env):
        """
        Returns the appropriate real Builder for this method name
        after having the associated ToolInitializer object apply
        the appropriate Tool module.
        """
        # NOTE(review): the result of this first getattr is immediately
        # overwritten below; it appears to be a leftover.
        builder = getattr(env, self.__name__)
        # Applying the tools may replace this stub with a real Builder.
        self.initializer.apply_tools(env)
        builder = getattr(env, self.__name__)
        if builder is self:
            # There was no Builder added, which means no valid Tool
            # for this name was found (or possibly there's a mismatch
            # between the name we were called by and the Builder name
            # added by the Tool module).
            return None
        # A real Builder is in place now, so all the stub methods can go.
        self.initializer.remove_methods(env)
        return builder
    def __call__(self, env, *args, **kw):
        """
        Forward the call to the real Builder; return empty target and
        source lists when no tool could supply one.
        """
        builder = self.get_builder(env)
        if builder is None:
            return [], []
        # apply() is the Python 2 spelling of builder(*args, **kw).
        return apply(builder, args, kw)
class ToolInitializer:
    """Delayed initialization of Tool modules.

    An instance ties a list of Tool modules to the Builder method names
    those modules will provide.  On construction it installs a
    ToolInitializerMethod stub on the environment for each name, so the
    (possibly expensive) Tool search is postponed until one of those
    Builder methods is first invoked.
    """
    def __init__(self, env, tools, names):
        # Accept a single tool/name or a list of either.
        if not SCons.Util.is_List(tools):
            tools = [tools]
        if not SCons.Util.is_List(names):
            names = [names]
        self.env = env
        self.tools = tools
        self.names = names
        self.methods = {}
        for builder_name in names:
            stub = ToolInitializerMethod(builder_name, self)
            self.methods[builder_name] = stub
            env.AddMethod(stub)
    def remove_methods(self, env):
        """Detach every stub method from *env* so the stubs are not
        copied and re-bound when the environment gets cloned."""
        for stub in self.methods.values():
            env.RemoveMethod(stub)
    def apply_tools(self, env):
        """Apply the first associated Tool module that exists to *env*."""
        for tool_name in self.tools:
            tool = SCons.Tool.Tool(tool_name)
            if tool.exists(env):
                env.Tool(tool)
                return
        # If we fall through here, there was no tool module found.
        # This is where we can put an informative error message
        # about the inability to find the tool.   We'll start doing
        # this as we cut over more pre-defined Builder+Tools to use
        # the ToolInitializer class.
def Initializers(env):
    # Register lazy Install/InstallAs: the real implementations
    # (_InternalInstall/_InternalInstallAs) come from the 'install' tool,
    # which is only loaded on first use via the ToolInitializer machinery.
    ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs'])
    def Install(self, *args, **kw):
        # apply() is the Python 2 spelling of self._InternalInstall(*args, **kw).
        return apply(self._InternalInstall, args, kw)
    def InstallAs(self, *args, **kw):
        return apply(self._InternalInstallAs, args, kw)
    env.AddMethod(Install)
    env.AddMethod(InstallAs)
def FindTool(tools, env):
    """Return the first name in *tools* whose tool module reports that
    it exists for *env*, or None when none of them do."""
    for candidate in tools:
        if Tool(candidate).exists(env):
            return candidate
    return None
def FindAllTools(tools, env):
    """Return every name in *tools* whose tool module exists for *env*."""
    return filter(lambda tool: Tool(tool).exists(env), tools)
def tool_list(platform, env):
    """Return the default list of tool names for *platform*.

    Picks one linker, C compiler, C++ compiler, assembler, Fortran
    compiler and archiver — preferring the platform's native toolchain
    and falling back to the first candidate when none is detected —
    plus every auxiliary tool that reports itself present.
    """
    # XXX this logic about what tool to prefer on which platform
    # should be moved into either the platform files or
    # the tool files themselves.
    # The search orders here are described in the man page. If you
    # change these search orders, update the man page as well.
    if str(platform) == 'win32':
        "prefer Microsoft tools on Windows"
        linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
        c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
        cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
        assemblers = ['masm', 'nasm', 'gas', '386asm' ]
        fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
        ars = ['mslib', 'ar', 'tlib']
    elif str(platform) == 'os2':
        "prefer IBM tools on OS/2"
        linkers = ['ilink', 'gnulink', 'mslink']
        c_compilers = ['icc', 'gcc', 'msvc', 'cc']
        cxx_compilers = ['icc', 'g++', 'msvc', 'c++']
        assemblers = ['nasm', 'masm', 'gas']
        fortran_compilers = ['ifl', 'g77']
        ars = ['ar', 'mslib']
    elif str(platform) == 'irix':
        "prefer MIPSPro on IRIX"
        linkers = ['sgilink', 'gnulink']
        c_compilers = ['sgicc', 'gcc', 'cc']
        cxx_compilers = ['sgic++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['sgiar']
    elif str(platform) == 'sunos':
        "prefer Forte tools on SunOS"
        linkers = ['sunlink', 'gnulink']
        c_compilers = ['suncc', 'gcc', 'cc']
        cxx_compilers = ['sunc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
                             'gfortran', 'g77', 'fortran']
        ars = ['sunar']
    elif str(platform) == 'hpux':
        "prefer aCC tools on HP-UX"
        linkers = ['hplink', 'gnulink']
        c_compilers = ['hpcc', 'gcc', 'cc']
        cxx_compilers = ['hpc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'aix':
        "prefer AIX Visual Age tools on AIX"
        linkers = ['aixlink', 'gnulink']
        c_compilers = ['aixcc', 'gcc', 'cc']
        cxx_compilers = ['aixc++', 'g++', 'c++']
        assemblers = ['as', 'gas']
        fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
        ars = ['ar']
    elif str(platform) == 'darwin':
        "prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
        linkers = ['applelink', 'gnulink']
        c_compilers = ['gcc', 'cc']
        cxx_compilers = ['g++', 'c++']
        assemblers = ['as']
        fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
        ars = ['ar']
    else:
        "prefer GNU tools on all other platforms"
        linkers = ['gnulink', 'mslink', 'ilink']
        c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
        cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
        assemblers = ['gas', 'nasm', 'masm']
        fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
        ars = ['ar', 'mslib']
    # Fall back to the first candidate when no C compiler is detected.
    c_compiler = FindTool(c_compilers, env) or c_compilers[0]
    # XXX this logic about what tool provides what should somehow be
    # moved into the tool files themselves.
    if c_compiler and c_compiler == 'mingw':
        # MinGW contains a linker, C compiler, C++ compiler,
        # Fortran compiler, archiver and assembler:
        cxx_compiler = None
        linker = None
        assembler = None
        fortran_compiler = None
        ar = None
    else:
        # Don't use g++ if the C compiler has built-in C++ support:
        if c_compiler in ('msvc', 'intelc', 'icc'):
            cxx_compiler = None
        else:
            cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
        linker = FindTool(linkers, env) or linkers[0]
        assembler = FindTool(assemblers, env) or assemblers[0]
        fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
        ar = FindTool(ars, env) or ars[0]
    # Auxiliary tools are included only when actually detected.
    other_tools = FindAllTools(['BitKeeper', 'CVS',
                                'dmd',
                                'filesystem',
                                'dvipdf', 'dvips', 'gs',
                                'jar', 'javac', 'javah',
                                'latex', 'lex',
                                'm4', 'midl', 'msvs',
                                'pdflatex', 'pdftex', 'Perforce',
                                'RCS', 'rmic', 'rpcgen',
                                'SCCS',
                                # 'Subversion',
                                'swig',
                                'tar', 'tex',
                                'yacc', 'zip', 'rpm', 'wix'],
                               env)
    tools = ([linker, c_compiler, cxx_compiler,
              fortran_compiler, assembler, ar]
             + other_tools)
    # Drop the None placeholders left by the mingw special case above.
    return filter(lambda x: x, tools)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import webob
import webob.dec
import webob.exc
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.auth import manager
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api")
# Only enable use_forwarded_for behind a proxy that sanitizes the header;
# otherwise clients can spoof their source address.
flags.DEFINE_boolean('use_forwarded_for', False,
                     'Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.')
# Tuning knobs for the Lockout middleware below: after lockout_attempts
# failed auths within lockout_window minutes, block for lockout_minutes.
flags.DEFINE_integer('lockout_attempts', 5,
                     'Number of failed auths before lockout.')
flags.DEFINE_integer('lockout_minutes', 15,
                     'Number of minutes to lockout if triggered.')
flags.DEFINE_integer('lockout_window', 15,
                     'Number of minutes for lockout window.')
class RequestLogging(wsgi.Middleware):
    """Middleware that writes an access-log style line for every EC2
    API request it passes through."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        started_at = utils.utcnow()
        response = req.get_response(self.application)
        self.log_request_completion(response, req, started_at)
        return response

    def log_request_completion(self, response, request, start):
        """Log one finished request: timing, peer, EC2 controller/action,
        status and content types."""
        # The ec2.request entry only exists once Requestify has run.
        apireq = request.environ.get('ec2.request', None)
        controller = apireq.controller if apireq else None
        action = apireq.action if apireq else None
        ctxt = request.environ.get('ec2.context', None)
        delta = utils.utcnow() - start
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            delta.seconds,
            delta.microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)
class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag
    Uses memcached if memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
    that period.
    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts."""
    def __init__(self, application):
        """middleware can use fake for testing."""
        # Real memcached when servers are configured, otherwise the
        # in-process fake (also what tests rely on).
        if FLAGS.memcached_servers:
            import memcache
        else:
            from nova import fakememcache as memcache
        self.mc = memcache.Client(FLAGS.memcached_servers,
                                  debug=0)
        super(Lockout, self).__init__(application)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        # Missing cache entry counts as zero failures.
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= FLAGS.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(detail=detail)
        res = req.get_response(self.application)
        # A 403 from downstream means this request failed authentication.
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
            elif failures >= FLAGS.lockout_attempts:
                # Threshold reached: extend the entry's lifetime to the full
                # lockout duration.
                lock_mins = FLAGS.lockout_minutes
                msg = _('Access key %(access_key)s has had %(failures)d'
                        ' failed authentications and will be locked out'
                        ' for %(lock_mins)d minutes.') % locals()
                LOG.warn(msg)
                self.mc.set(failures_key, str(failures),
                            time=FLAGS.lockout_minutes * 60)
        return res
class Authenticate(wsgi.Middleware):
    """Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Read request signature and access id.  Both are mandatory query
        # parameters, so only a missing key is a client error.
        try:
            signature = req.params['Signature']
            access = req.params['AWSAccessKeyId']
        except KeyError:
            # Narrowed from a bare `except:` which also swallowed
            # programming errors (and even KeyboardInterrupt) as 400s.
            raise webob.exc.HTTPBadRequest()
        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # The signature itself is not part of the signed parameters.
        auth_params.pop('Signature')
        # Authenticate the request.
        try:
            (user, project) = manager.AuthManager().authenticate(
                access,
                signature,
                auth_params,
                req.method,
                req.host,
                req.path)
        # Be explicit for what exceptions are 403, the rest bubble as 500
        except (exception.NotFound, exception.NotAuthorized) as ex:
            LOG.audit(_("Authentication Failure: %s"), unicode(ex))
            raise webob.exc.HTTPForbidden()
        # Authenticated!  Build the request context for downstream apps.
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctxt = context.RequestContext(user=user,
                                      project=project,
                                      remote_address=remote_address)
        req.environ['ec2.context'] = ctxt
        uname = user.name
        pname = project.name
        # (fixed an unbalanced ')' that used to trail this message)
        msg = _('Authenticated Request For %(uname)s:%(pname)s') % locals()
        LOG.audit(msg, context=req.environ['ec2.context'])
        return self.application
class Requestify(wsgi.Middleware):
    """Turn a raw EC2 query into an APIRequest in 'ec2.request'.

    Strips the protocol-level parameters from the argument dict, builds
    an APIRequest for the named action, and stores both the request and
    the remaining action arguments in the WSGI environ.
    """

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        # controller is a dotted class path; instantiate it once.
        self.controller = utils.import_class(controller)()

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except (KeyError, ValueError):
            # Narrowed from a bare `except:` which also masked programming
            # errors as 400s.  KeyError: required parameter missing;
            # ValueError: non-integer SignatureVersion.
            raise webob.exc.HTTPBadRequest()
        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        req.environ['ec2.action_args'] = args
        return self.application
class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.
    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in ec2.context.
    """
    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Maps controller class name -> {action name: roles allowed to run
        # it}.  'all' admits any authenticated user; actions absent from a
        # controller's map default to ['none'], which only superusers pass
        # (see _matches_any_role).
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Requestify and Authenticate must have run before this middleware.
        context = req.environ['ec2.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s') % locals(), context=context)
            raise webob.exc.HTTPUnauthorized()
    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        # Superusers bypass all role checks, including 'none'.
        if context.user.is_superuser():
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(context.project.has_role(context.user_id, role)
                   for role in roles)
class Executor(wsgi.Application):
    """Execute an EC2 API request.
    Executes 'ec2.action' upon 'ec2.controller', passing 'ec2.context' and
    'ec2.action_args' (all variables in WSGI environ.) Returns an XML
    response, or a 400 upon failure.
    """
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['ec2.context']
        api_request = req.environ['ec2.request']
        result = None
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
                     context=context)
            # Exception message already names the missing instance.
            return self._error(req, context, type(ex).__name__, ex.message)
        except exception.VolumeNotFound as ex:
            LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
                     context=context)
            # Translate the internal volume id into its EC2-style id.
            ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x')
            message = _('Volume %s not found') % ec2_id
            return self._error(req, context, type(ex).__name__, message)
        except exception.NotFound as ex:
            # Generic not-found catch-all, after the specific subclasses.
            LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
            return self._error(req, context, type(ex).__name__, unicode(ex))
        except exception.ApiError as ex:
            LOG.exception(_('ApiError raised: %s'), unicode(ex),
                          context=context)
            if ex.code:
                # ApiError may carry an explicit EC2 error code.
                return self._error(req, context, ex.code, unicode(ex))
            else:
                return self._error(req, context, type(ex).__name__,
                                   unicode(ex))
        except Exception as ex:
            # Unexpected failure: log the full environment but give the
            # client a generic message.
            extra = {'environment': req.environ}
            LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
                          extra=extra, context=context)
            return self._error(req,
                               context,
                               'UnknownError',
                               _('An unknown error has occurred. '
                                 'Please try your request again.'))
        else:
            # Success: wrap the controller's result in a 200 XML response.
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
    def _error(self, req, context, code, message):
        """Build the 400 XML error response carrying code, message and
        the request id from the context."""
        LOG.error("%s: %s", code, message, context=context)
        resp = webob.Response()
        resp.status = 400
        resp.headers['Content-Type'] = 'text/xml'
        resp.body = str('<?xml version="1.0"?>\n'
                        '<Response><Errors><Error><Code>%s</Code>'
                        '<Message>%s</Message></Error></Errors>'
                        '<RequestID>%s</RequestID></Response>' %
                        (utils.utf8(code), utils.utf8(message),
                         utils.utf8(context.request_id)))
        return resp
class Versions(wsgi.Application):
    """Report the EC2 API versions this endpoint supports."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Return the supported EC2 API version strings, one per line."""
        supported = ('1.0',
                     '2007-01-19',
                     '2007-03-01',
                     '2007-08-29',
                     '2007-10-10',
                     '2007-12-15',
                     '2008-02-01',
                     '2008-09-01',
                     '2009-04-04')
        # Each version is followed by a newline, including the last one.
        return '\n'.join(supported) + '\n'
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .peak_finder import peak_finder
from .. import pick_types, pick_channels
from ..utils import logger, verbose
from ..filter import band_pass_filter
from ..epochs import Epochs
@verbose
def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
                    filter_length='10s', ch_name=None, tstart=0,
                    verbose=None):
    """Locate EOG (blink) artifact events in raw data.

    Parameters
    ----------
    raw : instance of Raw
        The raw data to search.
    event_id : int
        The id assigned to the detected events.
    l_freq : float
        Low cut-off frequency in Hz for the detection band-pass.
    h_freq : float
        High cut-off frequency in Hz for the detection band-pass.
    filter_length : str | int | None
        Number of taps to use for filtering.
    ch_name : str | None
        If not None, use the specified channel(s) for EOG detection.
    tstart : float
        Start detection after tstart seconds.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    eog_events : array
        The detected events.
    """
    # Resolve which channel(s) to treat as EOG, then run the detector on
    # their data.
    picks_eog = _get_eog_channel_index(ch_name, raw)
    logger.info('EOG channel index for this subject is: %s' % picks_eog)
    eog_data, _ = raw[picks_eog, :]
    return _find_eog_events(eog_data, event_id=event_id, l_freq=l_freq,
                            h_freq=h_freq,
                            sampling_rate=raw.info['sfreq'],
                            first_samp=raw.first_samp,
                            filter_length=filter_length,
                            tstart=tstart)
def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
                     filter_length='10s', tstart=0.):
    """Detect blink peaks in EOG traces and build an events array (helper)."""
    logger.info('Filtering the data to remove DC offset to help '
                'distinguish blinks from saccades')
    # Broad band-pass (2 Hz .. ~45 Hz, capped below Nyquist) removes the DC
    # offset so blinks and saccades can be told apart.
    freq_upper = np.minimum(45, sampling_rate / 2.0 - 0.75)
    filtered = np.array([band_pass_filter(channel, sampling_rate, 2,
                                          freq_upper,
                                          filter_length=filter_length)
                         for channel in eog])
    # Keep the channel with the largest energy; peaks are easier to detect
    # on the filtered signal.
    best_idx = np.argmax(np.sqrt(np.sum(filtered ** 2, axis=1)))
    filteog = band_pass_filter(eog[best_idx], sampling_rate, l_freq, h_freq,
                               filter_length=filter_length)
    logger.info('Now detecting blinks and generating corresponding events')
    centered = filteog - np.mean(filteog)
    start_idx = int(sampling_rate * tstart)
    # Search for maxima or minima depending on the dominant polarity.
    polarity = 1 if np.abs(np.max(centered)) > np.abs(np.min(centered)) else -1
    eog_events, _ = peak_finder(filteog[start_idx:], extrema=polarity)
    eog_events += start_idx
    n_events = len(eog_events)
    logger.info("Number of EOG events detected : %d" % n_events)
    # Columns: sample (absolute, first_samp-based), previous value, event id.
    return np.c_[eog_events + first_samp, np.zeros(n_events),
                 event_id * np.ones(n_events)]
def _get_eog_channel_index(ch_name, inst):
    """Resolve the indices of the EOG channel(s) to use (helper).

    *ch_name* may be a single channel name, a comma-separated list of
    names, or None (auto-detect EOG-typed channels, falling back to
    'EEG 061'/'EEG 062').
    """
    if ch_name is None:
        # Auto-detect: channels tagged as EOG in the measurement info.
        eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
                              eog=True, ecg=False, emg=False, ref_meg=False,
                              exclude='bads')
        if len(eog_inds) == 0:
            logger.info('No EOG channels found')
            logger.info('Trying with EEG 061 and EEG 062')
            eog_inds = pick_channels(inst.ch_names,
                                     include=['EEG 061', 'EEG 062'])
            if len(eog_inds) != 2:
                raise RuntimeError('EEG 61 or EEG 62 channel not found !!')
        return eog_inds
    if not isinstance(ch_name, str):
        raise ValueError('Could not find EOG channel.')
    # Allow a comma-separated list of channel names.
    names = ch_name.split(',') if ',' in ch_name else [ch_name]
    eog_inds = pick_channels(inst.ch_names, include=names)
    if len(eog_inds) == 0:
        raise ValueError('%s not in channel list' % names)
    logger.info('Using channel %s as EOG channel%s' % (
        " and ".join(names),
        '' if len(eog_inds) < 2 else 's'))
    return eog_inds
@verbose
def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
                      tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
                      reject=None, flat=None,
                      baseline=None, verbose=None):
    """Conveniently generate epochs around EOG artifact events.

    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    ch_name : str
        The name of the channel to use for EOG peak detection.
        The argument is mandatory if the dataset contains no EOG channels.
    event_id : int
        The id to assign to the detected events.
    picks : array-like of int | None (default)
        Indices of channels to include (if None, all channels are used).
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    l_freq : float
        Low cut-off frequency in Hz for EOG event detection.
    h_freq : float
        High cut-off frequency in Hz for EOG event detection.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::
            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # uV (EEG channels)
                          eog=250e-6 # uV (EOG channels)
                          )
    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    baseline : tuple or list of length 2, or None
        The time interval to apply rescaling / baseline correction.
        If None do not apply it. If baseline is (a, b) the interval is
        between "a (s)" and "b (s)". If a is None the beginning of the
        data is used and if b is None then b is set to the end of the
        interval. If baseline is equal to (None, None) the whole time
        interval is used. If None, no correction is applied.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    eog_epochs : instance of Epochs
        Data epoched around the detected EOG events.
    """
    # Detect blink events, then cut epochs around them (preloaded, no proj).
    events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
                             l_freq=l_freq, h_freq=h_freq)
    return Epochs(raw, events=events, event_id=event_id, tmin=tmin,
                  tmax=tmax, proj=False, reject=reject, flat=flat,
                  picks=picks, baseline=baseline, preload=True)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to `Sequential` model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TestSequential(keras_parameterized.TestCase):
  """Most Sequential model API tests are covered in `training_test.py`.
  """
  @keras_parameterized.run_all_keras_modes
  def test_basic_methods(self):
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1, input_dim=2))
    model.add(keras.layers.Dropout(0.3, name='dp'))
    model.add(keras.layers.Dense(2, kernel_regularizer='l2',
                                 kernel_constraint='max_norm'))
    self.assertEqual(len(model.layers), 3)
    # Two Dense layers, each with a kernel and a bias.
    self.assertEqual(len(model.weights), 2 * 2)
    self.assertEqual(model.get_layer(name='dp').name, 'dp')
  @keras_parameterized.run_all_keras_modes
  def test_input_defined_first_layer(self):
    # Same as test_basic_methods but the input is declared explicitly via
    # keras.Input, which must not count as a layer.
    model = keras.models.Sequential()
    model.add(keras.Input(shape=(2,), name='input_layer'))
    model.add(keras.layers.Dense(1))
    model.add(keras.layers.Dropout(0.3, name='dp'))
    model.add(keras.layers.Dense(2, kernel_regularizer='l2',
                                 kernel_constraint='max_norm'))
    self.assertLen(model.layers, 3)
    self.assertLen(model.weights, 2 * 2)
    self.assertEqual(model.get_layer(name='dp').name, 'dp')
  @keras_parameterized.run_all_keras_modes
  def test_sequential_pop(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2
    model = testing_utils.get_small_sequential_mlp(
        num_hidden, num_classes, input_dim)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)
    # After pop() the output layer is gone, so the model now ends at the
    # hidden layer and must be recompiled before further training.
    model.pop()
    self.assertEqual(len(model.layers), 1)
    self.assertEqual(model.output_shape, (None, num_hidden))
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.random((batch_size, num_hidden))
    model.fit(x, y, epochs=1)
    # Test popping single-layer model
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
    model.pop()
    self.assertEqual(model.layers, [])
    self.assertEqual(model.outputs, None)
    # Invalid use case
    model = keras.models.Sequential()
    with self.assertRaises(TypeError):
      model.pop()
  @keras_parameterized.run_all_keras_modes
  def test_sequential_deferred_build_with_np_arrays(self):
    # No input_dim given: the model stays unbuilt (no weights) until it
    # first sees data in fit().
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2
    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertEqual(len(model.layers), 2)
    self.assertEqual(len(model.weights), 0)
    self.assertFalse(model.built)
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.fit(x, y, epochs=1)
    self.assertTrue(model.built)
    self.assertFalse(model._is_graph_network)
    self.assertEqual(len(model.weights), 2 * 2)
  @keras_parameterized.run_all_keras_modes
  def test_sequential_deferred_build_with_dataset_iterators(self):
    num_hidden = 5
    input_dim = 3
    num_classes = 2
    num_samples = 50
    steps_per_epoch = 10
    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertEqual(len(model.layers), 2)
    self.assertEqual(len(model.weights), 0)
    self.assertFalse(model.built)
    # Deferred build must also work when fed from a dataset iterator
    # rather than numpy arrays.
    x = array_ops.ones((num_samples, input_dim))
    y = array_ops.zeros((num_samples, num_classes))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    model.fit(iterator, epochs=1, steps_per_epoch=steps_per_epoch)
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 2 * 2)
    self.assertFalse(model._is_graph_network)
  # TODO(kaftan) This test fails w/ run_with_all_keras_modes. File ticket
  @parameterized.parameters((True,), (False,))
  @tf_test_util.run_deprecated_v1
  def test_training_and_eval_methods_on_symbolic_tensors(self, deferred):
    with self.cached_session():
      def get_model():
        # `deferred` toggles between a model with and without a known
        # input_dim at construction time.
        if deferred:
          model = testing_utils.get_small_sequential_mlp(10, 4)
        else:
          model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3)
        model.compile(
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])
        return model
      inputs = keras.backend.zeros(shape=(10, 3))
      targets = keras.backend.zeros(shape=(10, 4))
      model = get_model()
      model.fit(inputs, targets, epochs=10, steps_per_epoch=30)
      model = get_model()
      model.evaluate(inputs, targets, steps=2, verbose=0)
      model = get_model()
      model.predict(inputs, steps=2)
      model = get_model()
      model.train_on_batch(inputs, targets)
      model = get_model()
      model.test_on_batch(inputs, targets)
      model = get_model()
      model.fit(
          inputs,
          targets,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          validation_data=(inputs, targets),
          validation_steps=2)
  @keras_parameterized.run_all_keras_modes
  def test_invalid_use_cases(self):
    # Added objects must be layer instances
    with self.assertRaises(TypeError):
      model = keras.models.Sequential()
      model.add(None)
    # Added layers cannot have multiple outputs
    class MyLayer(keras.layers.Layer):
      def call(self, inputs):
        return [3 * inputs, 2 * inputs]
      def compute_output_shape(self, input_shape):
        return [input_shape, input_shape]
    with self.assertRaises(ValueError):
      model = keras.models.Sequential()
      model.add(MyLayer(input_shape=(3,)))
    with self.assertRaises(TypeError):
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(1, input_dim=1))
      model.add(MyLayer())
  @keras_parameterized.run_all_keras_modes
  def test_nested_sequential_trainability(self):
    # Toggling `trainable` on a nested model must hide/expose its weights
    # in the outer model's trainable_weights.
    input_dim = 20
    num_units = 10
    num_classes = 2
    inner_model = keras.models.Sequential()
    inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,)))
    model = keras.models.Sequential()
    model.add(inner_model)
    model.add(keras.layers.Dense(num_classes))
    self.assertEqual(len(model.layers), 2)
    self.assertEqual(len(model.trainable_weights), 4)
    inner_model.trainable = False
    self.assertEqual(len(model.trainable_weights), 2)
    inner_model.trainable = True
    self.assertEqual(len(model.trainable_weights), 4)
  def test_sequential_update_disabling(self):
    # With trainable=False, BatchNormalization must not update its moving
    # statistics during train_on_batch, so predictions stay identical.
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.BatchNormalization(input_shape=(4,)))
      assert model.updates
      model.trainable = False
      assert not model.updates
      model.compile('sgd', 'mse')
      assert not model.updates
      x1 = model.predict(val_a)
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      self.assertAllClose(x1, x2, atol=1e-7)
      model.trainable = True
      model.compile('sgd', 'mse')
      assert model.updates
      model.train_on_batch(val_a, val_out)
      x2 = model.predict(val_a)
      assert np.abs(np.sum(x1 - x2)) > 1e-5
  @keras_parameterized.run_all_keras_modes
  def test_sequential_deferred_build_serialization(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2
    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertFalse(model.built)
    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.train_on_batch(x, y)
    self.assertTrue(model.built)
    # A deferred-built model records the shape it was built with so that
    # from_config can rebuild it with weights.
    config = model.get_config()
    self.assertIn('build_input_shape', config)
    new_model = keras.models.Sequential.from_config(config)
    self.assertEqual(len(new_model.layers), 2)
    self.assertEqual(len(new_model.weights), 4)
  @keras_parameterized.run_all_keras_modes
  def test_sequential_shape_inference_deferred(self):
    model = testing_utils.get_small_sequential_mlp(4, 5)
    output_shape = model.compute_output_shape((None, 7))
    self.assertEqual(tuple(output_shape.as_list()), (None, 5))
  @keras_parameterized.run_all_keras_modes
  def test_sequential_build_deferred(self):
    model = testing_utils.get_small_sequential_mlp(4, 5)
    model.build((None, 10))
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 4)
    # Test with nested model
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)
    model.build((None, 10))
    self.assertTrue(model.built)
    self.assertEqual(len(model.weights), 8)
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_sequential_deferred_manual_build(self):
    model = testing_utils.get_small_sequential_mlp(4, 5)
    self.assertFalse(model.built)
    model(array_ops.zeros([1, 2]))
    self.assertTrue(model.built)
    # Calling the model builds it but does not create symbolic outputs;
    # those only appear after training starts.
    self.assertEqual(len(model.outputs), 0)
    model.compile('rmsprop',
                  loss='mse',
                  run_eagerly=testing_utils.should_run_eagerly())
    self.assertEqual(len(model.outputs), 0)
    model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
    self.assertEqual(len(model.outputs), 1)
  @keras_parameterized.run_all_keras_modes
  def test_sequential_nesting(self):
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
  @keras_parameterized.run_all_keras_modes
  def test_variable_names(self):
    # Variable names must be scoped under the model and layer names.
    model = keras.models.Sequential([keras.layers.Dense(3)])
    model.add(keras.layers.Dense(2))
    model(array_ops.ones([2, 4]))
    self.assertEqual(
        ['sequential/dense/kernel:0', 'sequential/dense/bias:0',
         'sequential/dense_1/kernel:0', 'sequential/dense_1/bias:0'],
        [v.name for v in model.variables])
  @keras_parameterized.run_all_keras_modes
  def test_input_assumptions_propagation(self):
    # The first layer's input spec (min_ndim=2) must be enforced when the
    # model is called on a scalar.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1))
    if context.executing_eagerly():
      with self.assertRaisesRegexp(ValueError,
                                   'expected min_ndim=2, found ndim=0'):
        model(1.0)
class TestSequentialEagerIntegration(keras_parameterized.TestCase):
  """Eager-specific integration tests for `Sequential` models."""
  @keras_parameterized.run_all_keras_modes
  def test_defun_on_call(self):
    # Check that one can subclass Sequential and place the `call` in a `defun`.
    class MySequential(keras.Sequential):
      def __init__(self, name=None):
        super(MySequential, self).__init__(name=name)
        self.call = function.defun(self.call)
    model = MySequential()
    model.add(keras.layers.Dense(4, activation='relu'))
    model.add(keras.layers.Dense(5, activation='softmax'))
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
  @keras_parameterized.run_all_keras_modes
  def test_build_before_fit(self):
    # Fix for b/112433577
    # Explicitly building after compile must not break a later fit().
    model = testing_utils.get_small_sequential_mlp(4, 5)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    model.build((None, 6))
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
  @keras_parameterized.run_all_keras_modes
  def test_sequential_model_fails_with_dict_inputs(self):
    num_classes = 5
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=10, num_classes=num_classes)
    model.compile(
        'rmsprop',
        metrics=['acc'],
        weighted_metrics=['mae'],
        loss='categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    # Dict inputs are only valid when the first layer is a FeatureLayer.
    x = {'dense_input': np.random.random((10, 1))}
    y = np.random.randint(num_classes, size=(10, 1))
    with self.assertRaisesRegexp(
        ValueError, 'Passing a dictionary input to a Sequential Model which '
        'doesn\'t have FeatureLayer as the first layer is an error'):
      model.fit(x, y, batch_size=5, epochs=1)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
# Django settings for djangoapp project.
import socket
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Eduardo Gonzalo Espinoza Carreon', 'edubecks007@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {}
## local dev
# Host-name sniffing selects the local development configuration; any
# other host falls through to the Heroku-style DATABASE_URL setup below.
if (
    socket.gethostname() == 'SpiderMac.local'
):
    DEBUG = True
    ## database
    # All connection details come from CARONAS_BRASIL_* environment
    # variables; os.getenv returns None for any that are unset.
    DB_NAME = os.getenv('CARONAS_BRASIL_DB_NAME')
    DB_USER = os.getenv('CARONAS_BRASIL_DB_USER')
    DB_PASSWORD = os.getenv('CARONAS_BRASIL_DB_PASSWORD')
    DB_HOST = os.getenv('CARONAS_BRASIL_DB_HOST')
    DB_PORT = os.getenv('CARONAS_BRASIL_DB_PORT')
    DB_ENGINE = os.getenv('CARONAS_BRASIL_DB_ENGINE')
    DATABASES['default'] = {
        'ENGINE': DB_ENGINE, # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': DB_NAME, # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': DB_USER,
        'PASSWORD': DB_PASSWORD,
        'HOST': DB_HOST, # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': DB_PORT, # Set to empty string for default.
    }
else:
    import dj_database_url
    # Parses the DATABASE_URL environment variable (Heroku convention).
    DATABASES['default'] = dj_database_url.config()
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['www.vaidecarona.org', 'caronasbrasil.herokuapp.com', 'localhost', 'loc-caronasbrasil.com']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
# Serve static files locally in development, from S3 in production.
if DEBUG:
    STATIC_URL = '/static/'
else:
    STATIC_URL = 'https://s3.amazonaws.com/caronas-brasil/staticfiles/'
# PROJECT_ROOT is this settings file's directory; PROJECT_DIR its parent.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_DIR = os.path.abspath(os.path.join(PROJECT_ROOT, os.path.pardir))
STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, 'staticfiles/'))
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.abspath(os.path.join(PROJECT_DIR, 'static/')),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.getenv('CARONAS_BRASIL_DJANGO_SECRET_KEY')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djangoapp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'djangoapp.wsgi.application'
# NOTE(review): the '..//' segment looks accidental but still resolves to
# the parent directory; verify before "cleaning" it up.
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), '..//', 'templates').replace('\\','/'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'djangoapp.apps.caronasbrasil',
    'south',
    # 'social.apps.django_app.default',
    ## production
    'gunicorn',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
#
# ## facebook login
# ## social auth
# ## http://psa.matiasaguirre.net/docs/backends/facebook.html#oauth2
# SOCIAL_AUTH_FACEBOOK_KEY = os.getenv('CARONAS_BRASIL_FB_APP_ID')
# SOCIAL_AUTH_FACEBOOK_SECRET = os.getenv('CARONAS_BRASIL_FB_APP_SECRET')
#
#
# AUTHENTICATION_BACKENDS = (
# 'social.backends.facebook.FacebookOAuth2',
# # 'social.apps.django_app.utils.BackendWrapper',
# )
#
#
# TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
# 'django.core.context_processors.i18n',
# 'django.core.context_processors.media',
# 'django.contrib.messages.context_processors.messages',
# 'social.apps.django_app.context_processors.backends',
# )
#
#
# LOGIN_URL = '/login/'
# LOGIN_REDIRECT_URL = '/done/'
# URL_PATH = ''
# SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
# SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
# SOCIAL_AUTH_GOOGLE_OAUTH_SCOPE = [
# 'https://www.googleapis.com/auth/drive',
# 'https://www.googleapis.com/auth/userinfo.profile'
# ]
# # SOCIAL_AUTH_EMAIL_FORM_URL = '/signup-email'
# SOCIAL_AUTH_EMAIL_FORM_HTML = 'email_signup.html'
# SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = 'example.app.mail.send_validation'
# SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/email-sent/'
# # SOCIAL_AUTH_USERNAME_FORM_URL = '/signup-username'
# SOCIAL_AUTH_USERNAME_FORM_HTML = 'username_signup.html'
#
# SOCIAL_AUTH_PIPELINE = (
# # 'social.pipeline.social_auth.social_details',
# # 'social.pipeline.social_auth.social_uid',
# # 'social.pipeline.social_auth.auth_allowed',
# # 'social.pipeline.social_auth.social_user',
# # 'social.pipeline.user.get_username',
# # 'example.app.pipeline.require_email',
# # 'social.pipeline.mail.mail_validation',
# # 'social.pipeline.user.create_user',
# # 'social.pipeline.social_auth.associate_user',
# # 'social.pipeline.social_auth.load_extra_data',
# # 'social.pipeline.user.user_details'
# )
| |
import subprocess
import tempfile
import shutil
import os
import re
import sys
import logging
import copy
from apptest.exception import *
# Path (relative to the working directory) of the bundled apktool jar used
# to decode and rebuild APKs in add_permission().
apktool_path = "tools/apktool.jar"
logger = logging.getLogger("apk")
class APK:
def __init__(self, path):
self.path = path
self.tempdir = None
self.details = None
def resign(self, keystore, keystore_pass, keystore_alias):
subprocess.call("zip -d %s META-INF*" % self.path, shell=True)
subprocess.check_call("jarsigner -keystore %s -storepass %s %s -sigalg MD5withRSA -digestalg SHA1 %s " % (keystore, keystore_pass, self.path, keystore_alias), shell=True)
def get_perms(self):
perms = subprocess.check_output("aapt d permissions %s" % self.path, shell=True)
perms = perms.split('\n')
ret = set()
perm_re = re.compile('uses-permission: (.*)')
for perm in perms:
if perm_re.match(perm):
item = perm_re.match(perm).group(1)
ret.add(item)
return ret
    def add_permission(self, perms):
        """Add missing permissions to the APK's manifest via apktool.

        Decodes the APK, rewrites AndroidManifest.xml (injecting any
        <uses-permission> entries from *perms* that the APK does not already
        declare, and replacing an android:sharedUserId value with
        "com.andchecker"), rebuilds the APK into a temp dir, and points
        self.path at the rebuilt file.  No-ops when nothing needs changing.
        """
        curr_perms = self.get_perms()
        perm_to_add = []
        for perm in perms:
            if not perm in curr_perms:
                perm_to_add += [perm]
        shared_uid = self.get_shared_uid()
        # Skip the expensive rebuild unless there are permissions to add or a
        # sharedUserId that must be rewritten (the maps shared uid case).
        if not perm_to_add and (not shared_uid or not "com.google.android.apps.maps" in shared_uid):
            return
        self.tempdir = tempfile.mkdtemp(prefix='andchecker', suffix='perm')
        workdir = self.tempdir + "/mod"
        output_apk = self.tempdir + "/modified.apk"
        # Decode resources with apktool (-s keeps classes.dex as-is).
        subprocess.check_call("java -jar %s d -s %s -o %s" % (apktool_path, self.path, workdir), shell = True)
        # subprocess.check_call("unzip %s AndroidManifest.xml" % self.path, shell = True)
        # subprocess.check_call("java -cp axml-0.9.jar:. AddPermission AndroidManifest.xml android.permission.INTERNET", shell = True)
        # shutil.copy(self.path, "modified.apk")
        # subprocess.check_call("zip -u modified.apk AndroidManifest.xml", shell = True)
        # subprocess.check_call("java -jar AXMLPrinter2.jar AndroidManifest.xml > AndroidManifest.unzip.xml", shell = True)
        # Stream the manifest line by line into a sibling file, patching the
        # sharedUserId in place and inserting the new permissions just before
        # the closing </manifest> tag.
        with open("%s/AndroidManifest.net.xml" % workdir, "w") as newf:
            with open("%s/AndroidManifest.xml" % workdir) as f:
                line = f.readline()
                while True:
                    if line == '':
                        break
                    if "android:sharedUserId=" in line:
                        startmark = "android:sharedUserId=\""
                        pos = line.find(startmark)
                        if pos != -1:
                            endpos = line.find("\"", pos + len(startmark))
                            if endpos != -1:
                                # Replace the quoted value with our own uid.
                                line = line[:pos + len(startmark)] + "com.andchecker" + line[endpos:]
                    if re.match("</manifest>", line):
                        for perm in perm_to_add:
                            newf.write("<uses-permission android:name=\"%s\"></uses-permission>" % perm)
                    newf.write(line)
                    line = f.readline()
        os.remove("%s/AndroidManifest.xml" % workdir)
        shutil.move("%s/AndroidManifest.net.xml" % workdir, "%s/AndroidManifest.xml" % workdir)
        # Rebuild the APK from the patched tree.
        subprocess.check_call("java -jar %s b %s -o %s" % (apktool_path, workdir, output_apk), shell = True)
        self.path = output_apk
        # Best-effort cleanup of the decoded tree; the rebuilt APK lives in
        # self.tempdir, which is intentionally kept.
        try:
            shutil.rmtree(workdir)
        except:
            pass
def get_details(self):
if not self.details:
self.details = subprocess.check_output("aapt l -a %s" % self.path, shell = True)
return self.details
def get_package(self):
details = self.get_details()
in_manifest = False
for line in details.split('\n'):
if 'E: manifest' in line:
in_manifest = True
elif 'A: package' in line and in_manifest:
match = re.search("A:\s+package=\"([^\"]+)\"", line)
if match:
logger.info("package detected: %s" % match.group(1))
return match.group(1)
raise InternalException("can't find package name")
def get_level(self, line):
    """Return the indentation depth (count of leading spaces) of `line`."""
    return len(line) - len(line.lstrip(' '))
def get_default_activity_full(self, package_name):
    """Return the launcher activity name, fully qualified with the package.

    A leading dot or a bare class name is expanded with `package_name`.
    """
    name = self.get_default_activity()
    if name.startswith('.'):
        name = package_name + name
    elif '.' not in name:
        name = package_name + "." + name
    logger.info("default activity detected: %s" % name)
    return name
def process_details(self, elem_cb = None, attr_cb = None):
    """Walk the aapt manifest dump, firing callbacks per element/attribute.

    elem_cb(stack, levels, name) runs for every `E:` element line;
    attr_cb(stack, levels, name, id, value, type) runs for every `A:`
    attribute line (type is None for quoted string attributes).  `stack`
    and `levels` describe the currently open elements, deepest last.
    """
    dump = self.get_details()
    open_elems = []
    open_levels = []
    elem_pat = re.compile(r"\s+E:\s+([a-zA-Z-]+)\s+.*")
    attr_pat = re.compile(r"\s+A:\s+([a-zA-Z-:]+)\(([0-9a-fx]+)\)\s*=\s*\"([^\"]+)\"\s*.*")
    typed_attr_pat = re.compile(r"\s+A:\s+([a-zA-Z-:]+)\(([0-9a-fx]+)\)\s*=\s*\(type\s+([^\)]+)\)([^\s]+)\s*.*")
    for raw in dump.split('\n'):
        depth = self.get_level(raw)
        # close every element whose indent is at or beyond this line's
        while open_elems and open_levels[-1] >= depth:
            open_elems.pop()
            open_levels.pop()
        elem = elem_pat.match(raw)
        if elem:
            open_elems.append(elem.group(1))
            open_levels.append(depth)
            if elem_cb:
                elem_cb(open_elems, open_levels, elem.group(1))
        if attr_cb:
            plain = attr_pat.match(raw)
            if plain:
                attr_cb(open_elems, open_levels, plain.group(1), plain.group(2), plain.group(3), None)
            typed = typed_attr_pat.match(raw)
            if typed:
                attr_cb(open_elems, open_levels, typed.group(1), typed.group(2), typed.group(4), typed.group(3))
def get_shared_uid(self):
    """Return the manifest's android:sharedUserId value, or None if absent."""
    found = []
    def collect(stack, level, name, id_, value, type_):
        # only a sharedUserId attribute directly on <manifest> counts
        if stack[-1] == "manifest" and name == "android:sharedUserId":
            found.append(value)
    self.process_details(attr_cb = collect)
    return found[0] if found else None
def has_something(self, sth):
    """Return True when the manifest dump contains an element named `sth`."""
    hits = []
    def on_elem(stack, level, name):
        if name == sth:
            hits.append(name)
    self.process_details(elem_cb = on_elem)
    return bool(hits)
def get_intent_filters(self):
    """Collect intent filters declared by activities and receivers.

    Returns a list of dicts with an 'action' key, optional 'category' /
    'data' keys, and either an 'activity' or a 'receiver' entry naming
    the owning component.

    NOTE(review): filters carrying all three of action+category+data are
    dropped by the `len(...) < 3` guard below -- presumably intentional,
    verify against callers.
    """
    current_filters = []
    intent_filters = []
    current_intent_filter = {}
    current_act = {}
    current_recv = {}
    state = {}
    def attr_cb(stack, level, name, id_, value, type_):
        if len(stack) >= 2 and stack[-2] == 'intent-filter':
            if stack[-1] == 'action':
                if name == 'android:name':
                    # SEND_MULTIPLE actions are deliberately skipped
                    if 'SEND_MULTIPLE' not in value:
                        current_intent_filter['action'] = value
            elif stack[-1] == 'category':
                if name == 'android:name':
                    if 'category' not in current_intent_filter:
                        current_intent_filter['category'] = []
                    current_intent_filter['category'].append(value)
            elif stack[-1] == 'data':
                if 'data' not in current_intent_filter:
                    current_intent_filter['data'] = {}
                if name == 'android:scheme':
                    if 'scheme' not in current_intent_filter['data']:
                        current_intent_filter['data']['scheme'] = []
                    current_intent_filter['data']['scheme'].append(value)
                elif name == 'android:mimeType':
                    if 'mime' not in current_intent_filter['data']:
                        current_intent_filter['data']['mime'] = []
                    current_intent_filter['data']['mime'].append(value)
        if len(stack) >= 1 and stack[-1] == 'activity':
            if name == 'android:name':
                current_act['name'] = value
        if len(stack) >= 1 and stack[-1] == 'receiver':
            if name == 'android:name':
                current_recv['name'] = value
    def elem_cb(stack, level, name):
        # a new element at or above a remembered depth closes that scope
        if "intent_level" in state:
            if level[-1] <= state['intent_level']:
                # finish current intent!
                del state['intent_level']
                if 'action' in current_intent_filter and len(current_intent_filter) < 3:
                    current_filters.append(copy.deepcopy(current_intent_filter))
                current_intent_filter.clear()
        if "activity_level" in state:
            if level[-1] <= state['activity_level']:
                # finish current activity!
                del state['activity_level']
                for flt in current_filters:
                    flt['activity'] = copy.deepcopy(current_act)
                    intent_filters.append(copy.deepcopy(flt))
                current_act.clear()
                del current_filters[:]
        if "receiver_level" in state:
            if level[-1] <= state['receiver_level']:
                # finish current receiver
                del state['receiver_level']
                for flt in current_filters:
                    flt['receiver'] = copy.deepcopy(current_recv)
                    intent_filters.append(copy.deepcopy(flt))
                current_recv.clear()
                del current_filters[:]
        if name == 'intent-filter':
            # new intent-filter
            state['intent_level'] = level[-1]
        elif name == 'activity':
            # new activity
            state['activity_level'] = level[-1]
        elif name == 'receiver':
            state['receiver_level'] = level[-1]
    self.process_details(attr_cb = attr_cb, elem_cb = elem_cb)
    for intent in intent_filters:
        # was a Python-2 `print` statement; parenthesized for py3 compatibility
        print(intent)
    return intent_filters
def has_service(self):
    """True when the manifest declares at least one <service>."""
    return self.has_something("service")
def has_receiver(self):
    """True when the manifest declares at least one <receiver>."""
    return self.has_something("receiver")
def has_provider(self):
    """True when the manifest declares at least one <provider>."""
    return self.has_something("provider")
def has_activity(self):
    """True when the manifest declares at least one <activity>."""
    return self.has_something("activity")
def get_receivers(self):
    """Print the intent-filter action names registered by manifest receivers.

    Debug helper: writes each action name to stdout and returns None.
    """
    def on_attr(stack, level, name, id_, value, type_):
        # only actions nested directly inside a receiver's intent-filter
        if stack[-1] == "action" and stack[-2] == "intent-filter" and stack[-3] == "receiver":
            if name == "android:name":
                # was a Python-2 `print` statement; parenthesized for py3 compatibility
                print(value)
    self.process_details(attr_cb = on_attr)
def get_used_libs(self):
    """Return the required <uses-library> names declared in the manifest.

    Optional dependencies (android:required="0x0") are skipped; when the
    required flag arrives after the name attribute, the optimistically
    added entry is removed again.
    """
    libs = []
    entry = {}

    def on_elem(stack, level, name):
        # reset bookkeeping at the start of each <uses-library> element
        if name == "uses-library":
            entry['required'] = True
            entry['added'] = False

    def on_attr(stack, level, name, id_, value, type_):
        if stack[-1] != "uses-library":
            return
        if name == "android:name":
            if entry['required']:
                libs.append(value)
                entry['added'] = True
            else:
                logger.debug("ignored optional dep %s" % value)
        if name == "android:required" and value == "0x0":
            if entry['added']:
                dropped = libs.pop()
                logger.debug("removed optional dep %s" % dropped)
            else:
                entry['required'] = False

    self.process_details(elem_cb = on_elem, attr_cb = on_attr)
    return libs
def files(self):
    """Return the APK's archive member paths as a list of strings (via `aapt l`)."""
    listing = subprocess.check_output("aapt l %s" % self.path, shell = True)
    return listing.split("\n")
def get_abis(self):
    """Return the set of native ABI families bundled under lib/."""
    abis = set()
    for entry in self.files():
        if not entry.startswith("lib/"):
            continue
        if "arm" in entry:
            abis.add("arm")
        elif "x86" in entry:
            abis.add("x86")
        elif "mips" in entry:
            abis.add("mips")
        else:
            # unknown family: fall back to the raw lib/ subdirectory name
            folder = entry.split('/')[1]
            if folder:
                abis.add(folder)
    return abis
def get_total_activities(self):
    """Return the set of android:name values of all activities and aliases."""
    names = set()
    stack = []
    levels = []
    elem_pat = re.compile(r"\s+E:\s+([a-zA-Z-]+)\s+.*")
    for raw in self.get_details().split('\n'):
        depth = self.get_level(raw)
        # close elements whose indent is at or beyond this line's
        while stack and levels[-1] >= depth:
            stack.pop()
            levels.pop()
        opened = elem_pat.match(raw)
        if opened:
            stack.append(opened.group(1))
            levels.append(depth)
        if stack and stack[-1] in ('activity', 'activity-alias') and 'A: android:name' in raw:
            m = re.search(r"A:\s+android:name\([^)]+\)\s*=\s*\"([^\"]+)\"", raw)
            if m:
                names.add(m.group(1))
    return names
def get_default_activity(self):
    """Return the android:name of the launcher (default) activity.

    Scans the aapt manifest dump for an <activity>/<activity-alias> whose
    intent-filter carries the android.intent.category.LAUNCHER category.
    Handles both attribute orders: name-before-category and
    category-before-name (via the `is_launcher` flag).

    Raises InternalException when no launcher activity is found.
    """
    details = self.get_details()
    # stack of currently open manifest elements and their indent depths
    element_stack = []
    element_level = []
    act_name = ''        # android:name of the activity currently being read
    is_launcher = False  # LAUNCHER category seen before the activity's name
    element_re = re.compile("\s+E:\s+([a-zA-Z-]+)\s+.*")
    for line in details.split('\n'):
        level = self.get_level(line)
        # pop every element whose indent is at or beyond this line's
        for i in range(len(element_stack)-1, -1, -1):
            if element_level[i] >= level:
                element_stack.pop()
                element_level.pop()
            else:
                break
        if element_re.match(line):
            match = element_re.match(line)
            etype = match.group(1)
            element_stack += [etype]
            element_level += [level]
            # print "element %s at %d" % (etype, level)
            if etype == "activity" or etype == 'activity-alias':
                # entering a new activity: reset per-activity state
                is_launcher = False
                act_name = ''
        if 'activity' in element_stack or 'activity-alias' in element_stack:
            if ('activity' == element_stack[-1] or 'activity-alias' == element_stack[-1]) and 'A: android:name' in line:
                match = re.search("A:\s+android:name\([^)]+\)\s*=\s*\"([^\"]+)\"", line)
                if match:
                    act_name = match.group(1)
                    # print "got act name: %s" % act_name
                    # category arrived first: this name completes the match
                    if is_launcher:
                        return act_name
            elif 'A: android:name' in line and element_stack[-1] == 'category':
                match = re.search("A:\s+android:name\([^)]+\)\s*=\s*\"([^\"]+)\"", line)
                if match:
                    cat_name = match.group(1)
                    # print "got cat name: %s" % cat_name
                    if "android.intent.category.LAUNCHER" in cat_name:
                        if act_name:
                            return act_name
                        else:
                            # name not seen yet; remember and keep scanning
                            is_launcher = True
    raise InternalException("can't find default activity name")
def cleanup(self):
    """Best-effort removal of the temporary working directory.

    Never raises: a missing attribute or an already-removed directory is
    silently ignored (narrowed from the original bare `except:`).
    """
    tempdir = getattr(self, 'tempdir', None)
    if tempdir:
        try:
            shutil.rmtree(tempdir)
        except OSError:
            # directory may already be gone or partially unreadable
            pass
def get_path(self):
    """Return the filesystem path of the (possibly rewritten) APK."""
    return self.path
def identify_apk(apk):
    """Print a short dependency/permission summary for one APK file.

    Python-2 `print` statements parenthesized for py3 compatibility.
    """
    this_apk = APK(apk)
    print("%s: " % apk)
    print(" deps: ", this_apk.get_used_libs())
    print(" perms: ", this_apk.get_perms())
# CLI entry point: summarize each APK path given on the command line.
if __name__ == "__main__":
    for apk in sys.argv[1:]:
        identify_apk(apk)
| |
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
import pyocni.adapters.cnv_toHTTP as extractor
from webob import Response
class To_HTTP_Text_Plain():
    """
    Formats JSON OCCI objects into HTTP text/plain descriptions.

    `dict.has_key()` (Python-2 only, removed in Python 3) is replaced by
    the `in` operator throughout; behavior is unchanged.
    """
    def format_to_text_plain_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories
        Args:
            @param var: JSON categories
        """
        resp = ""
        # render kinds, mixins and actions, in that order
        for group, cls in (('kinds', 'kind'), ('mixins', 'mixin'), ('actions', 'action')):
            if group in var:
                for item in var[group]:
                    resp += "Category :" + cnv_JSON_category(item, cls) + "\n"
        return resp
    def format_to_text_plain_entities(self, var):
        """
        Convert a JSON resource description into a text/plain resource description
        Args:
            @param var: JSON resource description
        """
        response = ""
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                # replace the final newline with a resource separator
                response = response[:-1] + ",\n"
            # drop the trailing ",\n" after the last resource
            response = response[:-2]
        if 'links' in var:
            items = var['links']
            response += ",\n"
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                response = response[:-1] + ",\n"
            response = response[:-2]
        return response
    def format_to_text_plain_locations(self, var):
        """
        Converts JSON locations into HTTP locations
        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "Location: " + item + "\n"
        return locs
    def format_to_text_plain_x_locations(self, var):
        """
        Converts JSON locations into HTTP X-OCCI locations
        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "X-OCCI-Location: " + item + "\n"
        return locs
class To_HTTP_Text_OCCI():
    """
    Formats JSON OCCI objects into HTTP text/occi response headers.

    `dict.has_key()` (Python-2 only, removed in Python 3) is replaced by
    the `in` operator throughout; behavior is unchanged.
    """
    def format_to_text_occi_categories(self, var):
        """
        Format JSON categories into HTTP text/occi Category headers
        Args:
            @param var: JSON categories
        """
        resp = Response()
        resp.headers.clear()
        for group, cls in (('kinds', 'kind'), ('mixins', 'mixin'), ('actions', 'action')):
            if group in var:
                for item in var[group]:
                    # trailing ",\n" is stripped again below; kept for parity
                    value = cnv_JSON_category(item, cls) + ",\n"
                    resp.headers.add('Category', value[:-2])
        return resp.headers
    def format_to_text_occi_entities(self, var):
        """
        Convert a JSON resource description into text/occi headers
        Args:
            @param var: JSON resource description
        """
        response = Response()
        response.headers.clear()
        for group in ('resources', 'links'):
            if group in var:
                for item in var[group]:
                    cat, link, att = cnv_JSON_Resource(item)
                    for c in cat:
                        response.headers.add("Category", c)
                    for l in link:
                        response.headers.add("Link", l)
                    for a in att:
                        response.headers.add("X-OCCI-Attribute", a)
        return response.headers
    def format_to_text_occi_locations(self, var):
        """
        Converts JSON locations into a single Location header
        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("Location", locs[:-1])
        return resp.headers
    def format_to_text_x_occi_locations(self, var):
        """
        Converts JSON locations into a single X-OCCI-Location header
        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("X-OCCI-Location", locs[:-1])
        return resp.headers
class To_HTTP_Text_URI_List():
    """
    Formats JSON location lists into HTTP text/uri-list bodies.
    """
    def __init__(self):
        pass
    def check_for_uri_locations(self, var):
        """
        Join locations into a newline-terminated body and flag path URIs.

        Returns (body, flag) where flag is True when any entry ends with
        a '/' (i.e. looks like a path URI rather than an entity URI).

        Args:
            @param var: JSON location object
        """
        body = ""
        has_path_uri = False
        for entry in var:
            body += entry + "\n"
            has_path_uri = has_path_uri or entry.endswith("/")
        return body, has_path_uri
def cnv_JSON_category(category, type):
    """
    Converts a JSON category into an HTTP Category header value.

    Mandatory parts (term, scheme, class) come first; optional parts are
    appended only when the extractor reports a non-None value.

    Args:
        @param category: JSON category
        @param type: Category type = (kind || mixin || action)
    """
    parts = [extractor.extract_term_from_category(category) + ';']
    parts.append("scheme=\"" + extractor.extract_scheme_from_category(category) + "\";")
    parts.append("class=\"" + type + "\";")
    optional = (
        ("title", extractor.extract_title_from_category(category)),
        ("rel", extractor.extract_related_from_category(category)),
        ("attributes", extractor.extract_attributes_from_category(category)),
        ("actions", extractor.extract_actions_from_category(category)),
        ("location", extractor.extract_location_from_category(category)),
    )
    for label, value in optional:
        if value is not None:
            parts.append(label + "=\"" + value + "\";")
    return "".join(parts)
def cnv_JSON_Resource(json_object):
    """
    Converts a JSON Resource into its HTTP parts.

    Returns (categories, links, attributes): the kind plus any mixins,
    the internal links plus any actions, and the attribute list.
    """
    categories = [extractor.extract_kind_from_entity(json_object)]
    mixins = extractor.extract_mixin_from_entity(json_object)
    if mixins is not None:
        categories.extend(mixins)
    attributes = extractor.extract_attributes_from_entity(json_object)
    if attributes is None:
        attributes = list()
    links = list()
    for items in (extractor.extract_internal_link_from_entity(json_object),
                  extractor.extract_actions_from_entity(json_object)):
        if items is not None:
            links.extend(items)
    return categories, links, attributes
| |
from nsbaseresource import NSBaseResource
__author__ = 'vlazarenko'
class NSSSLCertKey(NSBaseResource):
    """NITRO `sslcertkey` resource: a NetScaler certificate/key pair.

    Wraps the option dictionary the NITRO REST API expects and provides
    static helpers for the common fetch/add/delete/update/link operations.
    """
    def __init__(self, json_data=None):
        """
        Supplied with json_data the object can be pre-filled
        """
        super(NSSSLCertKey, self).__init__()
        # Every field the NITRO sslcertkey API may send or accept.
        self.options = {'certkey': '',
                        'cert': '',
                        'key': '',
                        'password': '',
                        'fipskey': '',
                        'inform': '',
                        'passplain': '',
                        'expirymonitor': '',
                        'notificationperiod': '',
                        'linkcertkeyname': '',
                        'nodomaincheck': '',
                        'signaturealg': '',
                        'serial': '',
                        'issuer': '',
                        'clientcertnotbefore': '',
                        'clientcertnotafter': '',
                        'daystoexpiration': '',
                        'subject': '',
                        'publickey': '',
                        'publickeysize': '',
                        'version': '',
                        'priority': '',
                        'status': '',
                        'passcrypt': '',
                        'data': '',
                        'servicename': ''}
        if json_data is not None:
            # copy only keys the API knows about; ignore extras silently
            for key, value in json_data.items():
                if key in self.options:
                    self.options[key] = value
        self.resourcetype = NSSSLCertKey.get_resourcetype()

    @staticmethod
    def get_resourcetype():
        """Return the NITRO resource type string for this class."""
        return "sslcertkey"

    # --- writable configuration fields -------------------------------
    def set_certkey(self, certkey):
        self.options['certkey'] = certkey

    def get_certkey(self):
        return self.options['certkey']

    def set_cert(self, cert):
        self.options['cert'] = cert

    def get_cert(self):
        return self.options['cert']

    def set_key(self, key):
        self.options['key'] = key

    def get_key(self):
        return self.options['key']

    def set_password(self, password):
        self.options['password'] = password

    def get_password(self):
        return self.options['password']

    def set_fipskey(self, fipskey):
        self.options['fipskey'] = fipskey

    def get_fipskey(self):
        return self.options['fipskey']

    def set_inform(self, inform):
        self.options['inform'] = inform

    def get_inform(self):
        return self.options['inform']

    def set_passplain(self, passplain):
        self.options['passplain'] = passplain

    def get_passplain(self):
        return self.options['passplain']

    def set_expirymonitor(self, expirymonitor):
        self.options['expirymonitor'] = expirymonitor

    def get_expirymonitor(self):
        return self.options['expirymonitor']

    def set_notificationperiod(self, notificationperiod):
        self.options['notificationperiod'] = notificationperiod

    def get_notificationperiod(self):
        return self.options['notificationperiod']

    def set_linkcertkeyname(self, linkcertkeyname):
        self.options['linkcertkeyname'] = linkcertkeyname

    def get_linkcertkeyname(self):
        return self.options['linkcertkeyname']

    def set_nodomaincheck(self, nodomaincheck):
        self.options['nodomaincheck'] = nodomaincheck

    def get_nodomaincheck(self):
        return self.options['nodomaincheck']

    # --- read-only fields reported by the appliance ------------------
    def get_signaturealg(self):
        return self.options['signaturealg']

    def get_serial(self):
        return self.options['serial']

    def get_issuer(self):
        return self.options['issuer']

    def get_clientcertnotbefore(self):
        return self.options['clientcertnotbefore']

    def get_clientcertnotafter(self):
        return self.options['clientcertnotafter']

    def get_daystoexpiration(self):
        return self.options['daystoexpiration']

    def get_subject(self):
        return self.options['subject']

    def get_publickey(self):
        return self.options['publickey']

    def get_publickeysize(self):
        return self.options['publickeysize']

    def get_version(self):
        return self.options['version']

    def get_priority(self):
        return self.options['priority']

    def get_status(self):
        return self.options['status']

    def get_passcrypt(self):
        return self.options['passcrypt']

    def get_data(self):
        return self.options['data']

    def get_servicename(self):
        return self.options['servicename']

    # --- NITRO operations --------------------------------------------
    @staticmethod
    def get(nitro, resource):
        """
        Use this API to fetch SSL certkey resource of given name.
        """
        __resource = NSSSLCertKey()
        __resource.get_resource(nitro, resource.get_certkey())
        return __resource

    @staticmethod
    def get_all(nitro):
        """
        Use this API to fetch all configured SSL certkey resources.
        """
        __url = nitro.get_url() + NSSSLCertKey.get_resourcetype()
        __json_resources = nitro.get(__url).get_response_field(NSSSLCertKey.get_resourcetype())
        __resources = []
        for json_resource in __json_resources:
            __resources.append(NSSSLCertKey(json_resource))
        return __resources

    @staticmethod
    def add(nitro, resource):
        """
        Use this API to add resource.
        """
        __resource = NSSSLCertKey()
        __resource.set_certkey(resource.get_certkey())
        __resource.set_cert(resource.get_cert())
        __resource.set_key(resource.get_key())
        __resource.set_password(resource.get_password())
        __resource.set_fipskey(resource.get_fipskey())
        __resource.set_inform(resource.get_inform())
        __resource.set_passplain(resource.get_passplain())
        __resource.set_expirymonitor(resource.get_expirymonitor())
        __resource.set_notificationperiod(resource.get_notificationperiod())
        return __resource.add_resource(nitro)

    @staticmethod
    def delete(nitro, resource):
        """
        Use this API to delete server of a given name.
        """
        __resource = NSSSLCertKey()
        __resource.set_certkey(resource.get_certkey())
        nsresponse = __resource.delete_resource(nitro)
        return nsresponse

    @staticmethod
    def update(nitro, resource):
        """
        Use this API to update a server of a given name.
        """
        __resource = NSSSLCertKey()
        __resource.set_certkey(resource.get_certkey())
        __resource.set_expirymonitor(resource.get_expirymonitor())
        __resource.set_notificationperiod(resource.get_notificationperiod())
        return __resource.update_resource(nitro)

    @staticmethod
    def link(nitro, resource):
        """
        Use this API to link resource of a given name
        """
        __resource = NSSSLCertKey()
        __resource.set_certkey(resource.get_certkey())
        __resource.set_linkcertkeyname(resource.get_linkcertkeyname())
        nsresponse = __resource.perform_operation(nitro, "link")
        return nsresponse

    @staticmethod
    def unlink(nitro, resource):
        """
        Use this API to unlink resource of a given name
        """
        __resource = NSSSLCertKey()
        __resource.set_certkey(resource.get_certkey())
        nsresponse = __resource.perform_operation(nitro, "unlink")
        return nsresponse
| |
# -*- coding: utf-8 -*-
"""Abstract Base Class for Basis Function and some common implementations."""
import abc
import numpy as np
class BasisFunction(object):
    r"""ABC for basis functions used by LSPI Policies.

    A basis function is a function that takes in a state vector and an action
    index and returns a vector of features. The resulting feature vector is
    referred to as :math:`\phi` in the LSPI paper (pg 9 of the PDF referenced
    in this package's documentation). The :math:`\phi` vector is dotted with
    the weight vector of the Policy to calculate the Q-value.

    The dimensions of the state vector are usually smaller than the dimensions
    of the :math:`\phi` vector. However, the dimensions of the :math:`\phi`
    vector are usually much smaller than the dimensions of an exact
    representation of the state which leads to significant savings when
    computing and storing a policy.
    """
    # NOTE(review): Python-2 style ABC wiring. On Python 3 `__metaclass__`
    # is ignored (it requires `metaclass=abc.ABCMeta` in the class header)
    # and `abc.abstractproperty` is deprecated -- confirm target version.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def size(self):
        r"""Return the vector size of the basis function.

        Returns
        -------
        int
            The size of the :math:`\phi` vector.
            (Referred to as k in the paper).
        """
        pass  # pragma: no cover

    @abc.abstractmethod
    def evaluate(self, state, action):
        r"""Calculate the :math:`\phi` matrix for the given state-action pair.

        The way this value is calculated depends entirely on the concrete
        implementation of BasisFunction.

        Parameters
        ----------
        state : numpy.array
            The state to get the features for.
            When calculating Q(s, a) this is the s.
        action : int
            The action index to get the features for.
            When calculating Q(s, a) this is the a.

        Returns
        -------
        numpy.array
            The :math:`\phi` vector. Used by Policy to compute Q-value.
        """
        pass  # pragma: no cover

    @abc.abstractproperty
    def num_actions(self):
        """Return number of possible actions.

        Returns
        -------
        int
            Number of possible actions.
        """
        pass  # pragma: no cover

    @staticmethod
    def _validate_num_actions(num_actions):
        """Return num_actions if valid. Otherwise raise ValueError.

        Return
        ------
        int
            Number of possible actions.

        Raises
        ------
        ValueError
            If num_actions < 1
        """
        if num_actions < 1:
            raise ValueError('num_actions must be >= 1')
        return num_actions
class FakeBasis(BasisFunction):
    r"""Basis that ignores all input. Useful for random sampling.

    When creating a purely random Policy a basis function is still required.
    This basis function just returns a :math:`\phi` equal to [1.] for all
    inputs. It will however, still throw exceptions for impossible values like
    negative action indexes.
    """
    def __init__(self, num_actions):
        """Initialize FakeBasis."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)

    def size(self):
        r"""Return size of 1.

        Returns
        -------
        int
            Size of :math:`phi` which is always 1 for FakeBasis

        Example
        -------
        >>> FakeBasis(1).size()
        1
        """
        # fixed doctest: the constructor requires num_actions
        return 1

    def evaluate(self, state, action):
        r"""Return :math:`\phi` equal to [1.].

        Parameters
        ----------
        state : numpy.array
            The state to get the features for.
            When calculating Q(s, a) this is the s. FakeBasis ignores these
            values.
        action : int
            The action index to get the features for.
            When calculating Q(s, a) this is the a. FakeBasis ignores these
            values.

        Returns
        -------
        numpy.array
            :math:`\phi` vector equal to [1.].

        Raises
        ------
        IndexError
            If action index is < 0

        Example
        -------
        >>> FakeBasis(1).evaluate(np.arange(10), 0)
        array([ 1.])
        """
        # fixed doctest: the constructor requires num_actions
        if action < 0:
            raise IndexError('action index must be >= 0')
        if action >= self.num_actions:
            raise IndexError('action must be < num_actions')
        return np.array([1.])

    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions

    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class OneDimensionalPolynomialBasis(BasisFunction):
    """Polynomial features for a state with one dimension.

    Takes the value of the state and constructs a vector proportional
    to the specified degree and number of actions. The polynomial is first
    constructed as [..., 1, value, value^2, ..., value^k, ...]
    where k is the degree. The rest of the vector is 0.

    Parameters
    ----------
    degree : int
        The polynomial degree.
    num_actions: int
        The total number of possible actions

    Raises
    ------
    ValueError
        If degree is less than 0
    ValueError
        If num_actions is less than 1
    """
    def __init__(self, degree, num_actions):
        """Initialize polynomial basis function."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        if degree < 0:
            raise ValueError('Degree must be >= 0')
        self.degree = degree

    def size(self):
        """Calculate the size of the basis function.

        The base size will be degree + 1. This basic matrix is then
        duplicated once for every action. Therefore the size is equal to
        (degree + 1) * number of actions

        Returns
        -------
        int
            The size of the phi matrix that will be returned from evaluate.

        Example
        -------
        >>> basis = OneDimensionalPolynomialBasis(2, 2)
        >>> basis.size()
        6
        """
        return (self.degree + 1) * self.num_actions

    def evaluate(self, state, action):
        r"""Calculate :math:`\phi` matrix for given state action pair.

        The :math:`\phi` matrix is used to calculate the Q function for the
        given policy.

        Parameters
        ----------
        state : numpy.array
            The state to get the features for.
            When calculating Q(s, a) this is the s.
        action : int
            The action index to get the features for.
            When calculating Q(s, a) this is the a.

        Returns
        -------
        numpy.array
            The :math:`\phi` vector. Used by Policy to compute Q-value.

        Raises
        ------
        IndexError
            If :math:`0 \le action < num\_actions` then IndexError is raised.
        ValueError
            If the state vector has any number of dimensions other than 1 a
            ValueError is raised.

        Example
        -------
        >>> basis = OneDimensionalPolynomialBasis(2, 2)
        >>> basis.evaluate(np.array([2]), 0)
        array([ 1.,  2.,  4.,  0.,  0.,  0.])
        """
        if action < 0 or action >= self.num_actions:
            raise IndexError('Action index out of bounds')
        if state.shape != (1, ):
            raise ValueError('This class only supports one dimensional states')
        phi = np.zeros((self.size(), ))
        # floor division: `/` yields a float under Python 3's true division,
        # which would break the slice indices below
        offset = (self.size() // self.num_actions) * action
        value = state[0]
        phi[offset:offset + self.degree + 1] = \
            np.array([pow(value, i) for i in range(self.degree + 1)])
        return phi

    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions

    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class RadialBasisFunction(BasisFunction):
    r"""Gaussian Multidimensional Radial Basis Function (RBF).

    Given a set of k means :math:`(\mu_1 , \ldots, \mu_k)` produce a feature
    vector :math:`(1, e^{-\gamma || s - \mu_1 ||^2}, \cdots,
    e^{-\gamma || s - \mu_k ||^2})` where `s` is the state vector and
    :math:`\gamma` is a free parameter. This vector will be padded with
    0's on both sides proportional to the number of possible actions
    specified.

    Parameters
    ----------
    means: list(numpy.array)
        List of numpy arrays representing :math:`(\mu_1, \ldots, \mu_k)`.
        Each :math:`\mu` is a numpy array with dimensions matching the state
        vector this basis function will be used with. If the dimensions of each
        vector are not equal than an exception will be raised. If no means are
        specified then a ValueError will be raised
    gamma: float
        Free parameter which controls the size/spread of the Gaussian "bumps".
        This parameter is best selected via tuning through cross validation.
        gamma must be > 0.
    num_actions: int
        Number of actions. Must be in range [1, :math:`\infty`] otherwise
        an exception will be raised.

    Raises
    ------
    ValueError
        If means list is empty
    ValueError
        If dimensions of each mean vector do not match.
    ValueError
        If gamma is <= 0.
    ValueError
        If num_actions is less than 1.

    Note
    ----
    The numpy arrays specifying the means are not copied.
    """
    def __init__(self, means, gamma, num_actions):
        """Initialize RBF instance."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        if len(means) == 0:
            raise ValueError('You must specify at least one mean')
        # NOTE(review): `reduce` is the Python-2 builtin here; under Python 3
        # it lives in functools -- confirm the target interpreter version.
        if reduce(RadialBasisFunction.__check_mean_size, means) is None:
            raise ValueError('All mean vectors must have the same dimensions')
        self.means = means
        if gamma <= 0:
            raise ValueError('gamma must be > 0')
        self.gamma = gamma

    @staticmethod
    def __check_mean_size(left, right):
        """Apply f if the value is not None.

        This method is meant to be used with reduce. It will return either the
        right most numpy array or None if any of the array's had
        differing sizes. I wanted to use a Maybe monad here,
        but Python doesn't support that out of the box.

        Return
        ------
        None or numpy.array
            None values will propogate through the reduce automatically.
        """
        if left is None or right is None:
            return None
        else:
            if left.shape != right.shape:
                return None
        return right

    def size(self):
        r"""Calculate size of the :math:`\phi` matrix.

        The size is equal to the number of means + 1 times the number of
        number actions.

        Returns
        -------
        int
            The size of the phi matrix that will be returned from evaluate.
        """
        return (len(self.means) + 1) * self.num_actions

    def evaluate(self, state, action):
        r"""Calculate the :math:`\phi` matrix.

        Matrix will have the following form:

        :math:`[\cdots, 1, e^{-\gamma || s - \mu_1 ||^2}, \cdots,
        e^{-\gamma || s - \mu_k ||^2}, \cdots]`

        where the matrix will be padded with 0's on either side depending
        on the specified action index and the number of possible actions.

        Returns
        -------
        numpy.array
            The :math:`\phi` vector. Used by Policy to compute Q-value.

        Raises
        ------
        IndexError
            If :math:`0 \le action < num\_actions` then IndexError is raised.
        ValueError
            If the state vector has any number of dimensions other than 1 a
            ValueError is raised.
        """
        if action < 0 or action >= self.num_actions:
            raise IndexError('Action index out of bounds')
        if state.shape != self.means[0].shape:
            raise ValueError('Dimensions of state must match '
                             'dimensions of means')
        phi = np.zeros((self.size(), ))
        # BUG FIX: size() allots len(self.means)+1 slots per action, but the
        # offset was computed from the state dimension (len(self.means[0])+1),
        # misplacing or overflowing features whenever the state dimension
        # differs from the number of means.
        offset = (len(self.means) + 1) * action
        rbf = [RadialBasisFunction.__calc_basis_component(state,
                                                          mean,
                                                          self.gamma)
               for mean in self.means]
        phi[offset] = 1.
        phi[offset + 1:offset + 1 + len(rbf)] = rbf
        return phi

    @staticmethod
    def __calc_basis_component(state, mean, gamma):
        # squared Euclidean distance folded into the Gaussian kernel
        mean_diff = state - mean
        return np.exp(-gamma * np.sum(mean_diff * mean_diff))

    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions

    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class ExactBasis(BasisFunction):
    """Basis function with no functional approximation.

    This can only be used in domains with finite, discrete state-spaces. For
    example the Chain domain from the LSPI paper would work with this basis,
    but the inverted pendulum domain would not.

    Parameters
    ----------
    num_states: list
        A list containing integers representing the number of possible values
        for each state variable.
    num_actions: int
        Number of possible actions.
    """

    def __init__(self, num_states, num_actions):
        """Initialize ExactBasis.

        Raises
        ------
        ValueError
            If any value in num_states is <= 0.
        """
        # BUG FIX: the element-wise comparison below requires an ndarray,
        # but the documented input type is a plain list (which made the
        # comparison raise TypeError on Python 3) -- normalize first.
        num_states = np.asarray(num_states)
        if len(np.where(num_states <= 0)[0]) != 0:
            raise ValueError('num_states value\'s must be > 0')

        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        self._num_states = num_states
        # _offsets[i] is the stride of state variable i in the
        # mixed-radix index computed by get_state_action_index.
        self._offsets = [1]
        for i in range(1, len(num_states)):
            self._offsets.append(self._offsets[-1]*num_states[i-1])

    def size(self):
        r"""Return the vector size of the basis function.

        Returns
        -------
        int
            The size of the :math:`\phi` vector (referred to as k in the
            paper): the product of all state variable cardinalities times
            the number of actions.
        """
        return reduce(lambda x, y: x*y, self._num_states, 1)*self.__num_actions

    def get_state_action_index(self, state, action):
        """Return the non-zero index of the basis.

        Parameters
        ----------
        state: numpy.array
            The state to get the index for.
        action: int
            The action to get the index for.

        Returns
        -------
        int
            The non-zero index of the basis.

        Raises
        ------
        IndexError
            If action index < 0 or action index >= num_actions.
        """
        if action < 0:
            raise IndexError('action index must be >= 0')
        if action >= self.num_actions:
            raise IndexError('action must be < num_actions')

        # Each action owns a contiguous block of prod(num_states) entries.
        base = action * int(self.size() / self.__num_actions)

        offset = 0
        for i, value in enumerate(state):
            # Mixed-radix encoding of the state variables.
            offset += self._offsets[i] * value
        return base + offset

    def evaluate(self, state, action):
        r"""Return a :math:`\phi` vector that has a single non-zero value.

        Parameters
        ----------
        state: numpy.array
            The state to get the features for. When calculating Q(s, a) this
            is the s.
        action: int
            The action index to get the features for.
            When calculating Q(s, a) this is the a.

        Returns
        -------
        numpy.array
            :math:`\phi` vector

        Raises
        ------
        IndexError
            If action index < 0 or action index >= num_actions.
        ValueError
            If the size of the state does not match the size of the
            num_states list used during construction.
        ValueError
            If any of the state variables are < 0 or >= the corresponding
            value in the num_states list used during construction.
        """
        if len(state) != len(self._num_states):
            raise ValueError('Number of state variables must match '
                             + 'size of num_states.')
        if len(np.where(state < 0)[0]) != 0:
            raise ValueError('state cannot contain negative values.')
        for state_var, num_state_values in zip(state, self._num_states):
            if state_var >= num_state_values:
                raise ValueError('state values must be <= corresponding '
                                 + 'num_states value.')

        phi = np.zeros(self.size())
        phi[self.get_state_action_index(state, action)] = 1
        return phi

    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions

    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
| |
from __future__ import absolute_import, unicode_literals
from future.builtins import filter, int, range, str, super, zip
from future.utils import with_metaclass
from copy import copy
from datetime import date
from itertools import dropwhile, takewhile
from locale import localeconv
from re import match
from django import forms
from django.forms.models import BaseInlineFormSet, ModelFormMetaclass
from django.forms.models import inlineformset_factory
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
from cartridge.shop import checkout
from cartridge.shop.models import Product, ProductOption, ProductVariation
from cartridge.shop.models import Cart, CartItem, Order, DiscountCode
from cartridge.shop.utils import (make_choices, set_locale, set_shipping,
clear_session)
# Maps internal validation error codes raised while adding a product to
# the cart/wishlist (see AddProductForm.clean) to user-facing messages.
ADD_PRODUCT_ERRORS = {
    "invalid_options": _("The selected options are currently unavailable."),
    "no_stock": _("The selected options are currently not in stock."),
    "no_stock_quantity": _("The selected quantity is currently unavailable."),
}
class AddProductForm(forms.Form):
    """
    A form for adding the given product to the cart or the
    wishlist.
    """

    quantity = forms.IntegerField(label=_("Quantity"), min_value=1)
    # Hidden sku field: only populated when adding to the cart directly
    # from the wishlist page; removed otherwise in __init__.
    sku = forms.CharField(required=False, widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        """
        Handles adding a variation to the cart or wishlist.

        When adding from the product page, the product is provided
        from the view and a set of choice fields for all the
        product options for this product's variations are added to
        the form. When the form is validated, the selected options
        are used to determine the chosen variation.

        A ``to_cart`` boolean keyword arg is also given specifying
        whether the product is being added to a cart or wishlist.
        If a product is being added to the cart, then its stock
        level is also validated.

        When adding to the cart from the wishlist page, a sku is
        given for the variation, so the creation of choice fields
        is skipped.
        """
        self._product = kwargs.pop("product", None)
        self._to_cart = kwargs.pop("to_cart")
        super(AddProductForm, self).__init__(*args, **kwargs)
        # Adding from the wishlist with a sku, bail out.
        # NOTE(review): assumes the form is always constructed with the
        # data dict as the first positional argument -- confirm callers.
        if args[0] is not None and args[0].get("sku", None):
            return
        # Adding from the product page, remove the sku field
        # and build the choice fields for the variations.
        del self.fields["sku"]
        option_fields = ProductVariation.option_fields()
        if not option_fields:
            return
        option_names, option_labels = list(zip(*[(f.name, f.verbose_name)
                                                 for f in option_fields]))
        # Columns of option values, one column per option field, taken
        # only from variations that actually have a price.
        option_values = list(zip(*self._product.variations.filter(
            unit_price__isnull=False).values_list(*option_names)))
        if option_values:
            for i, name in enumerate(option_names):
                # De-duplicate and drop empty option values.
                values = [_f for _f in set(option_values[i]) if _f]
                if values:
                    field = forms.ChoiceField(label=option_labels[i],
                                              choices=make_choices(values))
                    self.fields[name] = field

    def clean(self):
        """
        Determine the chosen variation, validate it and assign it as
        an attribute (``self.variation``) to be used in views.
        """
        if not self.is_valid():
            return
        # Posted data will either be a sku, or product options for
        # a variation.
        data = self.cleaned_data.copy()
        quantity = data.pop("quantity")
        # Ensure the product has a price if adding to cart.
        if self._to_cart:
            data["unit_price__isnull"] = False
        error = None
        if self._product is not None:
            # Chosen options will be passed to the product's
            # variations.
            qs = self._product.variations
        else:
            # A product hasn't been given since we have a direct sku.
            qs = ProductVariation.objects
        try:
            variation = qs.get(**data)
        except ProductVariation.DoesNotExist:
            error = "invalid_options"
        else:
            # Validate stock if adding to cart.
            if self._to_cart:
                if not variation.has_stock():
                    error = "no_stock"
                elif not variation.has_stock(quantity):
                    error = "no_stock_quantity"
        if error is not None:
            raise forms.ValidationError(ADD_PRODUCT_ERRORS[error])
        self.variation = variation
        return self.cleaned_data
class CartItemForm(forms.ModelForm):
    """
    Model form for a single item in the cart - used by the
    ``CartItemFormSet`` below, which controls editing the entire cart.
    """

    class Meta:
        model = CartItem
        fields = ("quantity",)

    def clean_quantity(self):
        """
        Validate that the given quantity is available.
        """
        requested = self.cleaned_data["quantity"]
        variation = ProductVariation.objects.get(sku=self.instance.sku)
        # Only the change relative to what is already in the cart needs
        # to be covered by stock.
        extra_needed = requested - self.instance.quantity
        if not variation.has_stock(extra_needed):
            message = ADD_PRODUCT_ERRORS["no_stock_quantity"].rstrip(".")
            raise forms.ValidationError("%s: %s" % (message, requested))
        return requested
# Formset used to edit quantities of (and delete) every item in the
# cart at once, built from CartItemForm above.
CartItemFormSet = inlineformset_factory(Cart, CartItem, form=CartItemForm,
                                        can_delete=True, extra=0)
class FormsetForm(object):
    """
    Form mixin that provides template methods for iterating through
    sets of fields by prefix, single fields and finally remaining
    fields that haven't yet been iterated, with each fieldset made up
    from a copy of the original form, giving access to as_* methods.

    The use case for this is ``OrderForm`` below. It contains a
    handful of fields named with the prefixes ``billing_detail_XXX``
    and ``shipping_detail_XXX``. Using ``FormsetForm`` we can then
    group these into fieldsets in our templates::

        <!-- Fields prefixed with "billing_detail_" -->
        <fieldset>{{ form.billing_detail_fields.as_p }}</fieldset>
        <!-- Fields prefixed with "shipping_detail_" -->
        <fieldset>{{ form.shipping_detail_fields.as_p }}</fieldset>
        <!-- All remaining fields -->
        <fieldset>{{ form.other_fields.as_p }}</fieldset>

    Some other helpers exist for use with an individual field name:

    - ``XXX_field`` returns a fieldset containing the field named XXX
    - ``fields_before_XXX`` returns a fieldset with all fields before
      the field named XXX
    - ``fields_after_XXX`` returns a fieldset with all fields after
      the field named XXX
    """

    def _fieldset(self, field_names):
        """
        Return a subset of fields by making a copy of the form
        containing only the given field names.
        """
        fieldset = copy(self)
        if not hasattr(self, "_fields_done"):
            self._fields_done = []
        # Fieldset copies should not repeat the form's non-field
        # errors in every rendered fieldset.
        fieldset.non_field_errors = lambda *args: None
        # Each field is only ever rendered once across all fieldsets.
        names = [f for f in field_names if f not in self._fields_done]
        fieldset.fields = SortedDict([(f, self.fields[f]) for f in names])
        self._fields_done.extend(names)
        return fieldset

    def values(self):
        """
        Return pairs of label and value for each field.
        """
        for field in self.fields:
            label = self.fields[field].label
            if label is None:
                # Derive a human-readable label from the field name.
                label = field[0].upper() + field[1:].replace("_", " ")
            yield (label, self.initial.get(field, self.data.get(field, "")))

    def __getattr__(self, name):
        """
        Dynamic fieldset caller - matches requested attribute name
        against pattern for creating the list of field names to use
        for the fieldset.  Raises ``AttributeError`` for names that
        match no pattern.
        """
        if name == "errors":
            return None
        # (pattern, field-name factory) pairs, tried in order; the
        # first matching pattern wins.  Raw strings keep the \w escapes
        # out of string-literal escaping.
        filters = (
            ("^other_fields$", lambda:
                self.fields.keys()),
            ("^hidden_fields$", lambda:
                [n for n, f in self.fields.items()
                 if isinstance(f.widget, forms.HiddenInput)]),
            (r"^(\w*)_fields$", lambda name:
                [f for f in self.fields.keys() if f.startswith(name)]),
            (r"^(\w*)_field$", lambda name:
                [f for f in self.fields.keys() if f == name]),
            (r"^fields_before_(\w*)$", lambda name:
                takewhile(lambda f: f != name, self.fields.keys())),
            # BUG FIX: dropwhile() returns an iterator, which does not
            # support slicing -- materialize it before dropping the
            # matched field itself.
            (r"^fields_after_(\w*)$", lambda name:
                list(dropwhile(lambda f: f != name, self.fields.keys()))[1:]),
        )
        for filter_exp, filter_func in filters:
            filter_args = match(filter_exp, name)
            if filter_args is not None:
                return self._fieldset(filter_func(*filter_args.groups()))
        raise AttributeError(name)
class DiscountForm(forms.ModelForm):
    """
    Form for entering a discount code during checkout; validates the
    code against the current cart and exposes ``set_discount`` for
    applying the resulting discount to the session.
    """

    class Meta:
        model = Order
        fields = ("discount_code",)

    def __init__(self, request, data=None, initial=None):
        """
        Store the request so that it can be used to retrieve the cart
        which is required to validate the discount code when entered.
        """
        super(DiscountForm, self).__init__(data=data, initial=initial)
        self._request = request

    def clean_discount_code(self):
        """
        Validate the discount code if given, and attach the discount
        instance to the form (as ``self._discount``).
        """
        code = self.cleaned_data.get("discount_code", "")
        cart = self._request.cart
        if code:
            try:
                discount = DiscountCode.objects.get_valid(code=code, cart=cart)
                self._discount = discount
            except DiscountCode.DoesNotExist:
                error = _("The discount code entered is invalid.")
                raise forms.ValidationError(error)
        return code

    def set_discount(self):
        """
        Assigns the session variables for the discount.
        """
        discount = getattr(self, "_discount", None)
        if discount is not None:
            # Clear out any previously defined discount code
            # session vars.
            names = ("free_shipping", "discount_code", "discount_total")
            clear_session(self._request, *names)
            total = self._request.cart.calculate_discount(discount)
            if discount.free_shipping:
                set_shipping(self._request, _("Free shipping"), 0)
            else:
                # A previously entered discount code providing free
                # shipping may have been entered prior to this
                # discount code being entered, so clear out any
                # previously set shipping vars.
                clear_session(self._request, "shipping_type", "shipping_total")
            self._request.session["free_shipping"] = discount.free_shipping
            self._request.session["discount_code"] = discount.code
            self._request.session["discount_total"] = str(total)
class OrderForm(FormsetForm, DiscountForm):
    """
    Main Form for the checkout process - ModelForm for the Order Model
    with extra fields for credit card. Used across each step of the
    checkout process with fields being hidden where applicable.
    """

    step = forms.IntegerField(widget=forms.HiddenInput())
    same_billing_shipping = forms.BooleanField(required=False, initial=True,
        label=_("My delivery details are the same as my billing details"))
    remember = forms.BooleanField(required=False, initial=True,
        label=_("Remember my address for next time"))
    card_name = forms.CharField(label=_("Cardholder name"))
    card_type = forms.ChoiceField(label=_("Card type"),
        widget=forms.RadioSelect,
        choices=make_choices(settings.SHOP_CARD_TYPES))
    card_number = forms.CharField(label=_("Card number"))
    card_expiry_month = forms.ChoiceField(label=_("Card expiry month"),
        initial="%02d" % date.today().month,
        choices=make_choices(["%02d" % i for i in range(1, 13)]))
    # Year choices are assigned in __init__ so they stay relative to
    # the current year.
    card_expiry_year = forms.ChoiceField(label=_("Card expiry year"))
    card_ccv = forms.CharField(label=_("CCV"), help_text=_("A security code, "
        "usually the last 3 digits found on the back of your card."))

    class Meta:
        model = Order
        # All billing/shipping detail fields on the Order model, plus
        # instructions and the discount code.
        fields = ([f.name for f in Order._meta.fields if
                   f.name.startswith("billing_detail") or
                   f.name.startswith("shipping_detail")] +
                  ["additional_instructions", "discount_code"])

    def __init__(self, request, step, data=None, initial=None, errors=None):
        """
        Setup for each order form step which does a few things:

        - Calls OrderForm.preprocess on posted data
        - Sets up any custom checkout errors
        - Hides the discount code field if applicable
        - Hides sets of fields based on the checkout step
        - Sets year choices for cc expiry field based on current date
        """

        # ``data`` is usually the POST attribute of a Request object,
        # which is an immutable QueryDict. We want to modify it, so we
        # need to make a copy.
        data = copy(data)

        # Force the specified step in the posted data, which is
        # required to allow moving backwards in steps. Also handle any
        # data pre-processing, which subclasses may override.
        if data is not None:
            data["step"] = step
            data = self.preprocess(data)
        if initial is not None:
            initial["step"] = step

        super(OrderForm, self).__init__(request, data=data, initial=initial)
        self._checkout_errors = errors

        # Hide discount code field if it shouldn't appear in checkout,
        # or if no discount codes are active.
        settings.use_editable()
        if not (settings.SHOP_DISCOUNT_FIELD_IN_CHECKOUT and
                DiscountCode.objects.active().exists()):
            self.fields["discount_code"].widget = forms.HiddenInput()

        # Determine which sets of fields to hide for each checkout step.
        # A ``hidden_filter`` function is defined that's used for
        # filtering out the fields to hide.
        is_first_step = step == checkout.CHECKOUT_STEP_FIRST
        is_last_step = step == checkout.CHECKOUT_STEP_LAST
        is_payment_step = step == checkout.CHECKOUT_STEP_PAYMENT
        hidden_filter = lambda f: False
        if settings.SHOP_CHECKOUT_STEPS_SPLIT:
            if is_first_step:
                # Hide cc fields for billing/shipping if steps are split.
                hidden_filter = lambda f: f.startswith("card_")
            elif is_payment_step:
                # Hide non-cc fields for payment if steps are split.
                hidden_filter = lambda f: not f.startswith("card_")
        elif not settings.SHOP_PAYMENT_STEP_ENABLED:
            # Hide all cc fields if payment step is not enabled.
            hidden_filter = lambda f: f.startswith("card_")
        if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION and is_last_step:
            # Hide all fields for the confirmation step.
            hidden_filter = lambda f: True
        # Hidden fields are also made optional so earlier steps' data
        # doesn't fail validation on the current step.
        for field in filter(hidden_filter, self.fields):
            self.fields[field].widget = forms.HiddenInput()
            self.fields[field].required = False

        # Set year choices for cc expiry, relative to the current year.
        year = now().year
        choices = make_choices(list(range(year, year + 21)))
        self.fields["card_expiry_year"].choices = choices

    @classmethod
    def preprocess(cls, data):
        """
        A preprocessor for the order form data that can be overridden
        by custom form classes. The default preprocessor here handles
        copying billing fields to shipping fields if "same" checked.
        """
        if data.get("same_billing_shipping", "") == "on":
            for field in data:
                bill_field = field.replace("shipping_detail", "billing_detail")
                if field.startswith("shipping_detail") and bill_field in data:
                    data[field] = data[bill_field]
        return data

    def clean_card_expiry_year(self):
        """
        Ensure the card expiry doesn't occur in the past.
        """
        try:
            month = int(self.cleaned_data["card_expiry_month"])
            year = int(self.cleaned_data["card_expiry_year"])
        except ValueError:
            # Haven't reached payment step yet.
            return
        n = now()
        if year == n.year and month < n.month:
            raise forms.ValidationError(_("A valid expiry date is required."))
        return str(year)

    def clean(self):
        """
        Raise ``ValidationError`` if any errors have been assigned
        externally, via one of the custom checkout step handlers.
        """
        if self._checkout_errors:
            raise forms.ValidationError(self._checkout_errors)
        return super(OrderForm, self).clean()
#######################
# ADMIN WIDGETS #
#######################
class ImageWidget(forms.FileInput):
    """
    File input widget that also renders a visible thumbnail, linked to
    the full-size image, when a value is present.
    """
    def render(self, name, value, attrs):
        """Render the file input, prefixed with a thumbnail link."""
        html = super(ImageWidget, self).render(name, value, attrs)
        if value:
            full_url = u"%s%s" % (settings.MEDIA_URL, value)
            thumb_url = u"%s%s" % (settings.MEDIA_URL,
                                   thumbnail(value, 48, 48))
            html = (u"<a target='_blank' href='%s'>"
                    u"<img style='margin-right:6px;' src='%s'>"
                    u"</a>%s" % (full_url, thumb_url, html))
        return mark_safe(html)
class MoneyWidget(forms.TextInput):
    """
    Text input that right-aligns money values and pads them to the
    locale's number of decimal places.
    """
    def render(self, name, value, attrs):
        """Render the input; non-numeric values are passed through."""
        try:
            amount = float(value)
        except (TypeError, ValueError):
            pass
        else:
            set_locale()
            frac_digits = localeconv()["frac_digits"]
            value = ("%%.%sf" % frac_digits) % amount
            attrs["style"] = "text-align:right;"
        return super(MoneyWidget, self).render(name, value, attrs)
class ProductAdminFormMetaclass(ModelFormMetaclass):
    """
    Metaclass for the Product Admin form that dynamically assigns each
    of the types of product options as sets of checkboxes for selecting
    which options to use when creating new product variations.
    """
    def __new__(cls, name, bases, attrs):
        # One MultipleChoiceField per configured option type, named
        # "option<N>"; the actual choices are populated later in
        # ProductAdminForm.__init__.
        for option in settings.SHOP_OPTION_TYPE_CHOICES:
            field = forms.MultipleChoiceField(label=option[1],
                required=False, widget=forms.CheckboxSelectMultiple)
            attrs["option%s" % option[0]] = field
        args = (cls, name, bases, attrs)
        return super(ProductAdminFormMetaclass, cls).__new__(*args)
class ProductAdminForm(with_metaclass(ProductAdminFormMetaclass,
                                      forms.ModelForm)):
    """
    Admin form for the Product model.
    """

    class Meta:
        # NOTE(review): no ``fields``/``exclude`` declared -- relies on
        # older Django behavior of including all model fields; confirm
        # against the targeted Django version.
        model = Product

    def __init__(self, *args, **kwargs):
        """
        Set the choices for each of the fields for product options.
        Also remove the current instance from choices for related and
        upsell products (if enabled).
        """
        super(ProductAdminForm, self).__init__(*args, **kwargs)
        # Fill in the checkbox fields created by the metaclass.
        for field, options in list(ProductOption.objects.as_fields().items()):
            self.fields[field].choices = make_choices(options)
        instance = kwargs.get("instance")
        if instance:
            # A product cannot be related to / an upsell of itself.
            queryset = Product.objects.exclude(id=instance.id)
            if settings.SHOP_USE_RELATED_PRODUCTS:
                self.fields["related_products"].queryset = queryset
            if settings.SHOP_USE_UPSELL_PRODUCTS:
                self.fields["upsell_products"].queryset = queryset
class ProductVariationAdminForm(forms.ModelForm):
    """
    Ensure the list of images for the variation are specific to the
    variation's product.
    """
    def __init__(self, *args, **kwargs):
        super(ProductVariationAdminForm, self).__init__(*args, **kwargs)
        if "instance" in kwargs:
            related_product = kwargs["instance"].product
            image_field = self.fields["image"]
            image_field.queryset = image_field.queryset.filter(
                product=related_product)
class ProductVariationAdminFormset(BaseInlineFormSet):
    """
    Ensure no more than one variation is checked as default.
    """
    def clean(self):
        super(ProductVariationAdminFormset, self).clean()
        defaults = [form for form in self.forms
                    if hasattr(form, "cleaned_data") and
                    form.cleaned_data.get("default", False)]
        if len(defaults) > 1:
            raise forms.ValidationError(
                _("Only one variation can be checked as the default."))
class DiscountAdminForm(forms.ModelForm):
    """
    Ensure only one discount field is given a value and if not, assign
    the error to the first discount field so that it displays correctly.
    """
    def clean(self):
        discount_fields = [name for name in self.fields
                           if name.startswith("discount_")]
        given = [self.cleaned_data.get(name) for name in discount_fields
                 if self.cleaned_data.get(name)]
        if len(given) > 1:
            message = _("Please enter a value for only one type of reduction.")
            self._errors[discount_fields[0]] = self.error_class([message])
        return super(DiscountAdminForm, self).clean()
| |
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
# Token names exported to PLY; any module reusing this lexer must
# expose the same tuple.
tokens = (
    'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
# Single-character punctuators handed back as literal tokens.
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
    r'\s+'
    # The docstring above is the PLY token regex -- do not edit it as prose.
    # Keep the lexer's line count in sync across multi-line whitespace.
    t.lexer.lineno += t.value.count("\n")
    return t
# '#' and '##' preprocessor operators.  PLY tries string rules in order
# of decreasing regex length, so '##' is matched before '#'.
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    # Docstring above is the token regex: hex or decimal integers with
    # optional u/l suffixes.  The token passes through unchanged.
    return t

# Alias so PLY picks the function up under its conventional rule name.
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    # Docstring above is the token regex (double-quoted string with
    # backslash escapes).  Escaped newlines inside advance line count.
    t.lexer.lineno += t.value.count("\n")
    return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    # Docstring above is the token regex: character constant 'c' or
    # wide variant L'c'.  Escaped newlines inside advance line count.
    t.lexer.lineno += t.value.count("\n")
    return t
# Comment
def t_CPP_COMMENT1(t):
    r'(/\*(.|\n)*?\*/)'
    # Docstring above is the token regex for a C block comment.
    ncr = t.value.count("\n")
    t.lexer.lineno += ncr
    # replace with one space or a number of '\n' so that both column
    # spacing and line numbering are preserved downstream
    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
    return t
# Line comment
def t_CPP_COMMENT2(t):
    r'(//.*?(\n|$))'
    # Replace the line comment with a single newline so line-based
    # processing stays in sync.
    t.type = 'CPP_WS'; t.value = '\n'
    # BUG FIX: without returning the rewritten token, the comment AND
    # its terminating newline were silently dropped from the token
    # stream (unlike t_CPP_COMMENT1 above, which returns).
    return t
def t_error(t):
    """Fallback rule: emit the offending character as a literal token
    and advance the lexer past it."""
    offending = t.value[0]
    t.type = offending
    t.value = offending
    t.lexer.skip(1)
    return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
# Matches any of the nine ANSI C trigraph sequences: "??" followed by
# one of the punctuators below.
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')

# Final character of each trigraph mapped to its replacement character.
_trigraph_rep = {
    '=':'#',
    '/':'\\',
    "'":'^',
    '(':'[',
    ')':']',
    '!':'|',
    '<':'{',
    '>':'}',
    '-':'~'
}

def trigraph(input):
    """Return *input* with every ANSI C trigraph sequence replaced by
    the single character it denotes."""
    def _substitute(m):
        return _trigraph_rep[m.group()[-1]]
    return _trigraph_pat.sub(_substitute, input)
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
    """
    Container describing a single preprocessor macro definition.

    Attributes:
        name     -- macro name (string)
        value    -- replacement token sequence (list of tokens)
        arglist  -- list of parameter names, or None for object-like macros
        variadic -- True if this is a variadic macro
        vararg   -- name of the variadic parameter (set only when variadic)
        source   -- origin of the definition; filled in elsewhere
    """
    def __init__(self, name, value, arglist=None, variadic=False):
        self.name = name
        self.value = value
        self.arglist = arglist
        self.variadic = variadic
        self.source = None
        if variadic:
            # The variadic parameter is always declared last.
            self.vararg = arglist[-1]
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self, lexer=None):
    """
    Create a preprocessor.

    Parameters:
        lexer -- PLY lexer used for tokenizing; defaults to the
                 module-level ``lex.lexer``.
                 NOTE(review): ``lex`` is assumed to be imported
                 elsewhere in this module -- confirm.
    """
    if lexer is None:
        lexer = lex.lexer
    self.lexer = lexer
    self.macros = { }       # macro name -> Macro
    self.path = []          # permanent include search path
    self.temp_path = []     # transient include search path
    # Probe the lexer for selected tokens
    self.lexprobe()

    tm = time.localtime()
    # Predefine the standard date/time macros.
    self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y", tm))
    self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S", tm))
    self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self, text):
    """Utility: tokenize *text* and return the tokens as a list."""
    self.lexer.input(text)
    collected = []
    tok = self.lexer.token()
    while tok:
        collected.append(tok)
        tok = self.lexer.token()
    return collected
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self, file, line, msg):
    """Report a preprocessor error/warning to stdout as 'file:line msg'."""
    report = "%s:%d %s" % (file, line, msg)
    print(report)
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
    """
    Probe the lexer to discover the token type names it assigns to the
    symbols the preprocessor cares about (identifiers, integers,
    strings, whitespace, newlines), storing them on ``self``.  This
    lets the preprocessor work with any suitable lexer regardless of
    how its tokens have been named.
    """
    # Determine the token type for identifiers
    self.lexer.input("identifier")
    tok = self.lexer.token()
    if not tok or tok.value != "identifier":
        print("Couldn't determine identifier type")
    else:
        self.t_ID = tok.type

    # Determine the token type for integers
    self.lexer.input("12345")
    tok = self.lexer.token()
    if not tok or int(tok.value) != 12345:
        print("Couldn't determine integer type")
    else:
        self.t_INTEGER = tok.type
        # Constructor used later to build integers of the lexer's
        # native value type.
        self.t_INTEGER_TYPE = type(tok.value)

    # Determine the token type for strings enclosed in double quotes
    self.lexer.input("\"filename\"")
    tok = self.lexer.token()
    if not tok or tok.value != "\"filename\"":
        print("Couldn't determine string type")
    else:
        self.t_STRING = tok.type

    # Determine the token type for whitespace--if any
    self.lexer.input(" ")
    tok = self.lexer.token()
    if not tok or tok.value != " ":
        self.t_SPACE = None
    else:
        self.t_SPACE = tok.type

    # Determine the token type for newlines
    self.lexer.input("\n")
    tok = self.lexer.token()
    if not tok or tok.value != "\n":
        self.t_NEWLINE = None
        print("Couldn't determine token for newlines")
    else:
        self.t_NEWLINE = tok.type

    # All whitespace-like token types, for quick membership tests.
    self.t_WS = (self.t_SPACE, self.t_NEWLINE)

    # Check for other characters used by the preprocessor
    chars = [ '<','>','#','##','\\','(',')',',','.']
    for c in chars:
        self.lexer.input(c)
        tok = self.lexer.token()
        if not tok or tok.value != c:
            print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self, path):
    """Append *path* to the preprocessor's include search path."""
    self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping into text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self, input):
    """
    Split *input* into logical lines and yield each as a list of
    tokens.  Trailing whitespace is removed from every physical line,
    and any line ending with a backslash is joined with the next line
    before lexing.  This forms the lowest level of the preprocessor:
    grouping the text into a line-by-line format.
    """
    lex = self.lexer.clone()
    lines = [x.rstrip() for x in input.splitlines()]
    # BUG FIX: xrange() does not exist on Python 3; range() is
    # equivalent here.
    for i in range(len(lines)):
        j = i + 1
        # Fold backslash-continued lines into line i, blanking the
        # consumed lines so overall line numbering is preserved.
        while lines[i].endswith('\\') and (j < len(lines)):
            lines[i] = lines[i][:-1] + lines[j]
            lines[j] = ""
            j += 1

    input = "\n".join(lines)
    lex.input(input)
    lex.lineno = 1

    current_line = []
    while True:
        tok = lex.token()
        if not tok:
            break
        current_line.append(tok)
        # A whitespace token containing a newline ends the logical line.
        if tok.type in self.t_WS and '\n' in tok.value:
            yield current_line
            current_line = []

    if current_line:
        yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self, tokens):
    """Remove leading/trailing whitespace tokens from *tokens*,
    mutating the list in place, and return it."""
    lead = 0
    while lead < len(tokens) and tokens[lead].type in self.t_WS:
        lead += 1
    del tokens[:lead]

    tail = len(tokens) - 1
    while tail >= 0 and tokens[tail].type in self.t_WS:
        tail -= 1
    del tokens[tail+1:]

    return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self, tokenlist):
    """
    Collect comma-separated macro arguments from ``tokenlist``.

    The arguments must be enclosed in parenthesis. Returns a tuple
    ``(tokencount, args, positions)`` where ``tokencount`` is the
    number of tokens consumed, ``args`` is a list of arguments (each
    a token list with leading/trailing whitespace stripped), and
    ``positions`` lists the starting index of each argument.  Nested
    parentheses -- and the commas inside them -- do not start new
    arguments.  On a malformed argument list an error is reported and
    ``(0, [], [])`` is returned.
    """
    args = []
    positions = []
    current_arg = []
    nesting = 1
    tokenlen = len(tokenlist)

    # Search for the opening '('.
    i = 0
    while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
        i += 1

    if (i < tokenlen) and (tokenlist[i].value == '('):
        positions.append(i+1)
    else:
        self.error(self.source, tokenlist[0].lineno, "Missing '(' in macro arguments")
        return 0, [], []

    i += 1

    while i < tokenlen:
        t = tokenlist[i]
        if t.value == '(':
            current_arg.append(t)
            nesting += 1
        elif t.value == ')':
            nesting -= 1
            if nesting == 0:
                # Closing paren of the whole argument list: flush the
                # final argument (if any) and report tokens consumed.
                if current_arg:
                    args.append(self.tokenstrip(current_arg))
                positions.append(i)
                return i+1, args, positions
            current_arg.append(t)
        elif t.value == ',' and nesting == 1:
            # Top-level comma terminates the current argument.
            args.append(self.tokenstrip(current_arg))
            positions.append(i+1)
            current_arg = []
        else:
            current_arg.append(t)
        i += 1

    # Missing end argument
    self.error(self.source, tokenlist[-1].lineno, "Missing ')' in macro arguments")
    return 0, [], []
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self, macro):
    """
    Examine the macro value (token sequence) and identify patch points.

    This is used to speed up macro expansion later on---we'll know
    right away where to apply patches to the value to form the
    expansion.  Populates on ``macro``:

        patch           -- (type, argnum, index) entries where type is
                           'e' (normal expansion) or 'c' (concatenation)
        str_patch       -- stringizing ('#arg') patches
        var_comma_patch -- comma positions to drop when the variadic
                           argument is empty
    """
    macro.patch = []                 # Standard macro arguments
    macro.str_patch = []             # String conversion expansion
    macro.var_comma_patch = []       # Variadic macro comma patch
    i = 0
    while i < len(macro.value):
        if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
            argnum = macro.arglist.index(macro.value[i].value)
            # Conversion of argument to a string ('#arg')
            if i > 0 and macro.value[i-1].value == '#':
                macro.value[i] = copy.copy(macro.value[i])
                macro.value[i].type = self.t_STRING
                del macro.value[i-1]
                macro.str_patch.append((argnum, i-1))
                continue
            # Concatenation: '##' immediately before the argument
            elif (i > 0 and macro.value[i-1].value == '##'):
                macro.patch.append(('c', argnum, i-1))
                del macro.value[i-1]
                continue
            # Concatenation: '##' immediately after the argument
            elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
                macro.patch.append(('c', argnum, i))
                i += 1
                continue
            # Standard expansion
            else:
                macro.patch.append(('e', argnum, i))
        elif macro.value[i].value == '##':
            # ', ## __VA_ARGS__': remember the comma position so it can
            # be deleted when the variadic argument is empty.
            if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
                    ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
                    (macro.value[i+1].value == macro.vararg):
                macro.var_comma_patch.append(i-1)
        i += 1
    # Sort patches right-to-left so applying one doesn't shift the
    # indices of those not yet applied.
    macro.patch.sort(key=lambda x: x[2], reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
    def expand_macros(self,tokens,expanded=None):
        """Macro-expand *tokens* in place and return the list.

        *expanded* maps macro names currently being expanded in this call
        chain to True; it prevents infinite recursion on self-referential
        macros.
        """
        if expanded is None:
            expanded = {}
        i = 0
        while i < len(tokens):
            t = tokens[i]
            if t.type == self.t_ID:
                if t.value in self.macros and t.value not in expanded:
                    # Yes, we found a macro match
                    expanded[t.value] = True
                    m = self.macros[t.value]
                    if not m.arglist:
                        # A simple macro
                        ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
                        for e in ex:
                            e.lineno = t.lineno
                        tokens[i:i+1] = ex
                        i += len(ex)
                    else:
                        # A macro with arguments
                        j = i + 1
                        while j < len(tokens) and tokens[j].type in self.t_WS:
                            j += 1
                        # NOTE(review): if the macro name is the very last token,
                        # tokens[j] below raises IndexError -- inherited
                        # behavior; confirm callers never hit this.
                        if tokens[j].value == '(':
                            tokcount,args,positions = self.collect_args(tokens[j:])
                            if not m.variadic and len(args) != len(m.arglist):
                                self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
                                i = j + tokcount
                            elif m.variadic and len(args) < len(m.arglist)-1:
                                if len(m.arglist) > 2:
                                    self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
                                else:
                                    self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
                                i = j + tokcount
                            else:
                                if m.variadic:
                                    # Fold all trailing call arguments into the
                                    # single variadic parameter.
                                    if len(args) == len(m.arglist)-1:
                                        args.append([])
                                    else:
                                        args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
                                        del args[len(m.arglist):]
                                # Get macro replacement text
                                rep = self.macro_expand_args(m,args)
                                rep = self.expand_macros(rep,expanded)
                                for r in rep:
                                    r.lineno = t.lineno
                                # Replace macro name + argument list with the
                                # expansion and continue scanning after it.
                                tokens[i:j+tokcount] = rep
                                i += len(rep)
                    del expanded[t.value]
                    continue
                elif t.value == '__LINE__':
                    # Built-in macro: substitute the current line number.
                    t.type = self.t_INTEGER
                    t.value = self.t_INTEGER_TYPE(t.lineno)
            i += 1
        return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_Type.Type("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except StandardError:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string/
# ----------------------------------------------------------------------
    def parsegen(self,input,source=None):
        """Preprocess *input* and yield the resulting token stream.

        *source* is the filename used for __FILE__ and error messages.
        Conditional-compilation state is tracked as (enable, iftrigger)
        pairs pushed on ifstack at each #if/#ifdef/#ifndef: 'enable' is
        False inside a false branch, 'iftrigger' is True once some branch
        of the current chain has been taken.
        """
        # Replace trigraph sequences
        t = trigraph(input)
        lines = self.group_lines(t)
        if not source:
            source = ""
        self.define("__FILE__ \"%s\"" % source)
        self.source = source
        chunk = []          # pending non-directive tokens, flushed before each directive
        enable = True
        iftrigger = False
        ifstack = []
        for x in lines:
            # Skip leading whitespace to find the first significant token.
            for i,tok in enumerate(x):
                if tok.type not in self.t_WS: break
            if tok.value == '#':
                # Preprocessor directive
                # insert necessary whitespace instead of eaten tokens
                for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
                        chunk.append(tok)
                dirtokens = self.tokenstrip(x[i+1:])
                if dirtokens:
                    name = dirtokens[0].value
                    args = self.tokenstrip(dirtokens[1:])
                else:
                    name = ""
                    args = []
                if name == 'define':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.define(args)
                elif name == 'include':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        # Save/restore __FILE__ around the recursive include.
                        oldfile = self.macros['__FILE__']
                        for tok in self.include(args):
                            yield tok
                        self.macros['__FILE__'] = oldfile
                        self.source = source
                elif name == 'undef':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.undef(args)
                elif name == 'ifdef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if not args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'ifndef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'if':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        result = self.evalexpr(args)
                        if not result:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'elif':
                    if ifstack:
                        if ifstack[-1][0]: # We only pay attention if outer "if" allows this
                            if enable: # If already true, we flip enable False
                                enable = False
                            elif not iftrigger: # If False, but not triggered yet, we'll check expression
                                result = self.evalexpr(args)
                                if result:
                                    enable = True
                                    iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
                elif name == 'else':
                    if ifstack:
                        if ifstack[-1][0]:
                            if enable:
                                enable = False
                            elif not iftrigger:
                                enable = True
                                iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
                elif name == 'endif':
                    if ifstack:
                        enable,iftrigger = ifstack.pop()
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
                else:
                    # Unknown preprocessor directive
                    pass
            else:
                # Normal text
                if enable:
                    chunk.extend(x)
        # Flush whatever is left at end of input.
        for tok in self.expand_macros(chunk):
            yield tok
        chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,(str,unicode)):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
# Demo driver: preprocess the file named on the command line and print the
# resulting token stream, one "<source> <token>" line per token.
if __name__ == '__main__':
    import ply.lex as lex
    # lex.lex() builds a lexer from the token rules defined at module level.
    lexer = lex.lex()
    # Run a preprocessor
    import sys
    f = open(sys.argv[1])
    input = f.read()
    p = Preprocessor(lexer)
    p.parse(input,sys.argv[1])
    # Pull tokens until the stream is exhausted (token() returns None).
    while True:
        tok = p.token()
        if not tok: break
        print(p.source, tok)
# ---------------------------------------------------------------------------
"""An implementation of the Python Database API Specification v2.0
using Teradata ODBC."""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import ctypes
import logging
import threading
import atexit
import platform
import re
import collections
from . import util, datatypes
from .api import * # @UnusedWildImport # noqa
logger = logging.getLogger(__name__)
# ODBC Constants
# Environment / connection attributes.
SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3
SQL_ATTR_QUERY_TIMEOUT, SQL_ATTR_AUTOCOMMIT = 0, 102
# Handle types passed to SQLAllocHandle/SQLFreeHandle.
SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3
# Return codes.
SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1,
SQL_ERROR, SQL_INVALID_HANDLE = -1, -2
SQL_NEED_DATA, SQL_NO_DATA = 99, 100
# SQLFreeStmt options.
SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS = 0, 2, 3
# Parameter binding directions and statement attributes.
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_PARAM_INPUT, SQL_PARAM_INPUT_OUTPUT, SQL_PARAM_OUTPUT = 1, 2, 4
SQL_ATTR_PARAM_BIND_TYPE = 18
SQL_ATTR_PARAMS_PROCESSED_PTR, SQL_ATTR_PARAM_STATUS_PTR = 21, 20
SQL_ATTR_PARAMSET_SIZE = 22
SQL_PARAM_BIND_BY_COLUMN = 0
# Special length/indicator values.
SQL_NULL_DATA, SQL_NTS = -1, -3
SQL_IS_POINTER, SQL_IS_UINTEGER, SQL_IS_INTEGER = -4, -5, -6
# C and SQL data type codes.
SQL_C_BINARY, SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY = -2, -2, -3, -4
SQL_C_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR = -8, -9, -10
SQL_FLOAT = 6
SQL_C_FLOAT = SQL_REAL = 7
SQL_C_DOUBLE = SQL_DOUBLE = 8
SQL_DESC_TYPE_NAME = 14
# SQLEndTran completion types.
SQL_COMMIT, SQL_ROLLBACK = 0, 1
# SQLSTATE values this module treats specially.
SQL_STATE_DATA_TRUNCATED = '01004'
SQL_STATE_CONNECTION_NOT_OPEN = '08003'
SQL_STATE_INVALID_TRANSACTION_STATE = '25000'
# ctypes aliases mirroring the ODBC C typedefs.
SQLLEN = ctypes.c_ssize_t
SQLULEN = ctypes.c_size_t
SQLUSMALLINT = ctypes.c_ushort
SQLSMALLINT = ctypes.c_short
SQLINTEGER = ctypes.c_int
SQLFLOAT = ctypes.c_float
SQLDOUBLE = ctypes.c_double
SQLBYTE = ctypes.c_ubyte
SQLWCHAR = ctypes.c_wchar
SQLRETURN = SQLSMALLINT
SQLPOINTER = ctypes.c_void_p
SQLHANDLE = ctypes.c_void_p
# Shorthand for the ctypes helpers used throughout the FFI calls.
ADDR = ctypes.byref
PTR = ctypes.POINTER
SMALL_BUFFER_SIZE = 2 ** 12
LARGE_BUFFER_SIZE = 2 ** 20
TRUE = 1
FALSE = 0
odbc = None
hEnv = None
lock = threading.Lock()
pyVer = sys.version_info[0]
osType = platform.system()
# The amount of seconds to wait when submitting non-user defined SQL (e.g.
# set query bands, etc).
QUERY_TIMEOUT = 120
if pyVer > 2:
unicode = str # @ReservedAssignment
if osType == "Darwin" or osType == "Windows":
# Mac OSx and Windows
_createBuffer = lambda l: ctypes.create_unicode_buffer(l)
_inputStr = lambda s, l = None: None if s is None else \
ctypes.create_unicode_buffer((s if util.isString(s) else str(s)), l)
_outputStr = lambda s: s.value
_convertParam = lambda s: None if s is None else (
s if util.isString(s) else str(s))
else:
# Unix/Linux
_createBuffer = lambda l: ctypes.create_string_buffer(l)
_inputStr = lambda s, l = None: None if s is None else \
ctypes.create_string_buffer((s if util.isString(s) else str(s)).encode(
'utf8'), l)
_outputStr = lambda s: unicode(s.raw.partition(b'\00')[0], 'utf8')
_convertParam = lambda s: None if s is None else (
(s if util.isString(s) else str(s)).encode('utf8'))
SQLWCHAR = ctypes.c_char
# All OdbcConnection instances currently open; maintained by
# OdbcConnection.__init__/close and drained at interpreter exit.
connections = []


def cleanupConnections():
    """Close any connections still open at interpreter exit.

    Registered via atexit; a well-behaved application closes its own
    connections, so finding any here merits a warning.
    """
    if connections:
        # logger.warn() is a deprecated alias; use logger.warning().
        logger.warning(
            "%s open connections found on exit, attempting to close...",
            len(connections))
        # Iterate over a copy: conn.close() removes the connection from
        # 'connections' while we loop.
        for conn in list(connections):
            conn.close()
def getDiagnosticInfo(handle, handleType=SQL_HANDLE_STMT):
    """Gets diagnostic information associated with ODBC calls, particularly
    when errors occur.

    Returns a list of (sqlState, message, nativeErrorCode) tuples, one per
    diagnostic record available on *handle*.  Raises InterfaceError when
    SQLGetDiagRecW itself fails.
    """
    info = []
    infoNumber = 1
    sqlState = _createBuffer(6)
    nativeError = SQLINTEGER()
    messageBuffer = _createBuffer(SMALL_BUFFER_SIZE)
    messageLength = SQLSMALLINT()
    while True:
        rc = odbc.SQLGetDiagRecW(handleType, handle, infoNumber, sqlState,
                                 ADDR(nativeError), messageBuffer,
                                 len(messageBuffer), ADDR(messageLength))
        if rc == SQL_SUCCESS_WITH_INFO and \
                messageLength.value > ctypes.sizeof(messageBuffer):
            # Resize buffer to fit entire message.
            # Retry the same record with the bigger buffer (infoNumber is
            # deliberately not advanced here).
            messageBuffer = _createBuffer(messageLength.value)
            continue
        if rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO:
            info.append(
                (_outputStr(sqlState), _outputStr(messageBuffer),
                 abs(nativeError.value)))
            infoNumber += 1
        elif rc == SQL_NO_DATA:
            # No more diagnostic records.
            return info
        elif rc == SQL_INVALID_HANDLE:
            raise InterfaceError(
                'SQL_INVALID_HANDLE',
                "Invalid handle passed to SQLGetDiagRecW.")
        elif rc == SQL_ERROR:
            raise InterfaceError(
                "SQL_ERROR", "SQL_ERROR returned from SQLGetDiagRecW.")
        else:
            # NOTE(review): rc is passed as an extra argument rather than
            # %-formatted into the message -- confirm InterfaceError accepts
            # and renders it.
            raise InterfaceError(
                "UNKNOWN_RETURN_CODE",
                "SQLGetDiagRecW returned an unknown return code: %s", rc)
def checkStatus(rc, hEnv=SQL_NULL_HANDLE, hDbc=SQL_NULL_HANDLE,
                hStmt=SQL_NULL_HANDLE, method="Method", ignore=None):
    """ Check return status code and log any information or error messages.
    If error is returned, raise exception.

    *method* labels the ODBC call in log/error text; *ignore* is an optional
    collection of SQLSTATE codes that should not raise.  Returns the list of
    SQLSTATE codes found in the handle's diagnostic records.
    """
    sqlState = []
    # NOTE(review): logger.trace is not part of stdlib logging -- presumably
    # added by this package's util module; confirm before reuse elsewhere.
    logger.trace("%s returned status code %s", method, rc)
    if rc not in (SQL_SUCCESS, SQL_NO_DATA):
        # Pull diagnostics from the most specific handle that was supplied.
        if hStmt != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hStmt, SQL_HANDLE_STMT)
        elif hDbc != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hDbc, SQL_HANDLE_DBC)
        else:
            info = getDiagnosticInfo(hEnv, SQL_HANDLE_ENV)
        for i in info:
            sqlState.append(i[0])
            if rc == SQL_SUCCESS_WITH_INFO:
                # Informational only; keep scanning remaining records.
                logger.debug(
                    u"{} succeeded with info: [{}] {}".format(method,
                                                              i[0], i[1]))
            elif not ignore or i[0] not in ignore:
                logger.debug((u"{} returned non-successful error code "
                              u"{}: [{}] {}").format(method, rc, i[0], i[1]))
                raise DatabaseError(i[2], u"[{}] {}".format(i[0], i[1]), i[0])
            else:
                logger.debug(
                    u"Ignoring return of {} from {}: [{}] {}".format(rc,
                                                                     method,
                                                                     i[0],
                                                                     i[1]))
                # Breaking here because this error is ignored and info could
                # contain older error messages.
                # E.g. if error was SQL_STATE_CONNECTION_NOT_OPEN, the next
                # error would be the original connection error.
                break
        if not info:
            logger.info(
                "No information associated with return code %s from %s",
                rc, method)
    return sqlState
def prototype(func, *args):
    """Declare the ctypes signature of an ODBC entry point.

    Every ODBC call reports status through a SQLRETURN result; *args* lists
    the argument ctypes in call order.
    """
    func.argtypes = args
    func.restype = SQLRETURN
def initFunctionPrototypes():
    """Initialize function prototypes for ODBC calls.

    Each prototype() call pins the argument/return ctypes of one ODBC entry
    point; signatures mirror the ODBC C API headers.  Must run after
    initOdbcLibrary() has loaded the 'odbc' library.
    """
    prototype(odbc.SQLAllocHandle, SQLSMALLINT, SQLHANDLE, PTR(SQLHANDLE))
    prototype(odbc.SQLGetDiagRecW, SQLSMALLINT, SQLHANDLE, SQLSMALLINT,
              PTR(SQLWCHAR), PTR(SQLINTEGER), PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT))
    prototype(odbc.SQLSetEnvAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLDriverConnectW, SQLHANDLE, SQLHANDLE,
              PTR(SQLWCHAR), SQLSMALLINT, PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT), SQLUSMALLINT)
    prototype(odbc.SQLFreeHandle, SQLSMALLINT, SQLHANDLE)
    prototype(odbc.SQLExecDirectW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumResultCols, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeColW, SQLHANDLE, SQLUSMALLINT, PTR(SQLWCHAR),
              SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLSMALLINT), PTR(SQLULEN),
              PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLColAttributeW, SQLHANDLE, SQLUSMALLINT,
              SQLUSMALLINT, SQLPOINTER, SQLSMALLINT, PTR(SQLSMALLINT),
              PTR(SQLLEN))
    prototype(odbc.SQLFetch, SQLHANDLE)
    prototype(odbc.SQLGetData, SQLHANDLE, SQLUSMALLINT,
              SQLSMALLINT, SQLPOINTER, SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLFreeStmt, SQLHANDLE, SQLUSMALLINT)
    prototype(odbc.SQLPrepareW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumParams, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeParam, SQLHANDLE, SQLUSMALLINT, PTR(
        SQLSMALLINT), PTR(SQLULEN), PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLBindParameter, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
              SQLSMALLINT, SQLSMALLINT, SQLULEN, SQLSMALLINT, SQLPOINTER,
              SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLExecute, SQLHANDLE)
    prototype(odbc.SQLSetStmtAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLMoreResults, SQLHANDLE)
    prototype(odbc.SQLDisconnect, SQLHANDLE)
    prototype(odbc.SQLSetConnectAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLEndTran, SQLSMALLINT, SQLHANDLE, SQLSMALLINT)
    prototype(odbc.SQLRowCount, SQLHANDLE, PTR(SQLLEN))
def initOdbcLibrary(odbcLibPath=None):
    """Load the platform's ODBC driver-manager library exactly once.

    *odbcLibPath* optionally overrides the default library name on
    non-Windows platforms.
    """
    global odbc
    if odbc is not None:
        # Already loaded by a previous call.
        return
    if osType == "Windows":
        odbc = ctypes.windll.odbc32
        return
    if not odbcLibPath:
        # Default driver manager: iODBC on Mac OS X, unixODBC elsewhere.
        odbcLibPath = "libiodbc.dylib" if osType == "Darwin" else 'libodbc.so'
    logger.info("Loading ODBC Library: %s", odbcLibPath)
    odbc = ctypes.cdll.LoadLibrary(odbcLibPath)
def initOdbcEnv():
    """Initialize ODBC environment handle.

    Idempotent: only the first call allocates the shared hEnv, registers the
    atexit cleanup hooks, and selects ODBC 3.0 behavior.
    """
    global hEnv
    if hEnv is None:
        hEnv = SQLPOINTER()
        rc = odbc.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(hEnv))
        checkStatus(rc, hEnv=hEnv)
        # atexit runs LIFO: connections are closed (cleanupConnections,
        # registered second) before the environment handle is freed.
        atexit.register(cleanupOdbcEnv)
        atexit.register(cleanupConnections)
        # Set the ODBC environment's compatibility level to ODBC 3.0
        rc = odbc.SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0)
        checkStatus(rc, hEnv=hEnv)
def cleanupOdbcEnv():
    """Free the process-wide ODBC environment handle (atexit hook)."""
    if not hEnv:
        return
    odbc.SQLFreeHandle(SQL_HANDLE_ENV, hEnv)
def init(odbcLibPath=None):
    """Initialize the ODBC layer: library, prototypes, environment handle.

    Safe to call repeatedly; the helpers are no-ops once their globals are
    set, and the module lock serializes first-time setup across threads.
    """
    # 'with' replaces the manual acquire/try/finally-release pattern; the
    # previous code acquired the lock *inside* the try block, so a failed
    # acquire would have released a lock it never held.
    with lock:
        initOdbcLibrary(odbcLibPath)
        initFunctionPrototypes()
        initOdbcEnv()
class OdbcConnection:
    """Represents a Connection to Teradata using ODBC.

    Implements the DB-API v2.0 connection surface (cursor/commit/rollback/
    close) plus context-manager support.  Open instances register themselves
    in the module-level 'connections' list for atexit cleanup.
    """
    # NOTE(review): the dataTypeConverter default below is constructed once at
    # import time and shared by every connection that does not pass its own --
    # confirm DefaultDataTypeConverter is stateless.
    def __init__(self, dbType="Teradata", system=None,
                 username=None, password=None, autoCommit=False,
                 transactionMode=None, queryBands=None, odbcLibPath=None,
                 dataTypeConverter=datatypes.DefaultDataTypeConverter(),
                 **kwargs):
        """Creates an ODBC connection.

        Extra keyword arguments are passed straight through into the ODBC
        connect string (e.g. dsn=..., authentication options).
        """
        self.hDbc = SQLPOINTER()
        self.cursorCount = 0
        self.sessionno = 0
        self.cursors = []
        self.dbType = dbType
        self.converter = dataTypeConverter
        connections.append(self)
        # Build connect string
        extraParams = set(k.lower() for k in kwargs)
        connectParams = collections.OrderedDict()
        if "dsn" not in extraParams:
            # DSN-less connection: spell out the driver and credentials.
            connectParams["DRIVER"] = dbType
            if system:
                connectParams["DBCNAME"] = system
        if username:
            connectParams["UID"] = username
        if password:
            connectParams["PWD"] = password
        if transactionMode:
            # The driver expects "Teradata" for TERA mode; ANSI passes as-is.
            connectParams["SESSIONMODE"] = "Teradata" \
                if transactionMode == "TERA" else transactionMode
        connectParams.update(kwargs)
        connectString = u";".join(u"{}={}".format(key, value)
                                  for key, value in connectParams.items())
        # Initialize connection handle
        init(odbcLibPath)
        rc = odbc.SQLAllocHandle(SQL_HANDLE_DBC, hEnv, ADDR(self.hDbc))
        checkStatus(rc, hEnv=hEnv, method="SQLAllocHandle")
        # Create connection; mask the password before logging.
        logger.debug("Creating connection using ODBC ConnectString: %s",
                     re.sub("PWD=.*?(;|$)", "PWD=XXX;", connectString))
        try:
            lock.acquire()
            rc = odbc.SQLDriverConnectW(self.hDbc, 0, _inputStr(connectString),
                                        SQL_NTS, None, 0, None, 0)
        finally:
            lock.release()
        checkStatus(rc, hDbc=self.hDbc, method="SQLDriverConnectW")
        # Setup autocommit, query bands, etc.
        try:
            logger.debug("Setting AUTOCOMMIT to %s",
                         "True" if util.booleanValue(autoCommit) else "False")
            rc = odbc.SQLSetConnectAttr(
                self.hDbc, SQL_ATTR_AUTOCOMMIT,
                TRUE if util.booleanValue(autoCommit) else FALSE, 0)
            checkStatus(
                rc, hDbc=self.hDbc,
                method="SQLSetConnectAttr - SQL_ATTR_AUTOCOMMIT")
            if dbType == "Teradata":
                # Capture the session number and apply query bands.
                with self.cursor() as c:
                    self.sessionno = c.execute(
                        "SELECT SESSION",
                        queryTimeout=QUERY_TIMEOUT).fetchone()[0]
                    logger.debug("SELECT SESSION returned %s", self.sessionno)
                    if queryBands:
                        c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format(
                            u";".join(u"{}={}".format(util.toUnicode(k),
                                                      util.toUnicode(v))
                                      for k, v in queryBands.items())),
                            queryTimeout=QUERY_TIMEOUT)
                self.commit()
            logger.debug("Created session %s.", self.sessionno)
        except Exception:
            # Setup failed: release the half-open connection before
            # propagating.
            self.close()
            raise

    def close(self):
        """Closes an ODBC Connection."""
        if self.hDbc:
            if self.sessionno:
                logger.debug("Closing session %s...", self.sessionno)
            for cursor in list(self.cursors):
                cursor.close()
            rc = odbc.SQLDisconnect(self.hDbc)
            sqlState = checkStatus(
                rc, hDbc=self.hDbc, method="SQLDisconnect",
                ignore=[SQL_STATE_CONNECTION_NOT_OPEN,
                        SQL_STATE_INVALID_TRANSACTION_STATE])
            if SQL_STATE_INVALID_TRANSACTION_STATE in sqlState:
                # An open transaction blocks disconnect: roll it back and
                # retry the disconnect once.
                logger.warning("Rolling back open transaction for session %s "
                               "so it can be closed.", self.sessionno)
                rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
                checkStatus(
                    rc, hDbc=self.hDbc,
                    method="SQLEndTran - SQL_ROLLBACK - Disconnect")
                rc = odbc.SQLDisconnect(self.hDbc)
                checkStatus(rc, hDbc=self.hDbc, method="SQLDisconnect")
            rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
            if rc != SQL_INVALID_HANDLE:
                checkStatus(rc, hDbc=self.hDbc, method="SQLFreeHandle")
            connections.remove(self)
            self.hDbc = None
            if self.sessionno:
                logger.debug("Session %s closed.", self.sessionno)

    def commit(self):
        """Commits a transaction."""
        logger.debug("Committing transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_COMMIT)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_COMMIT")

    def rollback(self):
        """Rolls back a transaction."""
        logger.debug("Rolling back transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_ROLLBACK")

    def cursor(self):
        """Returns a cursor."""
        cursor = OdbcCursor(
            self, self.dbType, self.converter, self.cursorCount)
        self.cursorCount += 1
        return cursor

    def __del__(self):
        # Last-resort cleanup; close() is idempotent once hDbc is None.
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, value, traceback):
        self.close()

    def __repr__(self):
        return "OdbcConnection(sessionno={})".format(self.sessionno)


# DB-API v2.0 module-level constructor alias.
connect = OdbcConnection
class OdbcCursor (util.Cursor):
"""Represents an ODBC Cursor."""
    def __init__(self, connection, dbType, converter, num):
        """Allocate an ODBC statement handle bound to *connection*.

        *num* is the connection-local cursor index (0 is the implicit cursor
        created during connection setup, so only later ones are logged).
        """
        util.Cursor.__init__(self, connection, dbType, converter)
        self.num = num
        self.moreResults = None
        if num > 0:
            logger.debug(
                "Creating cursor %s for session %s.", self.num,
                self.connection.sessionno)
        self.hStmt = SQLPOINTER()
        rc = odbc.SQLAllocHandle(
            SQL_HANDLE_STMT, connection.hDbc, ADDR(self.hStmt))
        checkStatus(rc, hStmt=self.hStmt)
        # Register with the connection so close() can release all cursors.
        connection.cursors.append(self)
def callproc(self, procname, params, queryTimeout=0):
query = "CALL {} (".format(procname)
for i in range(0, len(params)):
if i > 0:
query += ", "
query += "?"
query += ")"
logger.debug("Executing Procedure: %s", query)
self.execute(query, params, queryTimeout=queryTimeout)
return util.OutParams(params, self.dbType, self.converter)
def close(self):
if self.hStmt:
if self.num > 0:
logger.debug(
"Closing cursor %s for session %s.", self.num,
self.connection.sessionno)
rc = odbc.SQLFreeHandle(SQL_HANDLE_STMT, self.hStmt)
checkStatus(rc, hStmt=self.hStmt)
self.connection.cursors.remove(self)
self.hStmt = None
def _setQueryTimeout(self, queryTimeout):
rc = odbc.SQLSetStmtAttr(
self.hStmt, SQL_ATTR_QUERY_TIMEOUT, SQLPOINTER(queryTimeout),
SQL_IS_UINTEGER)
checkStatus(
rc, hStmt=self.hStmt,
method="SQLSetStmtStmtAttr - SQL_ATTR_QUERY_TIMEOUT")
def execute(self, query, params=None, queryTimeout=0):
if params:
self.executemany(query, [params, ], queryTimeout)
else:
if self.connection.sessionno:
logger.debug(
"Executing query on session %s using SQLExecDirectW: %s",
self.connection.sessionno, query)
self._free()
self._setQueryTimeout(queryTimeout)
rc = odbc.SQLExecDirectW(
self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
checkStatus(rc, hStmt=self.hStmt, method="SQLExecDirectW")
self._handleResults()
return self
    def executemany(self, query, params, batch=False, queryTimeout=0):
        """Prepare *query* once and execute it for each parameter set.

        *params* is a sequence of parameter sequences.  With batch=True the
        sets are bound column-wise and executed in a single round trip;
        otherwise each set is bound and executed individually.  Returns self.
        """
        self._free()
        # Prepare the query
        rc = odbc.SQLPrepareW(
            self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
        checkStatus(rc, hStmt=self.hStmt, method="SQLPrepare")
        self._setQueryTimeout(queryTimeout)
        # Get the number of parameters in the SQL statement.
        numParams = SQLSMALLINT()
        rc = odbc.SQLNumParams(self.hStmt, ADDR(numParams))
        checkStatus(rc, hStmt=self.hStmt, method="SQLNumParams")
        numParams = numParams.value
        # The argument types, as described by the driver for each '?' marker.
        dataTypes = []
        for paramNum in range(0, numParams):
            dataType = SQLSMALLINT()
            parameterSize = SQLULEN()
            decimalDigits = SQLSMALLINT()
            nullable = SQLSMALLINT()
            rc = odbc.SQLDescribeParam(
                self.hStmt, paramNum + 1, ADDR(dataType), ADDR(parameterSize),
                ADDR(decimalDigits), ADDR(nullable))
            checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeParams")
            dataTypes.append(dataType.value)
        if batch:
            logger.debug(
                "Executing query on session %s using batched SQLExecute: %s",
                self.connection.sessionno, query)
            self._executeManyBatch(params, numParams, dataTypes)
        else:
            logger.debug(
                "Executing query on session %s using SQLExecute: %s",
                self.connection.sessionno, query)
            # One parameter set per SQLExecute round trip.
            rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMSET_SIZE, 1, 0)
            checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
            paramSetNum = 0
            for p in params:
                paramSetNum += 1
                logger.trace("ParamSet %s: %s", paramSetNum, p)
                if len(p) != numParams:
                    raise InterfaceError(
                        "PARAMS_MISMATCH", "The number of supplied parameters "
                        "({}) does not match the expected number of "
                        "parameters ({}).".format(len(p), numParams))
                # paramArray/lengthArray keep the ctypes buffers alive until
                # SQLExecute has consumed them.
                paramArray = []
                lengthArray = []
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    inputOutputType = _getInputOutputType(val)
                    valueType, paramType = _getParamValueType(
                        dataTypes[paramNum])
                    param, length = _getParamValue(val, valueType, False)
                    paramArray.append(param)
                    if param is not None:
                        if valueType == SQL_C_BINARY:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                        elif valueType == SQL_C_DOUBLE:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                            # Doubles are bound by reference to the ctypes value.
                            param = ADDR(param)
                        else:
                            # String data: NUL-terminated, length via SQL_NTS.
                            bufSize = SQLLEN(ctypes.sizeof(param))
                            lengthArray.append(SQLLEN(SQL_NTS))
                            columnSize = SQLULEN(length)
                    else:
                        # NULL value: signal with SQL_NULL_DATA indicator.
                        bufSize = SQLLEN(0)
                        columnSize = SQLULEN(0)
                        lengthArray.append(SQLLEN(SQL_NULL_DATA))
                    logger.trace("Binding parameter %s...", paramNum + 1)
                    rc = odbc.SQLBindParameter(
                        self.hStmt, paramNum + 1, inputOutputType, valueType,
                        paramType, columnSize, 0, param, bufSize,
                        ADDR(lengthArray[paramNum]))
                    checkStatus(
                        rc, hStmt=self.hStmt, method="SQLBindParameter")
                logger.debug("Executing prepared statement.")
                rc = odbc.SQLExecute(self.hStmt)
                # Propagate output-parameter sizes back to OutParam holders.
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    if isinstance(val, OutParam):
                        val.size = lengthArray[paramNum].value
                checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
        self._handleResults()
        return self
def _executeManyBatch(self, params, numParams, dataTypes):
    """Execute the prepared statement once for an array of parameter sets.

    Uses ODBC column-wise parameter-array binding: for each parameter
    position one contiguous buffer holds that column's value for every
    parameter set, with a parallel SQLLEN array carrying the per-set
    length / NULL indicators.

    Args:
        params: sequence of parameter sets, one per row to execute.
        numParams: expected number of parameters in each set.
        dataTypes: SQL data-type codes, one per parameter position.

    Raises:
        InterfaceError: if any parameter set's length differs from
            ``numParams``.
    """
    # Get the number of parameter sets.
    paramSetSize = len(params)
    # Set the SQL_ATTR_PARAM_BIND_TYPE statement attribute to use
    # column-wise binding.
    rc = odbc.SQLSetStmtAttr(
        self.hStmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, 0)
    checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
    # Specify the number of elements in each parameter array.
    rc = odbc.SQLSetStmtAttr(
        self.hStmt, SQL_ATTR_PARAMSET_SIZE, paramSetSize, 0)
    checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
    # Specify a PTR to get the number of parameters processed.
    # paramsProcessed = SQLULEN()
    # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMS_PROCESSED_PTR,
    #                          ADDR(paramsProcessed), SQL_IS_POINTER)
    # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
    # Specify a PTR to get the status of the parameters processed.
    # paramsStatus = (SQLUSMALLINT * paramSetSize)()
    # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAM_STATUS_PTR,
    #                          ADDR(paramsStatus), SQL_IS_POINTER)
    # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
    # Bind the parameters.
    paramArrays = []
    lengthArrays = []
    paramSetSize = len(params)
    paramSetNum = 0
    # First pass: validate every parameter set's arity before any
    # buffers are built, so we fail fast on malformed input.
    for p in params:
        paramSetNum += 1
        logger.debug("ParamSet %s: %s", paramSetNum, p)
        if len(p) != numParams:
            raise InterfaceError(
                "PARAMS_MISMATCH", "The number of supplied parameters "
                "({}) does not match the expected number of parameters "
                "({}).".format(len(p), numParams))
    # Second pass: build one column buffer per parameter position.
    # NOTE: ``p`` is reused here as the per-column list of converted values.
    for paramNum in range(0, numParams):
        p = []
        valueType, paramType = _getParamValueType(dataTypes[paramNum])
        maxLen = 0
        # Collect this column's value from every parameter set and track
        # the widest one so a uniform element size can be used.
        for paramSetNum in range(0, paramSetSize):
            param, length = _getParamValue(
                params[paramSetNum][paramNum], valueType, True)
            if length > maxLen:
                maxLen = length
            p.append(param)
        logger.debug(
            "Max length for parameter %s is %s.", paramNum + 1, maxLen)
        if valueType == SQL_C_BINARY:
            valueSize = SQLLEN(maxLen)
            paramArrays.append((SQLBYTE * (paramSetSize * maxLen))())
        elif valueType == SQL_C_DOUBLE:
            valueSize = SQLLEN(maxLen)
            paramArrays.append((SQLDOUBLE * paramSetSize)())
        else:
            # Leave room for a NUL terminator on wide-char data.
            maxLen += 1
            valueSize = SQLLEN(ctypes.sizeof(SQLWCHAR) * maxLen)
            paramArrays.append(_createBuffer(paramSetSize * maxLen))
        lengthArrays.append((SQLLEN * paramSetSize)())
        # Copy each set's value into its slot of the column buffer and
        # record its length (or the NULL marker).
        for paramSetNum in range(0, paramSetSize):
            index = paramSetNum * maxLen
            if p[paramSetNum] is not None:
                if valueType == SQL_C_DOUBLE:
                    paramArrays[paramNum][paramSetNum] = p[paramSetNum]
                else:
                    for c in p[paramSetNum]:
                        paramArrays[paramNum][index] = c
                        index += 1
                    if valueType == SQL_C_BINARY:
                        lengthArrays[paramNum][
                            paramSetNum] = len(p[paramSetNum])
                    else:
                        lengthArrays[paramNum][
                            paramSetNum] = SQLLEN(SQL_NTS)
                        # NUL-terminate the wide-char value in place.
                        paramArrays[paramNum][
                            index] = _convertParam("\x00")[0]
            else:
                lengthArrays[paramNum][paramSetNum] = SQLLEN(SQL_NULL_DATA)
                if valueType == SQL_C_WCHAR:
                    paramArrays[paramNum][index] = _convertParam("\x00")[0]
        logger.trace("Binding parameter %s...", paramNum + 1)
        rc = odbc.SQLBindParameter(self.hStmt, paramNum + 1,
                                   SQL_PARAM_INPUT, valueType, paramType,
                                   SQLULEN(maxLen), 0,
                                   paramArrays[paramNum], valueSize,
                                   lengthArrays[paramNum])
        checkStatus(rc, hStmt=self.hStmt, method="SQLBindParameter")
    # Execute the SQL statement.
    logger.debug("Executing prepared statement.")
    rc = odbc.SQLExecute(self.hStmt)
    checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
def _handleResults(self):
    """Inspect the executed statement and set up cursor result metadata.

    Resets the DB-API cursor attributes, queries the column and row
    counts and, when the statement produced a result set, populates
    ``description``/``columns``/``types`` and creates the row iterator.
    """
    # Reset cursor attributes.
    self.description = None
    self.rowcount = -1
    self.rownumber = None
    self.columns = {}
    self.types = []
    self.moreResults = None
    # Get column count in result set.
    columnCount = SQLSMALLINT()
    rc = odbc.SQLNumResultCols(self.hStmt, ADDR(columnCount))
    checkStatus(rc, hStmt=self.hStmt, method="SQLNumResultCols")
    rowCount = SQLLEN()
    rc = odbc.SQLRowCount(self.hStmt, ADDR(rowCount))
    checkStatus(rc, hStmt=self.hStmt, method="SQLRowCount")
    self.rowcount = rowCount.value
    # Get column meta data and create row iterator.
    if columnCount.value > 0:
        self.description = []
        nameBuf = _createBuffer(SMALL_BUFFER_SIZE)
        nameLength = SQLSMALLINT()
        dataType = SQLSMALLINT()
        columnSize = SQLULEN()
        decimalDigits = SQLSMALLINT()
        nullable = SQLSMALLINT()
        for col in range(0, columnCount.value):
            rc = odbc.SQLDescribeColW(
                self.hStmt, col + 1, nameBuf, len(nameBuf),
                ADDR(nameLength), ADDR(dataType), ADDR(columnSize),
                ADDR(decimalDigits), ADDR(nullable))
            checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeColW")
            columnName = _outputStr(nameBuf)
            # BUG FIX: capture the return code of SQLColAttributeW so
            # checkStatus validates this call instead of re-checking the
            # stale SQLDescribeColW status.
            rc = odbc.SQLColAttributeW(
                self.hStmt, col + 1, SQL_DESC_TYPE_NAME, ADDR(nameBuf),
                len(nameBuf), None, None)
            checkStatus(rc, hStmt=self.hStmt, method="SQLColAttributeW")
            typeName = _outputStr(nameBuf)
            typeCode = self.converter.convertType(self.dbType, typeName)
            # Column lookup is case-insensitive (keys stored lowercased).
            self.columns[columnName.lower()] = col
            self.types.append((typeName, typeCode))
            self.description.append((
                columnName, typeCode, None, columnSize.value,
                decimalDigits.value, None, nullable.value))
        self.iterator = rowIterator(self)
def nextset(self):
    """Advance the cursor to the next result set (DB-API ``nextset``).

    Returns True when another result set was found and made current;
    returns None (implicitly) when there are no more result sets.
    """
    if self.moreResults is None:
        self._checkForMoreResults()
    if not self.moreResults:
        return None
    self._handleResults()
    return True
def _checkForMoreResults(self):
    """Ask the driver whether another result set is pending.

    Caches the boolean answer on ``self.moreResults`` and returns it.
    """
    rc = odbc.SQLMoreResults(self.hStmt)
    checkStatus(rc, hStmt=self.hStmt, method="SQLMoreResults")
    self.moreResults = rc in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO)
    return self.moreResults
def _free(self):
    """Close the open cursor and release all bound parameter buffers."""
    for option, label in ((SQL_CLOSE, "SQL_CLOSE"),
                          (SQL_RESET_PARAMS, "SQL_RESET_PARAMS")):
        rc = odbc.SQLFreeStmt(self.hStmt, option)
        checkStatus(
            rc, hStmt=self.hStmt, method="SQLFreeStmt - " + label)
def _convertLineFeeds(query):
    """Rejoin the query's lines with carriage returns."""
    lines = util.linesplit(query)
    return "\r".join(lines)
def _getInputOutputType(val):
    """Map a parameter wrapper to its ODBC input/output type constant."""
    # InOutParam is tested before OutParam so the combined wrapper is
    # not misclassified as output-only.
    if isinstance(val, InOutParam):
        return SQL_PARAM_INPUT_OUTPUT
    if isinstance(val, OutParam):
        return SQL_PARAM_OUTPUT
    # Plain values are input-only parameters.
    return SQL_PARAM_INPUT
def _getParamValueType(dataType):
    """Return the ``(C value type, SQL param type)`` pair for ``dataType``."""
    if dataType in (SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY):
        return SQL_C_BINARY, dataType
    if dataType == SQL_WLONGVARCHAR:
        return SQL_C_WCHAR, SQL_WLONGVARCHAR
    if dataType in (SQL_FLOAT, SQL_DOUBLE, SQL_REAL):
        return SQL_C_DOUBLE, SQL_DOUBLE
    # Everything else is bound as a wide-character string.
    return SQL_C_WCHAR, SQL_WVARCHAR
def _getParamValue(val, valueType, batch):
    """Convert a Python value into a bindable ODBC parameter buffer.

    Args:
        val: the raw value, possibly wrapped in InOutParam/OutParam.
        valueType: the C type constant chosen by ``_getParamValueType``.
        batch: True when building column-wise arrays for executemany;
            the caller then copies the value into a shared buffer itself.

    Returns:
        tuple: ``(param, length)`` where ``param`` is the
        ctypes-compatible buffer (or None for SQL NULL) and ``length``
        is its length in bytes/characters.

    Raises:
        InterfaceError: if a BINARY parameter is not backed by a
            bytearray.
    """
    length = 0
    if val is None:
        # SQL NULL: no buffer at all.
        param = None
    elif valueType == SQL_C_BINARY:
        ba = val
        if isinstance(val, InOutParam):
            ba = val.inValue
        elif isinstance(val, OutParam):
            # Output-only: allocate a receive buffer of the requested size.
            ba = bytearray(SMALL_BUFFER_SIZE if val.size is None else val.size)
        if not isinstance(ba, bytearray):
            raise InterfaceError("Expected bytearray for BINARY parameter.")
        length = len(ba)
        if batch:
            param = ba
        else:
            # Wrap the bytearray without copying so the driver writes
            # straight into it.
            byteArr = SQLBYTE * length
            param = byteArr.from_buffer(ba)
        if isinstance(val, OutParam):
            # Register how to read the (possibly shorter) result later.
            val.setValueFunc(lambda: ba[:val.size])
    elif valueType == SQL_C_DOUBLE:
        f = val
        if isinstance(val, InOutParam):
            f = val.inValue
        elif isinstance(val, OutParam):
            f = float(0)
        # Accept numeric strings as well as numbers.
        param = SQLDOUBLE(f if not util.isString(f) else float(f))
        length = ctypes.sizeof(param)
        if isinstance(val, OutParam):
            val.setValueFunc(lambda: param.value)
    else:
        # Wide-character string parameter.
        if batch:
            param = _convertParam(val)
            length = len(param)
        elif isinstance(val, InOutParam):
            length = SMALL_BUFFER_SIZE if val.size is None else val.size
            param = _inputStr(val.inValue, length)
            val.setValueFunc(lambda: _outputStr(param))
        elif isinstance(val, OutParam):
            length = SMALL_BUFFER_SIZE if val.size is None else val.size
            param = _createBuffer(length)
            val.setValueFunc(lambda: _outputStr(param))
        else:
            param = _inputStr(val)
            length = len(param)
    return param, length
def rowIterator(cursor):
    """ Generator function for iterating over the rows in a result set. """
    # One shared fetch buffer is reused for every column of every row.
    buf = _createBuffer(LARGE_BUFFER_SIZE)
    bufSize = ctypes.sizeof(buf)
    length = SQLLEN()
    while cursor.description is not None:
        rc = odbc.SQLFetch(cursor.hStmt)
        checkStatus(rc, hStmt=cursor.hStmt, method="SQLFetch")
        if rc == SQL_NO_DATA:
            # End of this result set.
            break
        values = []
        # Get each column in the row.
        for col in range(1, len(cursor.description) + 1):
            val = None
            # Choose the C type to fetch as; default is wide-char text.
            dataType = SQL_C_WCHAR
            if cursor.description[col - 1][1] == BINARY:
                dataType = SQL_C_BINARY
            elif cursor.types[col - 1][0] in datatypes.FLOAT_TYPES:
                dataType = SQL_C_DOUBLE
            rc = odbc.SQLGetData(
                cursor.hStmt, col, dataType, buf, bufSize, ADDR(length))
            sqlState = checkStatus(rc, hStmt=cursor.hStmt, method="SQLGetData")
            if length.value != SQL_NULL_DATA:
                if SQL_STATE_DATA_TRUNCATED in sqlState:
                    # Value is larger than the buffer: fetch the rest in
                    # additional SQLGetData calls.
                    logger.debug(
                        "Data truncated. Calling SQLGetData to get next part "
                        "of data for column %s of size %s.",
                        col, length.value)
                    if dataType == SQL_C_BINARY:
                        # length.value holds the total size; copy the first
                        # chunk then read the remainder directly into the
                        # target bytearray (zero-copy via from_buffer).
                        val = bytearray(length.value)
                        val[0:bufSize] = (
                            ctypes.c_ubyte * bufSize).from_buffer(buf)
                        newBufSize = len(val) - bufSize
                        newBuffer = (ctypes.c_ubyte * newBufSize).from_buffer(
                            val, bufSize)
                        rc = odbc.SQLGetData(
                            cursor.hStmt, col, dataType, newBuffer,
                            newBufSize, ADDR(length))
                        checkStatus(
                            rc, hStmt=cursor.hStmt, method="SQLGetData2")
                    else:
                        # Text: accumulate chunks until no longer truncated.
                        val = [_outputStr(buf), ]
                        while SQL_STATE_DATA_TRUNCATED in sqlState:
                            rc = odbc.SQLGetData(
                                cursor.hStmt, col, dataType, buf, bufSize,
                                ADDR(length))
                            sqlState = checkStatus(
                                rc, hStmt=cursor.hStmt, method="SQLGetData2")
                            val.append(_outputStr(buf))
                        val = "".join(val)
                else:
                    # Whole value fit into the shared buffer.
                    if dataType == SQL_C_BINARY:
                        val = bytearray(
                            (ctypes.c_ubyte * length.value).from_buffer(buf))
                    elif dataType == SQL_C_DOUBLE:
                        val = ctypes.c_double.from_buffer(buf).value
                    else:
                        val = _outputStr(buf)
            values.append(val)
        yield values
    # Exhausted: free the statement unless another result set is pending.
    if not cursor._checkForMoreResults():
        cursor._free()
| |
"""
Support for the Mercurial SCM
"""
import logging
import salt.utils.data
import salt.utils.path
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if hg is installed
    """
    if salt.utils.path.which("hg") is not None:
        return True
    return (False, "The hg execution module cannot be loaded: hg unavailable.")
def _ssh_flag(identity_path):
return ["--ssh", "ssh -i {}".format(identity_path)]
def revision(cwd, rev="tip", short=False, user=None):
    """
    Returns the long hash of a given identifier (hash, branch, tag, HEAD, etc)

    cwd
        The path to the Mercurial repository

    rev: tip
        The revision

    short: False
        Return an abbreviated commit hash

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' hg.revision /path/to/repo mybranch
    """
    cmd = ["hg", "id", "-i"]
    # BUG FIX: only add --debug when the full hash is wanted. Previously
    # an empty-string element was appended for short=True, which hg
    # received as a literal empty positional argument.
    if not short:
        cmd.append("--debug")
    cmd.extend(["-r", "{}".format(rev)])
    result = __salt__["cmd.run_all"](cmd, cwd=cwd, runas=user, python_shell=False)
    if result["retcode"] == 0:
        return result["stdout"]
    # Unknown revision (or other failure): return an empty identifier.
    return ""
def describe(cwd, rev="tip", user=None):
    """
    Mimic git describe and return an identifier for the given revision

    cwd
        The path to the Mercurial repository

    rev: tip
        The revision to describe

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' hg.describe /path/to/repo
    """
    # NOTE(review): the doubled braces reach hg literally as
    # '{{latesttag}}-...' since this string never goes through str.format;
    # confirm the template actually expands -- on failure revision() below
    # serves as the fallback because hg prints the literal/empty output.
    cmd = [
        "hg",
        "log",
        "-r",
        "{}".format(rev),
        "--template",
        "'{{latesttag}}-{{latesttagdistance}}-{{node|short}}'",
    ]
    desc = __salt__["cmd.run_stdout"](cmd, cwd=cwd, runas=user, python_shell=False)
    # Fall back to the short commit hash when no description was produced.
    return desc or revision(cwd, rev, short=True)
def archive(cwd, output, rev="tip", fmt=None, prefix=None, user=None):
    """
    Export a tarball from the repository

    cwd
        The path to the Mercurial repository

    output
        The path to the archive tarball

    rev: tip
        The revision to create an archive from

    fmt: None
        Format of the resulting archive. Mercurial supports: tar,
        tbz2, tgz, zip, uzip, and files formats.

    prefix : None
        Prepend <prefix>/ to every filename in the archive

    user : None
        Run hg as a user other than what the minion runs as

    If ``prefix`` is not specified it defaults to the basename of the repo
    directory.

    CLI Example:

    .. code-block:: bash

        salt '*' hg.archive /path/to/repo output=/tmp/archive.tgz fmt=tgz
    """
    cmd = ["hg", "archive", "{}".format(output), "--rev", "{}".format(rev)]
    if fmt:
        cmd.extend(["--type", "{}".format(fmt)])
    if prefix:
        cmd.extend(["--prefix", '"{}"'.format(prefix)])
    return __salt__["cmd.run"](cmd, cwd=cwd, runas=user, python_shell=False)
def pull(cwd, opts=None, user=None, identity=None, repository=None):
    """
    Perform a pull on the given repository

    cwd
        The path to the Mercurial repository

    repository : None
        Perform pull from the repository different from .hg/hgrc:[paths]:default

    opts : None
        Any additional options to add to the command line

    user : None
        Run hg as a user other than what the minion runs as

    identity : None
        Private SSH key on the minion server for authentication (ssh://)

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' hg.pull /path/to/repo opts=-u
    """
    cmd = ["hg", "pull"]
    if identity:
        cmd += _ssh_flag(identity)
    if opts:
        cmd += opts.split()
    if repository is not None:
        cmd.append(repository)
    ret = __salt__["cmd.run_all"](cmd, cwd=cwd, runas=user, python_shell=False)
    if ret["retcode"] == 0:
        return ret["stdout"]
    raise CommandExecutionError(
        "Hg command failed: {}".format(ret.get("stderr", ret["stdout"]))
    )
def update(cwd, rev, force=False, user=None):
    """
    Update to a given revision

    cwd
        The path to the Mercurial repository

    rev
        The revision to update to

    force : False
        Force an update

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt devserver1 hg.update /path/to/repo somebranch
    """
    cmd = ["hg", "update", "{}".format(rev)] + (["-C"] if force else [])
    ret = __salt__["cmd.run_all"](cmd, cwd=cwd, runas=user, python_shell=False)
    if ret["retcode"] == 0:
        return ret["stdout"]
    raise CommandExecutionError(
        "Hg command failed: {}".format(ret.get("stderr", ret["stdout"]))
    )
def clone(cwd, repository, opts=None, user=None, identity=None):
    """
    Clone a new repository

    cwd
        The path to the Mercurial repository

    repository
        The hg URI of the repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run hg as a user other than what the minion runs as

    identity : None
        Private SSH key on the minion server for authentication (ssh://)

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' hg.clone /path/to/repo https://bitbucket.org/birkenfeld/sphinx
    """
    cmd = ["hg", "clone", "{}".format(repository), "{}".format(cwd)]
    if opts:
        cmd += ["{}".format(opt) for opt in opts.split()]
    if identity:
        cmd += _ssh_flag(identity)
    ret = __salt__["cmd.run_all"](cmd, runas=user, python_shell=False)
    if ret["retcode"] == 0:
        return ret["stdout"]
    raise CommandExecutionError(
        "Hg command failed: {}".format(ret.get("stderr", ret["stdout"]))
    )
def status(cwd, opts=None, user=None):
    """
    Show changed files of the given repository

    cwd
        The path to the Mercurial repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' hg.status /path/to/repo
    """

    def _status(single_cwd):
        # Run ``hg status`` for one repository and bucket files by state.
        cmd = ["hg", "status"]
        if opts:
            cmd += ["{}".format(opt) for opt in opts.split()]
        out = __salt__["cmd.run_stdout"](
            cmd, cwd=single_cwd, runas=user, python_shell=False
        )
        # Mapping of hg's single-letter status codes to readable names.
        statuses = {
            "M": "modified",
            "A": "added",
            "R": "removed",
            "C": "clean",
            "!": "missing",
            "?": "not tracked",
            "I": "ignored",
            " ": "origin of the previous file",
        }
        changes = {}
        for line in out.splitlines():
            changes.setdefault(statuses[line[0]], []).append(line[2:])
        return changes

    if salt.utils.data.is_iter(cwd):
        # A list of repositories: return a per-repo mapping.
        return {repo: _status(repo) for repo in cwd}
    return _status(cwd)
| |
# -*- coding: utf-8 -*-
import math
import pydash as _
from . import fixtures
from .fixtures import parametrize
@parametrize('case,expected', [
    ((['a', 'b', 'c', 'd', 'e'], [0, 2, 4]), ['a', 'c', 'e']),
    ((['moe', 'larry', 'curly'], 0, 2), ['moe', 'curly']),
    (({'a': 1, 'b': 2, 'c': 3}, 'a', 'b'), [1, 2])
])
def test_at(case, expected):
    """at() picks elements/values at the given indexes or keys."""
    assert _.at(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3], 1), True),
    (([1, 2, 3], 1, 2), False),
    (({'name': 'fred', 'age': 40}, 'fred'), True),
    (('pebbles', 'eb'), True)
])
def test_contains(case, expected):
    """contains() reports membership in lists, dicts, and strings."""
    assert _.contains(*case) == expected


@parametrize('case', [
    _.include
])
def test_contains_aliases(case):
    """include is an alias of contains."""
    assert _.contains is case


@parametrize('case,expected', [
    (([4.3, 6.1, 6.4], lambda num: int(math.floor(num))), {4: 1, 6: 2}),
    (([{'one': 1}, {'one': 1}, {'two': 2}, {'one': 1}], {'one': 1}),
     {True: 3, False: 1}),
    (([{'one': 1}, {'one': 1}, {'two': 2}, {'one': 1}], 'one'),
     {1: 3, None: 1}),
    (({1: 0, 2: 0, 4: 3},), {0: 2, 3: 1})
])
def test_count_by(case, expected):
    """count_by() tallies items by callback / where-dict / pluck-key result."""
    assert _.count_by(*case) == expected
@parametrize('case,expected', [
    (([{'level1': {'level2': {'level3': {'value': 1}}}},
       {'level1': {'level2': {'level3': {'value': 2}}}},
       {'level1': {'level2': {'level3': {'value': 3}}}},
       {'level1': {'level2': {'level3': {'value': 4}}}},
       {'level1': {'level2': {}}},
       {}],
      'level1.level2.level3.value'),
     [1, 2, 3, 4, None, None])
])
def test_deep_pluck(case, expected):
    """deep_pluck() extracts nested values by dotted path, None when absent."""
    assert _.deep_pluck(*case) == expected


@parametrize('case,expected', [
    (([0, True, False, None, 1, 2, 3],), [True, 1, 2, 3]),
    (([1, 2, 3, 4, 5, 6], lambda num: num % 2 == 0), [2, 4, 6]),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True}],
      'blocked'),
     [{'name': 'fred', 'age': 40, 'blocked': True}]),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True}],
      {'age': 36}),
     [{'name': 'barney', 'age': 36, 'blocked': False}]),
])
def test_filter_(case, expected):
    """filter_() keeps items passing a truthy/callback/pluck/where test."""
    assert _.filter_(*case) == expected


@parametrize('case,expected', [
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1, 'blocked': False}],
      lambda c: c['age'] < 40),
     {'name': 'barney', 'age': 36, 'blocked': False}),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1, 'blocked': False}],
      {'age': 1}),
     {'name': 'pebbles', 'age': 1, 'blocked': False}),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1, 'blocked': False}],
      'blocked'),
     {'name': 'fred', 'age': 40, 'blocked': True}),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1, 'blocked': False}],),
     {'name': 'barney', 'age': 36, 'blocked': False}),
])
def test_find(case, expected):
    """find() returns the first item matching the predicate form."""
    assert _.find(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3, 4], lambda num: num % 2 == 1), 3),
])
def test_find_last(case, expected):
    """find_last() returns the last matching item."""
    assert _.find_last(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3], fixtures.noop), [1, 2, 3]),
    (([1, 2, 3], lambda value: value < 2), [1, 2, 3]),
    (({'one': 1, 'two': 2, 'three': 3}, fixtures.noop),
     {'one': 1, 'two': 2, 'three': 3}),
])
def test_for_each(case, expected):
    """for_each() iterates for side effects and returns the collection."""
    assert _.for_each(*case) == expected


@parametrize('case', [
    _.each
])
def test_for_each_aliases(case):
    """each is an alias of for_each."""
    assert _.for_each is case
@parametrize('case,expected', [
    (([1, 2, 3], fixtures.noop), [1, 2, 3]),
    (([1, 2, 3], lambda value: value < 2), [1, 2, 3]),
    (({'one': 1, 'two': 2, 'three': 3}, fixtures.noop),
     {'one': 1, 'two': 2, 'three': 3}),
])
def test_for_each_right(case, expected):
    """for_each_right() iterates in reverse and returns the collection."""
    assert _.for_each_right(*case) == expected


@parametrize('case', [
    _.each_right
])
def test_for_each_right_aliases(case):
    """each_right is an alias of for_each_right."""
    assert _.for_each_right is case


@parametrize('case,expected', [
    (([4.2, 6.1, 6.4],
      lambda num: int(math.floor(num))),
     {4: [4.2], 6: [6.1, 6.4]}),
])
def test_group_by(case, expected):
    """group_by() buckets items by the callback's result."""
    assert _.group_by(*case) == expected


@parametrize('case,expected', [
    (([{'dir': 'left', 'code': 97}, {'dir': 'right', 'code': 100}], 'dir'),
     {'left': {'dir': 'left', 'code': 97},
      'right': {'dir': 'right', 'code': 100}}),
])
def test_index_by(case, expected):
    """index_by() keys items by the plucked attribute."""
    assert _.index_by(*case) == expected


@parametrize('case,expected', [
    (([[5, 1, 7], [3, 2, 1]], 'sort'), [None, None]),
    (([[5, 1, 7], [3, 2, 1]], lambda lst: sorted(lst)),
     [[1, 5, 7], [1, 2, 3]]),
    (([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'get', 'a'), [1, 3]),
    (([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'get', 'c'), [None, None]),
    ((['anaconda', 'bison', 'cat'], 'count', 'a'), [3, 0, 1]),
    (([1, 2, 3], lambda item, num: item + num, 1), [2, 3, 4]),
])
def test_invoke(case, expected):
    """invoke() calls a named method or callable on every item."""
    assert _.invoke(*case) == expected


@parametrize('case,expected,sort_results', [
    (([1, 2, 3],), [1, 2, 3], False),
    (([1.1, 2.1, 3.1], int), [1, 2, 3], False),
    (([1, 2, 3], lambda num: num * 3), [3, 6, 9], False),
    (({'one': 1, 'two': 2, 'three': 3}, lambda num: num * 3),
     [3, 6, 9],
     True),
    (([{'name': 'moe', 'age': 40},
       {'name': 'larry', 'age': 50}],
      'name'),
     ['moe', 'larry'],
     False)
])
def test_map_(case, expected, sort_results):
    """map_() transforms items; dict results are sorted before comparing."""
    actual = _.map_(*case)
    if sort_results:
        actual = sorted(actual)
    assert actual == expected


@parametrize('case', [
    _.collect
])
def test_map_aliases(case):
    """collect is an alias of map_."""
    assert _.map_ is case


@parametrize('case,expectations', [
    (([1, 2, 3], lambda num: num * 3), (3, 6, 9)),
])
def test_mapiter(case, expectations):
    """mapiter() returns a lazy iterator of mapped values."""
    mapper = _.mapiter(*case)
    for expected in expectations:
        assert next(mapper) == expected
@parametrize('case,expected', [
    (([1, 2, 3],), 3),
    (({'a': 3, 'b': 2, 'c': 1},), 3),
    ((['anaconda', 'bison', 'camel'], lambda x: len(x)), 'anaconda'),
    (([{'name': 'barney', 'age': 36}, {'name': 'fred', 'age': 40}], 'age',),
     {'name': 'fred', 'age': 40}),
    (([{'name': 'barney', 'age': 36}, {'name': 'fred', 'age': 40}],
      lambda chr: chr['age']),
     {'name': 'fred', 'age': 40}),
])
def test_max_(case, expected):
    """max_() finds the largest item, optionally via callback/pluck key."""
    assert _.max_(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3],), 1),
    (({'a': 3, 'b': 2, 'c': 1},), 1),
    ((['anaconda', 'bison', 'cat'], lambda x: len(x)), 'cat'),
    (([{'name': 'barney', 'age': 36}, {'name': 'fred', 'age': 40}], 'age',),
     {'name': 'barney', 'age': 36}),
    (([{'name': 'barney', 'age': 36}, {'name': 'fred', 'age': 40}],
      lambda chr: chr['age']),
     {'name': 'barney', 'age': 36}),
])
def test_min_(case, expected):
    """min_() finds the smallest item, optionally via callback/pluck key."""
    assert _.min_(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3], lambda item: item % 2), [[1, 3], [2]]),
    (([1.2, 2.3, 3.4], lambda item: math.floor(item) % 2),
     [[1.2, 3.4], [2.3]]),
    (([{'name': 'barney', 'age': 36},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1}],
      {'age': 1}),
     [[{'name': 'pebbles', 'age': 1}],
      [{'name': 'barney', 'age': 36},
       {'name': 'fred', 'age': 40, 'blocked': True}]]),
    (([{'name': 'barney', 'age': 36},
       {'name': 'fred', 'age': 40, 'blocked': True},
       {'name': 'pebbles', 'age': 1}],
      'blocked'),
     [[{'name': 'fred', 'age': 40, 'blocked': True}],
      [{'name': 'barney', 'age': 36},
       {'name': 'pebbles', 'age': 1}]]),
])
def test_partition(case, expected):
    """partition() splits items into [matching, non-matching] lists."""
    assert _.partition(*case) == expected


@parametrize('case,filter_by,expected', [
    ([{'name': 'moe', 'age': 40}, {'name': 'larry', 'age': 50}],
     'name',
     ['moe', 'larry'])
])
def test_pluck(case, filter_by, expected):
    """pluck() extracts the named attribute from every item."""
    assert _.pluck(case, filter_by) == expected


@parametrize('case,expected', [
    (([1, 2, 3], None), 1),
    (([1, 2, 3], fixtures.reduce_callback0), 6),
    (({'a': 1, 'b': 2, 'c': 3}, fixtures.reduce_callback1, {}),
     {'a': 3, 'b': 6, 'c': 9})
])
def test_reduce_(case, expected):
    """reduce_() folds the collection left-to-right."""
    assert _.reduce_(*case) == expected


@parametrize('case,exception', [
    (([],), TypeError)
])
def test_reduce_raise(case, exception):
    """reduce_() raises on an empty collection with no initial value."""
    raised = False
    try:
        _.reduce_(*case)
    except exception:
        raised = True
    assert raised


@parametrize('case', [
    _.foldl,
    _.inject
])
def test_reduce_aliases(case):
    """foldl and inject are aliases of reduce_."""
    assert _.reduce_ is case
@parametrize('case,expected', [
    (([1, 2, 3], None), 3),
    (([1, 2, 3], fixtures.reduce_callback0), 6),
    (([[0, 1], [2, 3], [4, 5]], fixtures.reduce_right_callback0),
     [4, 5, 2, 3, 0, 1]),
    (({'a': 1, 'b': 2, 'c': 3}, fixtures.reduce_callback1, {}),
     {'a': 3, 'b': 6, 'c': 9})
])
def test_reduce_right(case, expected):
    """reduce_right() folds the collection right-to-left."""
    assert _.reduce_right(*case) == expected


@parametrize('case,exception', [
    (([],), TypeError)
])
def test_reduce_right_exception(case, exception):
    """reduce_right() raises on an empty collection with no initial value."""
    raised = False
    try:
        _.reduce_right(*case)
    except exception:
        raised = True
    assert raised


@parametrize('case', [
    _.foldr
])
def test_reduce_right_aliases(case):
    """foldr is an alias of reduce_right."""
    assert _.reduce_right is case


@parametrize('case,expected', [
    (([1, 2, 3], None), [1, 1]),
    (([1, 2, 3], fixtures.reduce_callback0), [3, 6]),
    (([1, 2, 3, 4, 5], fixtures.reduce_callback0, 0), [1, 3, 6, 10, 15])
])
def test_reductions(case, expected):
    """reductions() returns all intermediate left-fold results."""
    assert _.reductions(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3], None), [3, 3]),
    (([1, 2, 3], fixtures.reduce_callback0), [5, 6]),
    (([[0, 1], [2, 3], [4, 5]], fixtures.reduce_right_callback0),
     [[4, 5, 2, 3], [4, 5, 2, 3, 0, 1]]),
])
def test_reductions_right(case, expected):
    """reductions_right() returns all intermediate right-fold results."""
    assert _.reductions_right(*case) == expected


@parametrize('case,expected', [
    (([0, True, False, None, 1, 2, 3],), [0, False, None]),
    (([1, 2, 3, 4, 5, 6], lambda num: num % 2 == 0), [1, 3, 5]),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True}],
      'blocked'),
     [{'name': 'barney', 'age': 36, 'blocked': False}]),
    (([{'name': 'barney', 'age': 36, 'blocked': False},
       {'name': 'fred', 'age': 40, 'blocked': True}],
      {'age': 36}),
     [{'name': 'fred', 'age': 40, 'blocked': True}]),
])
def test_reject(case, expected):
    """reject() is the complement of filter_()."""
    assert _.reject(*case) == expected


@parametrize('case', [
    [1, 2, 3, 4, 5, 6],
])
def test_sample(case):
    """sample() with no size returns one element from the collection."""
    assert _.sample(case) in case


@parametrize('case', [
    ([1, 2, 3, 4, 5, 6], 2),
    ([1, 2, 3, 4, 5, 6], 3),
    ([1, 2, 3, 4, 5, 6], 4),
])
def test_sample_list(case):
    """sample(n) returns a list of n distinct elements of the collection."""
    collection, n = case
    sample_n = _.sample(*case)
    assert isinstance(sample_n, list)
    assert len(sample_n) == min(n, len(collection))
    assert set(sample_n).issubset(collection)


@parametrize('case', [
    [1, 2, 3, 4, 5, 6],
    {'one': 1, 'two': 2, 'three': 3}
])
def test_shuffle(case):
    """shuffle() permutes the collection's values without losing any."""
    shuffled = _.shuffle(case)
    assert len(shuffled) == len(case)
    if isinstance(case, dict):
        assert set(shuffled) == set(case.values())
    else:
        assert set(shuffled) == set(case)


@parametrize('case', [
    [1, 2, 3, 4, 5],
    {'1': 1, '2': 2, '3': 3}
])
def test_size(case):
    """size() matches len() for lists and dicts."""
    assert _.size(case) == len(case)
@parametrize('case,expected', [
    (([None, 0, 'yes', False], bool), True),
    (([None, 0, 'yes', False],), True),
    (([{'name': 'apple', 'organic': False, 'type': 'fruit'},
       {'name': 'carrot', 'organic': True, 'type': 'vegetable'}],
      'organic'),
     True),
    (([{'name': 'apple', 'organic': False, 'type': 'fruit'},
       {'name': 'carrot', 'organic': True, 'type': 'vegetable'}],
      {'type': 'meat'}),
     False)
])
def test_some(case, expected):
    """some() is True when any item passes the predicate form."""
    assert _.some(*case) == expected


@parametrize('case,expected', [
    (([1, 2, 3], lambda x: math.sin(x)), [3, 1, 2]),
    (([{'name': 'barney', 'age': 36},
       {'name': 'fred', 'age': 40},
       {'name': 'barney', 'age': 26},
       {'name': 'fred', 'age': 30}],
      'age'),
     [{'name': 'barney', 'age': 26},
      {'name': 'fred', 'age': 30},
      {'name': 'barney', 'age': 36},
      {'name': 'fred', 'age': 40}]),
    (({'a': 1, 'b': 2, 'c': 3}, lambda x: math.sin(x)), [3, 1, 2]),
    (([1, 2, 3], lambda x: math.sin(x), True), [2, 1, 3]),
    (([{'name': 'barney', 'age': 36},
       {'name': 'fred', 'age': 40},
       {'name': 'barney', 'age': 26},
       {'name': 'fred', 'age': 30}],
      'age',
      True),
     [{'name': 'fred', 'age': 40},
      {'name': 'barney', 'age': 36},
      {'name': 'fred', 'age': 30},
      {'name': 'barney', 'age': 26}]),
    (({'a': 1, 'b': 2, 'c': 3}, lambda x: math.sin(x), True), [2, 1, 3]),
])
def test_sort_by(case, expected):
    """sort_by() orders by callback/pluck key; third arg reverses."""
    assert _.sort_by(*case) == expected
@parametrize('case,expected', [
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      []),
     [{'user': 'barney', 'age': 36},
      {'user': 'fred', 'age': 40},
      {'user': 'barney', 'age': 26},
      {'user': 'fred', 'age': 30}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', 'age']),
     [{'user': 'barney', 'age': 26},
      {'user': 'barney', 'age': 36},
      {'user': 'fred', 'age': 30},
      {'user': 'fred', 'age': 40}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['-user', 'age']),
     [{'user': 'fred', 'age': 30},
      {'user': 'fred', 'age': 40},
      {'user': 'barney', 'age': 26},
      {'user': 'barney', 'age': 36}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', '-age']),
     [{'user': 'barney', 'age': 36},
      {'user': 'barney', 'age': 26},
      {'user': 'fred', 'age': 40},
      {'user': 'fred', 'age': 30}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['-user', '-age']),
     [{'user': 'fred', 'age': 40},
      {'user': 'fred', 'age': 30},
      {'user': 'barney', 'age': 36},
      {'user': 'barney', 'age': 26}]),
    (({1: {'user': 'barney', 'age': 36},
       2: {'user': 'fred', 'age': 40},
       3: {'user': 'barney', 'age': 26},
       4: {'user': 'fred', 'age': 30}},
      ['user', 'age']),
     [{'user': 'barney', 'age': 26},
      {'user': 'barney', 'age': 36},
      {'user': 'fred', 'age': 30},
      {'user': 'fred', 'age': 40}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      [],
      True),
     [{'user': 'barney', 'age': 36},
      {'user': 'fred', 'age': 40},
      {'user': 'barney', 'age': 26},
      {'user': 'fred', 'age': 30}]),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', 'age'],
      True),
     list(reversed([{'user': 'barney', 'age': 26},
                    {'user': 'barney', 'age': 36},
                    {'user': 'fred', 'age': 30},
                    {'user': 'fred', 'age': 40}]))),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['-user', 'age'],
      True),
     list(reversed([{'user': 'fred', 'age': 30},
                    {'user': 'fred', 'age': 40},
                    {'user': 'barney', 'age': 26},
                    {'user': 'barney', 'age': 36}]))),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', '-age'],
      True),
     list(reversed([{'user': 'barney', 'age': 36},
                    {'user': 'barney', 'age': 26},
                    {'user': 'fred', 'age': 40},
                    {'user': 'fred', 'age': 30}]))),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['-user', '-age'],
      True),
     list(reversed([{'user': 'fred', 'age': 40},
                    {'user': 'fred', 'age': 30},
                    {'user': 'barney', 'age': 36},
                    {'user': 'barney', 'age': 26}]))),
    (({1: {'user': 'barney', 'age': 36},
       2: {'user': 'fred', 'age': 40},
       3: {'user': 'barney', 'age': 26},
       4: {'user': 'fred', 'age': 30}},
      ['user', 'age'],
      True),
     list(reversed([{'user': 'barney', 'age': 26},
                    {'user': 'barney', 'age': 36},
                    {'user': 'fred', 'age': 30},
                    {'user': 'fred', 'age': 40}]))),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', 'age'],
      [False, True],
      True),
     list(reversed([{'user': 'fred', 'age': 30},
                    {'user': 'fred', 'age': 40},
                    {'user': 'barney', 'age': 26},
                    {'user': 'barney', 'age': 36}]))),
    (([{'user': 'barney', 'age': 36},
       {'user': 'fred', 'age': 40},
       {'user': 'barney', 'age': 26},
       {'user': 'fred', 'age': 30}],
      ['user', 'age'],
      [False],
      True),
     list(reversed([{'user': 'fred', 'age': 30},
                    {'user': 'fred', 'age': 40},
                    {'user': 'barney', 'age': 26},
                    {'user': 'barney', 'age': 36}]))),
])
def test_sort_by_all(case, expected):
    """sort_by_all() sorts by multiple keys; '-' prefix or order flags reverse."""
    assert _.sort_by_all(*case) == expected


@parametrize('case', [
    _.sort_by_order
])
def test_sort_by_all_aliases(case):
    """sort_by_order is an alias of sort_by_all."""
    assert _.sort_by_all is case


@parametrize('case,expected', [
    ('cat', ['c', 'a', 't']),
    ({'a': 1, 'b': 2, 'c': 3}, [1, 2, 3])
])
def test_to_list(case, expected):
    """to_list() converts strings to chars and dicts to their values."""
    assert set(_.to_list(case)) == set(expected)


@parametrize('case,filter_by,expected,', [
    ([{'name': 'moe', 'age': 40}, {'name': 'larry', 'age': 50}],
     {'age': 40},
     [{'name': 'moe', 'age': 40}])
])
def test_where(case, filter_by, expected):
    """where() filters items matching all key/value pairs."""
    assert _.where(case, filter_by) == expected
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics_utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras.testing_infra import test_combinations
from keras.utils import metrics_utils
@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))
class RaggedSizeOpTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for metrics_utils.ragged_assert_compatible_and_get_flat_values.

  Covers dense inputs, ragged inputs (with and without a mask), and the
  error cases where ranks or ragged structures are incompatible.
  """

  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]]
      },
  ])
  def test_passing_dense_tensors(self, x_list, y_list):
    """Dense inputs pass through with compatible shapes."""
    x = tf.constant(x_list)
    y = tf.constant(y_list)
    [x,
     y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
    x.shape.assert_is_compatible_with(y.shape)

  @parameterized.parameters([
      {
          'x_list': [1],
      },
      {
          'x_list': [1, 2],
      },
      {
          'x_list': [1, 2, 4],
      },
      {
          'x_list': [[1, 2], [3, 4]],
      },
  ])
  def test_passing_one_dense_tensor(self, x_list):
    """A single dense input is accepted without error."""
    x = tf.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])

  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'y_list': [[2, 3], [5, 6], [3]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'y_list': [[2, 3], [], [3]]
      },
  ])
  def test_passing_both_ragged(self, x_list, y_list):
    """Two ragged inputs with identical row structure are flattened compatibly."""
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    [x,
     y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
    x.shape.assert_is_compatible_with(y.shape)

  @parameterized.parameters([
      {
          'x_list': [1],
      },
      {
          'x_list': [1, 2],
      },
      {
          'x_list': [1, 2, 4],
      },
      {
          'x_list': [[1, 2], [3, 4]],
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
      },
      {
          'x_list': [[1, 2], [], [1]],
      },
  ])
  def test_passing_one_ragged(self, x_list):
    """A single ragged input is accepted without error."""
    x = tf.ragged.constant(x_list)
    [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x])

  @parameterized.parameters([
      {
          'x_list': [1],
          'y_list': [2],
          'mask_list': [0]
      },
      {
          'x_list': [1, 2],
          'y_list': [2, 3],
          'mask_list': [0, 1]
      },
      {
          'x_list': [1, 2, 4],
          'y_list': [2, 3, 5],
          'mask_list': [1, 1, 1]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'y_list': [[2, 3], [5, 6]],
          'mask_list': [[1, 1], [0, 1]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'y_list': [[2, 3], [5, 6], [3]],
          'mask_list': [[1, 1], [0, 0], [1]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'y_list': [[2, 3], [], [3]],
          'mask_list': [[1, 1], [], [0]]
      },
  ])
  def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list):
    """Ragged inputs plus a structurally matching ragged mask are compatible."""
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    mask = tf.ragged.constant(mask_list)
    [x, y], mask = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y], mask)
    x.shape.assert_is_compatible_with(y.shape)
    y.shape.assert_is_compatible_with(mask.shape)

  @parameterized.parameters([
      {
          'x_list': [1],
          'mask_list': [0]
      },
      {
          'x_list': [1, 2],
          'mask_list': [0, 1]
      },
      {
          'x_list': [1, 2, 4],
          'mask_list': [1, 1, 1]
      },
      {
          'x_list': [[1, 2], [3, 4]],
          'mask_list': [[1, 1], [0, 1]]
      },
      {
          'x_list': [[1, 2], [3, 4], [1]],
          'mask_list': [[1, 1], [0, 0], [1]]
      },
      {
          'x_list': [[1, 2], [], [1]],
          'mask_list': [[1, 1], [], [0]]
      },
  ])
  def test_passing_one_ragged_with_mask(self, x_list, mask_list):
    """A single ragged input plus a matching ragged mask are compatible."""
    x = tf.ragged.constant(x_list)
    mask = tf.ragged.constant(mask_list)
    [x], mask = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values([x], mask)
    x.shape.assert_is_compatible_with(mask.shape)

  @parameterized.parameters([
      {
          'x_list': [[[1, 3]]],
          'y_list': [[2, 3]]
      },
  ])
  def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list):
    """Mismatched ranks between the two inputs must raise ValueError."""
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y
      ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])

  @parameterized.parameters([
      {
          'x_list': [[[1, 3]]],
          'y_list': [[[2, 3]]],
          'mask_list': [[0, 1]]
      },
  ])
  def test_failing_different_mask_ranks(self, x_list, y_list, mask_list):
    """A mask whose rank differs from the inputs must raise ValueError."""
    x = tf.ragged.constant(x_list)
    y = tf.ragged.constant(y_list)
    mask = tf.ragged.constant(mask_list)
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y
      ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y],
                                                                        mask)

  # we do not support such cases that ragged_ranks are different but overall
  # dimension shapes and sizes are identical due to adding too much performance
  # overheads to the overall use cases.
  def test_failing_different_ragged_ranks(self):
    """Equal overall shape but different ragged_rank is rejected (unsupported)."""
    dt = tf.constant([[[1, 2]]])
    # adding a ragged dimension
    x = tf.RaggedTensor.from_row_splits(dt, row_splits=[0, 1])
    y = tf.ragged.constant([[[[1, 2]]]])
    with self.assertRaises(ValueError):  # pylint: disable=g-error-prone-assert-raises
      [x, y], _ = \
          metrics_utils.ragged_assert_compatible_and_get_flat_values([x, y])
@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))
class FilterTopKTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for metrics_utils._filter_top_k.

  Everything outside the top-k entries (along the last axis) must be
  replaced by metrics_utils.NEG_INF.
  """

  def test_one_dimensional(self):
    """Top-k filtering on a rank-1 tensor for k = 1, 2, 3."""
    values = tf.constant([.3, .1, .2, -.5, 42.])
    filtered = {
        k: self.evaluate(metrics_utils._filter_top_k(x=values, k=k))
        for k in (1, 2, 3)
    }
    neg_inf = metrics_utils.NEG_INF
    self.assertAllClose(filtered[1],
                        [neg_inf, neg_inf, neg_inf, neg_inf, 42.])
    self.assertAllClose(filtered[2], [.3, neg_inf, neg_inf, neg_inf, 42.])
    self.assertAllClose(filtered[3], [.3, neg_inf, .2, neg_inf, 42.])

  def test_three_dimensional(self):
    """Filtering is applied independently along the innermost axis."""
    values = tf.constant([[[.3, .1, .2], [-.3, -.2, -.1]],
                          [[5., .2, 42.], [-.3, -.6, -.99]]])
    filtered = self.evaluate(metrics_utils._filter_top_k(x=values, k=2))
    neg_inf = metrics_utils.NEG_INF
    expected = [[[.3, neg_inf, .2], [neg_inf, -.2, -.1]],
                [[5., neg_inf, 42.], [-.3, -.6, neg_inf]]]
    self.assertAllClose(filtered, expected)

  def test_handles_dynamic_shapes(self):
    """Filtering must also work when the static shape is unknown."""
    # See b/150281686.  # GOOGLE_INTERNAL
    def _passthrough(t):
      return t

    def _top2_dynamic(t):
      # Going through numpy_function erases the static shape information.
      dynamic = tf.numpy_function(_passthrough, (t,), tf.float32)
      return metrics_utils._filter_top_k(x=dynamic, k=2)

    values = tf.constant([.3, .1, .2, -.5, 42.])
    filtered = self.evaluate(_top2_dynamic(values))
    neg_inf = metrics_utils.NEG_INF
    self.assertAllClose(filtered, [.3, neg_inf, neg_inf, neg_inf, 42.])
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  tf.test.main()
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Implementation of Last Layer Bayes.
We apply a preconditioning version, see the article for more details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import gin.tf
import numpy as np
import scipy
import six
import tensorflow.compat.v1 as tf
MAX_BYTES_MEM = 10**8
def _variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

  Records scalar summaries for the mean, standard deviation, max and min of
  `var`, plus a histogram of its values, all under a 'summaries' name scope.
  Summary ops are added to the current default graph; nothing is returned.
  """
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    # Separate scope so the stddev computation ops are grouped in the graph.
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
@gin.configurable
class LastLayerBayesianPrecond(object):
  """Implement LLB on the features.

  Builds a single linear (softmax) layer on top of precomputed features and
  samples its weights with SGD / SGLD / LMC, using a preconditioning matrix
  derived from the empirical second moment of the (bias-augmented) features.
  """

  def __init__(self, dataset, working_dir, model_dir,
               dim_input, num_classes,
               batch_size=gin.REQUIRED, step_size=gin.REQUIRED,
               thinning_interval=gin.REQUIRED, num_samples=gin.REQUIRED,
               sampler=gin.REQUIRED):
    """Initializes the class.

    Args:
      dataset: (features_train, y_train), (features_test, y_test)
      working_dir: working directory where the outputs are written
      model_dir: directory where the weights of the full network are.
      dim_input: dimension of the input features
      num_classes: number of classes
      batch_size: batch size
      step_size: step size
      thinning_interval: thinning interval
      num_samples: number of samples
      sampler: sampler, eg 'sgd', 'sgld' or 'lmc'
    """
    (self.x_train, self.y_train), (self.x_test, self.y_test) = dataset
    # assumes x_train is a dense 2-D array of shape (n, dim_input) -- the
    # np.hstack below appends a constant bias column; TODO confirm vs callers.
    self.n = self.x_train.shape[0]
    self.dim_input = dim_input
    self.num_classes = num_classes
    self.sampler = sampler
    self.batch_size = batch_size
    self.step_size = step_size
    self.thinning_interval = thinning_interval
    self.num_samples = num_samples
    self.working_dir = working_dir
    self.model_dir = model_dir
    # Preconditioning matrix: sigma = F^T F over bias-augmented features F;
    # the sampler works in the sigma^{1/2}-transformed space.
    features = np.hstack((self.x_train, np.ones(self.n)[:, np.newaxis]))
    sigma = np.dot(np.transpose(features), features)
    sigma_half = scipy.linalg.sqrtm(sigma).astype(np.float32)
    self.sigma_half_inv = np.linalg.inv(sigma_half).astype(np.float32)
    self.build_graph()

  def build_graph(self):
    """Builds the neural network graph."""
    # define graph
    self.g = tf.Graph()
    with self.g.as_default():
      # create and store a new session for the graph
      self.sess = tf.Session()
      # define placeholders
      self.x = tf.placeholder(shape=[None, self.dim_input], dtype=tf.float32)
      self.y = tf.placeholder(shape=[None, self.num_classes], dtype=tf.float32)
      # linear layer(WX + b)
      with tf.variable_scope('last_layer/dense') as scope:
        weights = tf.get_variable('kernel',
                                  [self.dim_input, self.num_classes],
                                  dtype=tf.float32)
        biases = tf.get_variable('bias', [self.num_classes], dtype=tf.float32)
        # Stack weights and bias into one (dim_input+1, num_classes) matrix
        # so the preconditioner can be applied to both at once.
        wb = tf.concat([weights, tf.expand_dims(biases, axis=0)], 0)
        wb_renorm = tf.matmul(self.sigma_half_inv, wb)
        weights_renorm = wb_renorm[:self.dim_input, :]
        biases_renorm = wb_renorm[-1, :]
        self.z = tf.add(tf.matmul(self.x, weights_renorm),
                        biases_renorm, name=scope.name)
      # Gaussian prior
      # prior = tf.nn.l2_loss(weights) + tf.nn.l2_loss(biases)
      # Non normalized loss, because of the preconditioning
      self.loss = self.n * tf.reduce_mean(
          tf.nn.softmax_cross_entropy_with_logits_v2(
              labels=self.y, logits=self.z))
      # Bayesian loss
      self.bayesian_loss = self.loss  # + prior
      self.output_probs = tf.nn.softmax(self.z)
      # Variables of the last layer
      self.ll_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
      self.ll_vars_concat = tf.concat(
          [self.ll_vars[0],
           tf.expand_dims(self.ll_vars[1], axis=0)], 0)
      # Summary
      _variable_summaries(self.ll_vars_concat)
      # saving the weights of last layer when running SGLD/SGD/MCMC algorithm
      self.saver = tf.train.Saver(var_list=self.ll_vars,
                                  max_to_keep=self.num_samples)
      self.gd_opt = tf.train.GradientDescentOptimizer(self.step_size)
      # SGLD optimizer for the last layer
      if self.sampler in ['sgld', 'lmc']:
        grads_vars = self.gd_opt.compute_gradients(self.bayesian_loss)
        grads_vars_sgld = []
        for g, v in grads_vars:
          if g is not None:
            # Build a histogram tag from the variable name, replacing the
            # trailing ':' (e.g. 'kernel:0') which is illegal in summary names.
            s = list(v.name)
            s[v.name.rindex(':')] = '_'
            # Adding Gaussian noise to the gradient
            gaussian_noise = (np.sqrt(2. / self.step_size)
                              * tf.random_normal(tf.shape(g)))
            g_sgld = g + gaussian_noise
            tf.summary.histogram(''.join(s) + '/grad_hist_mcmc',
                                 g)
            tf.summary.histogram(''.join(s) + '/gaussian_noise_hist_mcmc',
                                 gaussian_noise)
            tf.summary.histogram(''.join(s) + '/grad_total_hist_mcmc',
                                 g_sgld)
            grads_vars_sgld.append((g_sgld, v))
        self.train_op = self.gd_opt.apply_gradients(grads_vars_sgld)
      # SGD optimizer for the last layer
      if self.sampler == 'sgd':
        grads_vars_sgd = self.gd_opt.compute_gradients(self.loss)
        self.train_op = self.gd_opt.apply_gradients(grads_vars_sgd)
        for g, v in grads_vars_sgd:
          if g is not None:
            s = list(v.name)
            s[v.name.rindex(':')] = '_'
            tf.summary.histogram(''.join(s) + '/grad_hist_sgd', g)
      # Merge all the summaries and write them out
      self.all_summaries = tf.summary.merge_all()
      location = os.path.join(self.working_dir, 'logs')
      self.writer = tf.summary.FileWriter(
          location,
          graph=self.g)
      saver_network = tf.train.Saver(var_list=self.ll_vars)
      print('loading the network ...')
      # Restores from checkpoint
      saver_network.restore(self.sess, self.model_dir)
      print('Graph successfully loaded.')

  def next_batch(self):
    """Give the next batch of training data."""
    # Sampling without replacement within a batch; batches themselves are
    # drawn independently each call.
    indx = np.random.choice(self.n, self.batch_size, replace=False)
    return self.x_train[indx, :], self.y_train[indx]

  def sample(self, reinitialize=False):
    """Samples weights for the network, displaying training and test errors."""
    # we store training loss for sanity check
    training_loss_h, test_loss_h = [], []
    # Keep only one over thinning_interval sample
    num_iters = self.num_samples * self.thinning_interval
    # Saving the weights of the last layer
    num_ll_weights = int((self.dim_input + 1) * self.num_classes)
    sampled_weights = np.zeros((self.num_samples, num_ll_weights))
    # Random initialization of the weights if needed.
    if reinitialize:
      self.sess.run(tf.variables_initializer(self.ll_vars))
    # sampling
    init_t = time.time()
    print('-----------------------------------------------------')
    print('Starting sampling of the Bayesian Neural Network by ' +
          six.ensure_str(self.sampler))
    for i in np.arange(0, num_iters):
      batch_x, batch_y = self.next_batch()
      feed_dict = {self.x: batch_x, self.y: batch_y}
      self.sess.run([self.train_op], feed_dict=feed_dict)
      # Record one sample (plus summaries and a checkpoint) every
      # thinning_interval optimizer steps.
      if (i+1) % self.thinning_interval == 0:
        summary, loss_v, ll_vars_v = self.sess.run([
            self.all_summaries, self.loss,
            self.ll_vars_concat], feed_dict=feed_dict)
        training_loss_h.append(loss_v)
        self.writer.add_summary(summary, i+1)
        self.writer.flush()
        # Column-major flatten ('F') so each class's (weights, bias) slice is
        # contiguous -- matches the per-class de-preconditioning below.
        sampled_weights[i // self.thinning_interval,
                        :] = ll_vars_v.flatten('F')
        feed_dict_test = {self.x: self.x_test, self.y: self.y_test}
        test_loss_v = self.sess.run([self.loss], feed_dict=feed_dict_test)
        test_loss_h.append(test_loss_v)
        msg = ('{} steps. Loss = {}. \t Test Loss = {}.').format(
            str(i+1), str(loss_v), str(test_loss_v))
        print(msg)
        self.saver.save(
            self.sess,
            os.path.join(self.working_dir, 'weights/saved-last-weights'),
            global_step=i+1,
            write_meta_graph=False)
    print('-----------------------------------------------------')
    print('Training complete after {} seconds.'.format(time.time() - init_t))
    print('-----------------------------------------------------')
    self.writer.close()
    # Multiply the weights by sigma_half_inv
    dim_input_bias = self.dim_input + 1
    for i in np.arange(self.num_classes):
      sampled_weights[:, (i*dim_input_bias):((i+1)*dim_input_bias)] = np.dot(
          sampled_weights[:, (i*dim_input_bias):((i+1)*dim_input_bias)],
          self.sigma_half_inv)
    return training_loss_h, test_loss_h, sampled_weights

  def predict(self, x):
    """Predict the output probabilities.

    Assume that self.sample() has been executed before.

    Args:
      x: features on which to compute predicted probabilities.
    """
    # set hyper-parameters
    thinning_interval = self.thinning_interval
    num_samples = self.num_samples
    num_classes = self.num_classes
    n = x.shape[0]
    # initialize tensor of results
    # Need to separate into several pieces for memory issues
    max_num_steps = MAX_BYTES_MEM // (num_classes * n)
    q = num_samples // max_num_steps
    r = num_samples % max_num_steps
    # Full-size chunks of max_num_steps checkpoints each.
    for k in np.arange(q):
      probabilities_tab = np.zeros((x.shape[0],
                                    self.num_classes,
                                    max_num_steps))
      for i in np.arange(max_num_steps):
        step = (k * max_num_steps + i + 1) * thinning_interval
        file_name = os.path.join(self.working_dir,
                                 'weights/saved-last-weights-' + str(step))
        self.saver.restore(self.sess, file_name)
        probabilities_v = self.sess.run(
            self.output_probs, feed_dict={self.x: x})
        probabilities_tab[:, :, i] = probabilities_v
      save_name = os.path.join(self.working_dir,
                               'proba_tab_' + str(k) + '.npy')
      with tf.gfile.Open(save_name, 'wb') as f:
        np.save(f, probabilities_tab)
    # Remainder chunk with the last r checkpoints.
    if r > 0:
      probabilities_tab = np.zeros((x.shape[0],
                                    self.num_classes,
                                    r))
      for i in np.arange(r):
        step = (q * max_num_steps + i + 1) * thinning_interval
        file_name = os.path.join(self.working_dir,
                                 'weights/saved-last-weights-' + str(step))
        self.saver.restore(self.sess, file_name)
        probabilities_v = self.sess.run(
            self.output_probs, feed_dict={self.x: x})
        probabilities_tab[:, :, i] = probabilities_v
      save_name = os.path.join(self.working_dir,
                               'proba_tab_' + str(q) + '.npy')
      with tf.gfile.Open(save_name, 'wb') as f:
        np.save(f, probabilities_tab)
| |
# packet.py
#
# Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net>
#
# A RADIUS packet as defined in RFC 2138
import struct
import random
try:
import hashlib
md5_constructor = hashlib.md5
except ImportError:
# BBB for python 2.4
import md5
md5_constructor = md5.new
import six
import tools
# Packet codes
# RADIUS packet type codes; the auth/accounting values follow the numbering
# in the RFC referenced in this file's header, the Disconnect/CoA codes are
# the dynamic-authorization extensions.
AccessRequest = 1
AccessAccept = 2
AccessReject = 3
AccountingRequest = 4
AccountingResponse = 5
AccessChallenge = 11
StatusServer = 12
StatusClient = 13
DisconnectRequest = 40
DisconnectACK = 41
DisconnectNAK = 42
CoARequest = 43
CoAACK = 44
CoANAK = 45

# Use cryptographic-safe random generator as provided by the OS.
random_generator = random.SystemRandom()

# Current ID
# Module-wide rolling packet ID; advanced by the module-level CreateID().
CurrentID = random_generator.randrange(1, 255)
class PacketError(Exception):
    """Raised when a RADIUS packet or attribute cannot be decoded."""
    pass
class Packet(dict):
    """Packet acts like a standard python map to provide simple access
    to the RADIUS attributes. Since RADIUS allows for repeated
    attributes the value will always be a sequence. pyrad makes sure
    to preserve the ordering when encoding and decoding packets.

    There are two ways to use the map interface: if attribute
    names are used pyrad takes care of en-/decoding data. If
    the attribute type number (or a vendor ID/attribute type
    tuple for vendor attributes) is used you work with the
    raw data.

    Normally you will not use this class directly, but one of the
    :obj:`AuthPacket` or :obj:`AcctPacket` classes.
    """

    def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None,
                 **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identification number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        dict.__init__(self)
        self.code = code
        if id is not None:
            self.id = id
        else:
            # Module-level CreateID(): next value of the rolling CurrentID.
            self.id = CreateID()
        if not isinstance(secret, six.binary_type):
            raise TypeError('secret must be a binary string')
        self.secret = secret
        if authenticator is not None and \
                not isinstance(authenticator, six.binary_type):
            raise TypeError('authenticator must be a binary string')
        self.authenticator = authenticator
        if 'dict' in attributes:
            self.dict = attributes['dict']
        if 'packet' in attributes:
            self.DecodePacket(attributes['packet'])
        # Remaining keyword arguments are treated as attributes; '_' in the
        # Python identifier maps to '-' in the RADIUS attribute name.
        for (key, value) in attributes.items():
            if key in ['dict', 'fd', 'packet']:
                continue
            key = key.replace('_', '-')
            self.AddAttribute(key, value)

    def CreateReply(self, **attributes):
        """Create a new packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return Packet(id=self.id, secret=self.secret,
                      authenticator=self.authenticator, dict=self.dict,
                      **attributes)

    def _DecodeValue(self, attr, value):
        # Map a raw wire value back to its symbolic name when the dictionary
        # defines one, otherwise decode by the attribute's base type.
        if attr.values.HasBackward(value):
            return attr.values.GetBackward(value)
        else:
            return tools.DecodeAttr(attr.type, value)

    def _EncodeValue(self, attr, value):
        # Map a symbolic value name to its wire value when the dictionary
        # defines one, otherwise encode by the attribute's base type.
        if attr.values.HasForward(value):
            return attr.values.GetForward(value)
        else:
            return tools.EncodeAttr(attr.type, value)

    def _EncodeKeyValues(self, key, values):
        # Non-string keys are already raw codes; pass them through untouched.
        if not isinstance(key, str):
            return (key, values)
        attr = self.dict.attributes[key]
        if attr.vendor:
            # Vendor attributes are keyed by a (vendor id, attribute code) tuple.
            key = (self.dict.vendors.GetForward(attr.vendor), attr.code)
        else:
            key = attr.code
        return (key, [self._EncodeValue(attr, v) for v in values])

    def _EncodeKey(self, key):
        # Translate an attribute name into its numeric code (or vendor tuple).
        if not isinstance(key, str):
            return key
        attr = self.dict.attributes[key]
        if attr.vendor:
            return (self.dict.vendors.GetForward(attr.vendor), attr.code)
        else:
            return attr.code

    def _DecodeKey(self, key):
        """Turn a key into a string if possible"""
        if self.dict.attrindex.HasBackward(key):
            return self.dict.attrindex.GetBackward(key)
        return key

    def AddAttribute(self, key, value):
        """Add an attribute to the packet.

        :param key: attribute name or identification
        :type key: string, attribute code or (vendor code, attribute code)
            tuple
        :param value: value
        :type value: depends on type of attribute
        """
        (key, value) = self._EncodeKeyValues(key, [value])
        value = value[0]
        # Repeated attributes are allowed: values accumulate in a list.
        self.setdefault(key, []).append(value)

    def __getitem__(self, key):
        # Raw (non-string) keys return the stored raw values directly;
        # string keys are decoded via the dictionary.
        if not isinstance(key, six.string_types):
            return dict.__getitem__(self, key)

        values = dict.__getitem__(self, self._EncodeKey(key))
        attr = self.dict.attributes[key]
        res = []
        for v in values:
            res.append(self._DecodeValue(attr, v))
        return res

    def __contains__(self, key):
        try:
            return dict.__contains__(self, self._EncodeKey(key))
        except KeyError:
            # Unknown attribute name: not present rather than an error.
            return False

    # Python 2 style alias kept for backwards compatibility.
    has_key = __contains__

    def __delitem__(self, key):
        dict.__delitem__(self, self._EncodeKey(key))

    def __setitem__(self, key, item):
        if isinstance(key, six.string_types):
            (key, item) = self._EncodeKeyValues(key, [item])
            dict.__setitem__(self, key, item)
        else:
            # Raw keys must map to a list of raw values.
            assert isinstance(item, list)
            dict.__setitem__(self, key, item)

    def keys(self):
        return [self._DecodeKey(key) for key in dict.keys(self)]

    @staticmethod
    def CreateAuthenticator():
        """Create a packet authenticator. All RADIUS packets contain a sixteen
        byte authenticator which is used to authenticate replies from the
        RADIUS server and in the password hiding algorithm. This function
        returns a suitable random string that can be used as an authenticator.

        :return: valid packet authenticator
        :rtype: binary string
        """
        data = []
        for i in range(16):
            data.append(random_generator.randrange(0, 256))
        if six.PY3:
            return bytes(data)
        else:
            return ''.join(chr(b) for b in data)

    def CreateID(self):
        """Create a packet ID. All RADIUS requests have a ID which is used to
        identify a request. This is used to detect retries and replay attacks.
        This function returns a suitable random number that can be used as ID.

        :return: ID number
        :rtype: integer
        """
        # NOTE(review): unlike the module-level CreateID() this draws a fresh
        # random ID rather than incrementing the shared counter.
        return random_generator.randrange(0, 256)

    def ReplyPacket(self):
        """Create a ready-to-transmit authentication reply packet.
        Returns a RADIUS packet which can be directly transmitted
        to a RADIUS server. This differs with Packet() in how
        the authenticator is calculated.

        :return: raw packet
        :rtype: string
        """
        assert(self.authenticator)
        assert(self.secret is not None)

        attr = self._PktEncodeAttributes()
        header = struct.pack('!BBH', self.code, self.id, (20 + len(attr)))

        # Response Authenticator = MD5(code+id+length + request authenticator
        # + attributes + secret).
        authenticator = md5_constructor(header[0:4] + self.authenticator
                                        + attr + self.secret).digest()
        return header + authenticator + attr

    def VerifyReply(self, reply, rawreply=None):
        # The reply must echo our request ID, and its authenticator (bytes
        # 4..20) must match the MD5 over the reply with our request
        # authenticator substituted in.
        if reply.id != self.id:
            return False

        if rawreply is None:
            rawreply = reply.ReplyPacket()

        hash = md5_constructor(rawreply[0:4] + self.authenticator +
                               rawreply[20:] + self.secret).digest()
        if hash != rawreply[4:20]:
            return False
        return True

    def _PktEncodeAttribute(self, key, value):
        if isinstance(key, tuple):
            # Vendor-specific attribute: wrap the inner TLV in attribute 26
            # with the 4-byte vendor ID prepended.
            value = struct.pack('!L', key[0]) + \
                self._PktEncodeAttribute(key[1], value)
            key = 26

        # Type(1) + Length(1) header; length covers the header itself.
        return struct.pack('!BB', key, (len(value) + 2)) + value

    def _PktEncodeAttributes(self):
        result = six.b('')
        for (code, datalst) in self.items():
            for data in datalst:
                result += self._PktEncodeAttribute(code, data)
        return result

    def _PktDecodeVendorAttribute(self, data):
        # Check if this packet is long enough to be in the
        # RFC2865 recommended form
        if len(data) < 6:
            return (26, data)

        (vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3]
        # Another sanity check: the inner length plus the 4-byte vendor ID
        # must account for the whole payload, otherwise treat it as opaque.
        if len(data) != length + 4:
            return (26, data)

        return ((vendor, type), data[6:])

    def DecodePacket(self, packet):
        """Initialize the object from raw packet data.  Decode a packet as
        received from the network and decode it.

        :param packet: raw packet
        :type packet: string"""
        try:
            (self.code, self.id, length, self.authenticator) = \
                struct.unpack('!BBH16s', packet[0:20])
        except struct.error:
            raise PacketError('Packet header is corrupt')
        if len(packet) != length:
            raise PacketError('Packet has invalid length')
        if length > 8192:
            raise PacketError('Packet length is too long (%d)' % length)

        self.clear()

        # Walk the attribute TLVs following the 20-byte header.
        packet = packet[20:]
        while packet:
            try:
                (key, attrlen) = struct.unpack('!BB', packet[0:2])
            except struct.error:
                raise PacketError('Attribute header is corrupt')

            if attrlen < 2:
                raise PacketError(
                        'Attribute length is too small (%d)' % attrlen)

            value = packet[2:attrlen]
            if key == 26:
                # Vendor-Specific: try to unpack into ((vendor, type), value).
                (key, value) = self._PktDecodeVendorAttribute(value)

            self.setdefault(key, []).append(value)
            packet = packet[attrlen:]
class AuthPacket(Packet):
    """RADIUS authentication packet.

    Specialization of :obj:`Packet` for Access-Request/Accept/Reject
    exchanges, adding request encoding and the RFC password hiding
    algorithm.
    """

    def __init__(self, code=AccessRequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param code: packet type code
        :type code: integer (8bits)
        :param id: packet identification number
        :type id: integer (8 bits)
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)

    def CreateReply(self, **attributes):
        """Create a new packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return AuthPacket(AccessAccept, self.id,
                          self.secret, self.authenticator, dict=self.dict,
                          **attributes)

    def RequestPacket(self):
        """Create a ready-to-transmit authentication request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        attr = self._PktEncodeAttributes()

        # Lazily create the authenticator and ID if the caller did not
        # supply them.
        if self.authenticator is None:
            self.authenticator = self.CreateAuthenticator()

        if self.id is None:
            self.id = self.CreateID()

        header = struct.pack('!BBH16s', self.code, self.id,
                             (20 + len(attr)), self.authenticator)

        return header + attr

    def PwDecrypt(self, password):
        """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by
        using an algorithm based on the MD5 hash of the packet authenticator
        and RADIUS secret. This function reverses the obfuscation process.

        :param password: obfuscated form of password
        :type password: binary string
        :return: plaintext password
        :rtype: unicode string
        """
        buf = password
        pw = six.b('')

        # First block is keyed on the authenticator; each following block is
        # keyed on the previous ciphertext block (RFC password hiding).
        last = self.authenticator
        while buf:
            hash = md5_constructor(self.secret + last).digest()
            if six.PY3:
                for i in range(16):
                    pw += bytes((hash[i] ^ buf[i],))
            else:
                for i in range(16):
                    pw += chr(ord(hash[i]) ^ ord(buf[i]))

            (last, buf) = (buf[:16], buf[16:])

        # Strip the zero-byte padding added during encryption.
        while pw.endswith(six.b('\x00')):
            pw = pw[:-1]

        return pw.decode('utf-8')

    def PwCrypt(self, password):
        """Obfuscate password.
        RADIUS hides passwords in packets by using an algorithm
        based on the MD5 hash of the packet authenticator and RADIUS
        secret. If no authenticator has been set before calling PwCrypt
        one is created automatically. Changing the authenticator after
        setting a password that has been encrypted using this function
        will not work.

        :param password: plaintext password
        :type password: unicode string
        :return: obfuscated version of the password
        :rtype: binary string
        """
        if self.authenticator is None:
            self.authenticator = self.CreateAuthenticator()

        if isinstance(password, six.text_type):
            password = password.encode('utf-8')

        # Pad the password to a multiple of 16 bytes with zero bytes.
        buf = password
        if len(password) % 16 != 0:
            buf += six.b('\x00') * (16 - (len(password) % 16))

        # NOTE(review): this first digest is immediately recomputed inside the
        # loop below and never used -- looks like dead code; confirm upstream.
        hash = md5_constructor(self.secret + self.authenticator).digest()
        result = six.b('')

        last = self.authenticator
        while buf:
            hash = md5_constructor(self.secret + last).digest()
            if six.PY3:
                for i in range(16):
                    result += bytes((hash[i] ^ buf[i],))
            else:
                for i in range(16):
                    result += chr(ord(hash[i]) ^ ord(buf[i]))

            last = result[-16:]
            buf = buf[16:]

        return result
class AcctPacket(Packet):
    """RADIUS accounting packets. This class is a specialization
    of the generic :obj:`Packet` class for accounting packets.
    """

    def __init__(self, code=AccountingRequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identifaction number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)
        # Keep the raw bytes around so the request authenticator can be
        # verified later.
        if 'packet' in attributes:
            self.raw_packet = attributes['packet']

    def CreateReply(self, **attributes):
        """Create a new packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return AcctPacket(AccountingResponse, self.id,
                          self.secret, self.authenticator, dict=self.dict,
                          **attributes)

    def VerifyAcctRequest(self):
        """Verify request authenticator.

        :return: True if verification failed else False
        :rtype: boolean
        """
        assert(self.raw_packet)
        # Recompute the request authenticator with the authenticator field
        # zeroed out and compare against the received one.
        zero_auth = 16 * six.b('\x00')
        expected = md5_constructor(
            self.raw_packet[0:4] + zero_auth +
            self.raw_packet[20:] + self.secret).digest()
        return expected == self.authenticator

    def RequestPacket(self):
        """Create a ready-to-transmit authentication request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        if self.id is None:
            self.id = self.CreateID()

        attrs = self._PktEncodeAttributes()
        hdr = struct.pack('!BBH', self.code, self.id, 20 + len(attrs))
        # Accounting requests use a zeroed authenticator field when the
        # digest is computed.
        self.authenticator = md5_constructor(
            hdr[0:4] + 16 * six.b('\x00') + attrs + self.secret).digest()
        return hdr + self.authenticator + attrs
class CoAPacket(Packet):
    """RADIUS CoA packets. This class is a specialization
    of the generic :obj:`Packet` class for CoA packets.
    """

    def __init__(self, code=CoARequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identifaction number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)
        # Keep the raw bytes around so the request authenticator can be
        # verified later.
        if 'packet' in attributes:
            self.raw_packet = attributes['packet']

    def CreateReply(self, **attributes):
        """Create a new packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return CoAPacket(CoAACK, self.id,
                         self.secret, self.authenticator, dict=self.dict,
                         **attributes)

    def VerifyCoARequest(self):
        """Verify request authenticator.

        :return: True if verification failed else False
        :rtype: boolean
        """
        assert(self.raw_packet)
        # Recompute the request authenticator with the authenticator field
        # zeroed out and compare against the received one.
        zero_auth = 16 * six.b('\x00')
        expected = md5_constructor(
            self.raw_packet[0:4] + zero_auth +
            self.raw_packet[20:] + self.secret).digest()
        return expected == self.authenticator

    def RequestPacket(self):
        """Create a ready-to-transmit CoA request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        if self.id is None:
            self.id = self.CreateID()

        attrs = self._PktEncodeAttributes()
        hdr = struct.pack('!BBH', self.code, self.id, 20 + len(attrs))
        # CoA requests use a zeroed authenticator field when the digest is
        # computed.
        self.authenticator = md5_constructor(
            hdr[0:4] + 16 * six.b('\x00') + attrs + self.secret).digest()
        return hdr + self.authenticator + attrs
def CreateID():
    """Generate a packet ID.

    Advances the module-wide ``CurrentID`` counter by one, wrapping at 256,
    and returns the new value. Not thread-safe: concurrent callers share the
    same global counter.

    :return: packet ID
    :rtype: 8 bit integer
    """
    global CurrentID

    CurrentID = (CurrentID + 1) % 256
    return CurrentID
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: shrink three CharFields to max_length=100."""

        # Changing field 'TextAnswer.answer'
        db.alter_column(u'survey_textanswer', 'answer', self.gf('django.db.models.fields.CharField')(max_length=100))
        # Changing field 'AnswerRule.action'
        db.alter_column(u'survey_answerrule', 'action', self.gf('django.db.models.fields.CharField')(max_length=100))
        # Changing field 'AnswerRule.condition'
        db.alter_column(u'survey_answerrule', 'condition', self.gf('django.db.models.fields.CharField')(max_length=100))
def backwards(self, orm):
# Changing field 'TextAnswer.answer'
db.alter_column(u'survey_textanswer', 'answer', self.gf('django.db.models.fields.CharField')(max_length=150))
# Changing field 'AnswerRule.action'
db.alter_column(u'survey_answerrule', 'action', self.gf('django.db.models.fields.CharField')(max_length=150))
# Changing field 'AnswerRule.condition'
db.alter_column(u'survey_answerrule', 'condition', self.gf('django.db.models.fields.CharField')(max_length=150))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.children': {
'Meta': {'object_name': 'Children'},
'aged_between_0_5_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_12_23_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_13_17_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_24_59_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_5_12_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_6_11_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'children'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_denominator'", 'to': "orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'to': "orm['survey.Question']"})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number_of_females': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'number_of_males': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'head'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicators'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'survey.women': {
'Meta': {'object_name': 'Women'},
'aged_between_15_19_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_20_49_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'women'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['survey']
| |
import sys, socket, errno, struct, weakref, json, select, logging, time
import pyev
from .. import cnscom
###
# Module-level logger for console-connection events.
L = logging.getLogger("cnscon")
###
# Writes are chunked to BUFSIZE bytes (see handle_write).  POSIX exposes
# select.PIPE_BUF; Windows does not, so fall back to a small fixed size.
if sys.platform != 'win32':
	BUFSIZE = select.PIPE_BUF
else:
	BUFSIZE = 512
###
class console_connection(object):
	'''
	Server side of console communication IPC.

	Wraps one accepted console socket: reads framed requests, dispatches
	them to the server application and writes framed responses back, all
	driven non-blockingly by the pyev event loop.

	Frame layout (see cnscom): a 4-byte header carrying a magic byte,
	a call id and a parameter length, followed by the parameter bytes.
	'''

	# Errno values that just mean "no data right now" on a non-blocking socket.
	NONBLOCKING = frozenset([errno.EAGAIN, errno.EWOULDBLOCK])

	def __init__(self, sock, address, serverapp):
		self.serverapp = serverapp
		self.sock = sock
		# All IO is driven by the event loop; never block on this socket.
		self.sock.setblocking(0)

		# Tuple of (socket family, socket type, socket protocol, ssl)
		self.descr = (
			_socket_families_map.get(self.sock.family, self.sock.family),
			_socket_type_map.get(self.sock.type, self.sock.type),
			_socket_proto_map.get(self.sock.proto, self.sock.proto),
			None #TODO: SSL goes here ...
		)

		self.address = address
		self.connected_at = time.time()

		self.read_buf = ""
		self.write_buf = None

		self.yield_enabled = False
		self.return_expected = False # This is synchronization element used in asserts preventing IPC goes out of sync
		self.tailf_enabled = False

		# Watch for readability on the underlying OS socket handle.
		self.watcher = pyev.Io(self.sock._sock, pyev.EV_READ, serverapp.loop, self.io_cb)
		self.watcher.start()

		L.debug("Console connection open ({0})".format(self.address))

	def __del__(self):
		# Best-effort cleanup when the connection object is garbage collected.
		self.close()

	def reset(self, events):
		'''Re-arm the IO watcher with a new event mask (EV_READ / EV_WRITE).'''
		self.watcher.stop()
		self.watcher.set(self.sock._sock, events)
		self.watcher.start()

	def io_cb(self, watcher, revents):
		'''pyev callback: route read/write readiness to the proper handler.'''
		try:
			if (revents & pyev.EV_READ) == pyev.EV_READ:
				self.handle_read()
				if self.sock is None: return # Socket has been just closed

			if (revents & pyev.EV_WRITE) == pyev.EV_WRITE:
				self.handle_write()
		except:
			# Deliberately broad: a failing handler must not kill the event loop.
			L.exception("Exception during IO on console connection:")

	def handle_read(self):
		'''Consume readable bytes and dispatch every complete request frame.'''
		try:
			buf = self.sock.recv(1024)
		except socket.error as err:
			if err.args[0] not in self.NONBLOCKING:
				L.error("Error when reading from console connection socket: {0}".format(err))
				self.handle_error()
			return

		if len(buf) > 0:
			self.read_buf += buf

			# Each request frame starts with a 4-byte header.
			while len(self.read_buf) >= 4:
				magic, callid, paramlen = struct.unpack(cnscom.call_struct_fmt, self.read_buf[:4])
				if magic != cnscom.call_magic:
					L.warning("Invalid data stream on control port")
					self.handle_error()
					return

				if (paramlen + 4) <= len(self.read_buf):
					# A complete frame is buffered - consume it.
					params = self.read_buf[4:4+paramlen]
					self.read_buf = self.read_buf[4+paramlen:]

					self.return_expected = True
					try:
						ret = self.serverapp.dispatch_svrcall(self, callid, params)
					except Exception as e:
						# 'as e' for consistency with the socket.error handlers
						# (was the Python-2-only 'except Exception, e' form).
						if not isinstance(e, cnscom.svrcall_error):
							L.exception("Exception during dispatching console call")
						self.send_exception(e, callid)
					else:
						if ret == deffered_return: return # Reply will be delivered later
						self.send_return(ret, callid)
				else:
					# Fix: only a partial frame has arrived so far - stop and
					# wait for more data instead of re-parsing the same header
					# forever (the loop previously had no exit for this case).
					break

		else:
			L.debug("Connection closed by peer")
			self.handle_error()

	def handle_write(self):
		'''Flush up to BUFSIZE queued bytes; stop write-polling when drained.'''
		try:
			sent = self.sock.send(self.write_buf[:BUFSIZE])
		except socket.error as err:
			if err.args[0] not in self.NONBLOCKING:
				#TODO: Log "error writing to {0}".format(self.sock)
				self.handle_error()
			return
		else:
			self.write_buf = self.write_buf[sent:]
			if len(self.write_buf) == 0:
				# Everything sent; go back to read-only polling.
				self.reset(pyev.EV_READ)
				self.write_buf = None

	def write(self, data):
		'''Queue data for sending, enabling write-readiness polling if idle.'''
		if self.sock is None:
			L.warning("Socket is closed - write operation is ignored")
			return

		#TODO: Close socket if write buffer is tooo long
		if self.write_buf is None:
			self.write_buf = data
			self.reset(pyev.EV_READ | pyev.EV_WRITE)
		else:
			self.write_buf += data

	def close(self):
		'''Stop the watcher and close the socket (idempotent).'''
		if self.watcher is not None:
			self.watcher.stop()
			self.watcher = None
		if self.sock is not None:
			self.sock.close()
			self.sock = None

	def handle_error(self):
		L.debug("Console connection closed.")
		self.close()

	def send_return(self, ret, callid='-'):
		'''
		Internal function that manages communication of response (type return) to the console (client).
		'''
		assert self.return_expected
		self.yield_enabled = False

		ret = str(ret)
		lenret = len(ret)
		# The response header length field cannot carry 0x7fff or more bytes.
		if lenret >= 0x7fff:
			self.handle_error()
			raise RuntimeError("Transmitted return value is too long (callid={0})".format(callid))
		self.write(struct.pack(cnscom.resp_struct_fmt, cnscom.resp_magic, cnscom.resp_return, lenret) + ret)
		self.return_expected = False

	def send_exception(self, e, callid='-'):
		'''
		Internal function that manages communication of response (type exception) to the console (client).
		'''
		assert self.return_expected, "Raised exception when return is not expected"
		self.yield_enabled = False

		ret = str(e)
		lenret = len(ret)
		if lenret >= 0x7fff:
			self.handle_error()
			raise RuntimeError("Transmitted exception is too long (callid={0})".format(callid))
		self.write(struct.pack(cnscom.resp_struct_fmt, cnscom.resp_magic, cnscom.resp_exception, lenret) + ret)
		self.return_expected = False

	def yield_message(self, message):
		'''Push an out-of-band log message frame (only while yield is enabled).'''
		if not self.yield_enabled: return
		assert self.return_expected

		messagelen = len(message)
		if messagelen >= 0x7fff:
			raise RuntimeError("Transmitted yield message is too long.")
		self.write(struct.pack(cnscom.resp_struct_fmt, cnscom.resp_magic, cnscom.resp_yield_message, messagelen) + message)

	def send_tailf(self, data):
		'''Push a tail-follow data frame (only while tailf is enabled).'''
		if not self.tailf_enabled: return

		datalen = len(data)
		if datalen >= 0x7fff:
			raise RuntimeError("Transmitted tailf data are too long.")
		self.write(struct.pack(cnscom.resp_struct_fmt, cnscom.resp_magic, cnscom.resp_tailf_data, datalen) + data)
###
class message_yield_loghandler(logging.Handler):
	'''
	Log handler that forwards emitted records to all connected consoles.

	Each record is serialized to JSON and handed to every console
	connection via yield_message() as a resp_yield_message frame;
	actual delivery still requires yield_enabled on that connection.
	The server application is held through a weak reference so this
	handler never keeps it alive.
	'''

	def __init__(self, serverapp):
		logging.Handler.__init__(self)
		# Weak reference: the handler must not prolong serverapp's lifetime.
		self.serverapp = weakref.ref(serverapp)

	def emit(self, record):
		app = self.serverapp()
		if app is None:
			return

		payload = {
			'msg': record.msg,
			'args': record.args,
			'funcName': record.funcName,
			'lineno': record.lineno,
			'levelno': record.levelno,
			'levelname': record.levelname,
			'name': record.name,
			'pathname': record.pathname,
		}
		serialized = json.dumps(payload)

		for connection in app.conns:
			connection.yield_message(serialized)
###
class deffered_return(object): pass # This is just a symbol definition
#
_socket_families_map = {
socket.AF_UNIX: 'AF_UNIX',
socket.AF_INET: 'AF_INET',
socket.AF_INET6: 'AF_INET6',
}
_socket_type_map = {
socket.SOCK_STREAM: 'SOCK_STREAM',
socket.SOCK_DGRAM: 'SOCK_DGRAM',
}
_socket_proto_map = {
socket.IPPROTO_TCP: 'IPPROTO_TCP',
}
| |
# -*- coding: utf-8 -*-
# -*- coding: mbcs -*-
from abaqus import *
from abaqusConstants import *
from caeModules import *
from interaction import *
from optimization import *
from sketch import *
from visualization import *
from connectorBehavior import *
import regionToolset
#session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
span=5.0 #span unit:m
nSpan=2 #number of span
Gravity = 9.8 #acceleration of gravity
#-----------------------------------------------------
# Create a model.
myModel = mdb.Model(name='cBeamModel')
#-----------------------------------------------------
from part import *
# Create a sketch for the base feature.
mySketch1 = myModel.ConstrainedSketch(name='beamSketch1',sheetSize=10.0)
# Create the line.
mySketch1.Line(point1=(0.0, 0.0), point2=(nSpan*span, 0.0))
# Create a three-dimensional, deformable part.
myBeamPart1 = myModel.Part(name='beamPart1', dimensionality=THREE_D, type=DEFORMABLE_BODY)
# Create the part's base feature
myBeamPart1.BaseWire(sketch=mySketch1)
myBeamPart1.PartitionEdgeByPoint(edge=
myBeamPart1.edges.findAt((2.5, 0.0, 0.0), ),
point=myBeamPart1.InterestingPoint(
myBeamPart1.edges.findAt((2.5, 0.0, 0.0), ),
MIDDLE))
#-----------------------------------------------------
from material import *
# Create a material.
myC50 = myModel.Material(name='Material-C50', description='Concrete C50')
# Create the elastic properties
#elasticProperties = (209.E9, 0.28)
#mySteel.Elastic(table=(elasticProperties, ) )
#It(umat) seems to be no use in the situation in the "integration=BEFORE_ANALYSIS"
myC50.Density(table=((2500.0, ), ))
myC50.Elastic(table=((34500000000.0, 0.2), )) #3.45e10N/m^2
#-------------------------------------------------------
from section import *
# Create the beam section.
#myModel.GeneralizedProfile(name='beamProfile', area=3.24, i11=0.153819, i12=0.264036, i22=21.7272, j=0.0060, gammaO=0.0, gammaW=0.0)
myModel.RectangularProfile(name='beamProfile', a=0.3, b=0.4)
myModel.BeamSection(name='beamSection', integration=BEFORE_ANALYSIS,density=2549.0,
poissonRatio=0.20, beamShape=CONSTANT, profile='beamProfile', thermalExpansion=OFF,
temperatureDependency=OFF, dependencies=0, table=((34500000000.0, 13800000000.0), ),
alphaDamping=0.0,betaDamping=0.0, compositeDamping=0.0, centroid=(0.0, 0.0),
shearCenter=(0.0, 0.0), consistentMassMatrix=False)
###
beamRegion=regionToolset.Region(edges=myBeamPart1.edges)
myBeamPart1.SectionAssignment(region=beamRegion, sectionName='beamSection',
offset=0.0, offsetField='',offsetType=MIDDLE_SURFACE,
thicknessAssignment=FROM_SECTION)
myModel.parts['beamPart1'].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, 1.0), region=Region(
edges=myBeamPart1.edges.findAt(((0.5, 0.0, 0.0),
), ((2.5, 0.0, 0.0), ), )))
#-------------------------------------------------------
from assembly import *
# Create a part instance.
myAssembly = myModel.rootAssembly
myAssembly.DatumCsysByDefault(CARTESIAN)
myBeamInstance1 = myAssembly.Instance(name='beamInstance1',
part=myBeamPart1, dependent=ON)
#-------------------------------------------------------
from step import *
# Create a step. The time period of the static step is 1.0,
# and the initial incrementation is 0.1; the step is created
# after the initial step.
myModel.StaticStep(name='beamStep', previous='Initial',
nlgeom=OFF, description='Load of the beam.')
myModel.StaticStep(name='Step-Gravity', previous='beamStep',
minInc=0.001, initialInc=0.2, description='Load of the Gravity.')
#-------------------------------------------------------
from load import *
# First support (x = 0): all translations fixed, torsion (ur3) left free.
v = myAssembly.instances['beamInstance1'].vertices
verts = v.findAt(((0.0, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-fix1')
region = myAssembly.sets['Set-fix1']
myModel.DisplacementBC(name='BC-1', createStepName='beamStep',
                       region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=0.0, ur3=UNSET,
                       amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
                       localCsys=None)
# Intermediate supports at x = (i-1)*span: only u2, ur1, ur2 are fixed.
# NOTE(review): this looks up 'beamInstance'+str(i-1), but only
# 'beamInstance1' is created above -- confirm other instances exist (or
# that nSpan == 2) before running.
for i in range(2, nSpan+1):
    v = myAssembly.instances['beamInstance'+str(i-1)].vertices
    verts = v.findAt((((i-1)*span, 0.0, 0.0), ),)
    myAssembly.Set(vertices=verts, name='Set-fix'+str(i))
    region = myAssembly.sets['Set-fix'+str(i)]
    myModel.DisplacementBC(name='BC-' + str(i), createStepName='beamStep',
                           region=region, u1=UNSET, u2=0.0, u3=UNSET, ur1=0.0, ur2=0.0, ur3=UNSET,
                           amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
                           localCsys=None)
#---------------------
#the end of the cBeam
# Last support at x = nSpan*span (vertex taken from instance 1).
v = myAssembly.instances['beamInstance1'].vertices
verts = v.findAt(((nSpan*span, 0.0, 0.0), ),)
myAssembly.Set(vertices=verts, name='Set-fix'+str(nSpan+1))
region = myAssembly.sets['Set-fix'+str(nSpan+1)]
myModel.DisplacementBC(name='BC-'+str(nSpan+1), createStepName='beamStep',
                       region=region, u1=UNSET, u2=0.0, u3=UNSET, ur1=0.0, ur2=0.0, ur3=UNSET,
                       amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
                       localCsys=None)
# Gravity load (comp2, i.e. -y) over all edges, applied in 'Step-Gravity'.
e = myBeamInstance1.edges
setGravity = myAssembly.Set(edges=e, name='Set4Gravity1')
Load1 = myModel.Gravity(name='Load-Gravity1',
                        createStepName='Step-Gravity', comp2=-1.0*Gravity, field='',
                        distributionType=UNIFORM, region=setGravity)
#-------------------------------------------------------
from mesh import *
# Assign an element type to the part instance.
#region = (myInstance.cells,)
#elemType = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD)
#myAssembly.setElementType(regions=region, elemTypes=(elemType,))
# Seed the part instance.
# Global seed = span/5, i.e. five elements per span.
myBeamPart1.seedPart(size=span/5,
                     deviationFactor=0.1, minSizeFactor=0.1)
#need:
#from abaqus import *
#from abaqusConstants import *
# B31: 2-node linear beam element.
elemType1 = mesh.ElemType(elemCode=B31)
pR = (myBeamPart1.edges,)
myBeamPart1.setElementType(regions=pR, elemTypes=(elemType1,))
# Mesh the part instance.
myBeamPart1.generateMesh()
#-------------------------------------------------------
myAssembly.regenerate()
#-------------------------------------------------------
from job import *
# Create an analysis job for the model and submit it.
jobName = 'cBeam'
myJob = mdb.Job(name=jobName, model='cBeamModel')
myJob.submit(consistencyChecking=OFF)
#-----------------------------------
from odbAccess import *
# Block until the solver finishes before opening the ODB.
myJob.waitForCompletion()
#ms=myJob.messages[-1]
instanceName = 'beamInstance1'
stepName = 'Step-Gravity'
frame = 1
x, y = [], []
#if ms.type==JOB_COMPLETED:
odbPath = jobName+'.odb'
o = openOdb(path=odbPath, readOnly=True)
ns = o.rootAssembly.instances[instanceName.upper()].nodes
fop = o.steps[stepName].getFrame(frameValue=frame).fieldOutputs['U'].values
# NOTE(review): 'i' here is left over from the BC loop above (its final value
# is nSpan), so this reads the displacement of an essentially arbitrary node.
# Confirm which node index was intended before trusting nu.txt.
(u1, u2, u3) = fop[i].data
print str(u2)  # Python 2 print statement
with open("nu.txt", "w") as f:
    f.write(str(u2))
o.close()
#for i in range(len(ns)):
# (x1,y1,z1)=ns[i].coordinates
# (u1,u2)=fop[i].data
# x.append(x1)
# y.append(u2)
#o.close()
# Save by ldn
| |
# -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import oauth2
import oauth.oauth as oauth
from xml.etree import ElementTree as etree
log = logging.getLogger('pylti.common')  # pylint: disable=invalid-name

# LTI launch-request parameters that PyLTI recognizes/propagates.
LTI_PROPERTY_LIST = [
    'oauth_consumer_key',
    'launch_presentation_return_url',
    'user_id',
    'oauth_nonce',
    'context_label',
    'context_id',
    'resource_link_title',
    'resource_link_id',
    'lis_person_contact_email_primary',
    # NOTE(review): variant spelling (no underscore before 'primary') --
    # presumably kept for a consumer that sends it this way; confirm.
    'lis_person_contact_emailprimary',
    'lis_person_name_full',
    'lis_person_name_family',
    'lis_person_name_given',
    'lis_result_sourcedid',
    'lis_person_sourcedid',
    'launch_type',
    'lti_message',
    'lti_version',
    'roles',
    'lis_outcome_service_url'
]

# Maps PyLTI role names to the LTI role values that satisfy them.
LTI_ROLES = {
    u'staff': [u'Administrator', u'Instructor', ],
    u'instructor': [u'Instructor', ],
    u'administrator': [u'Administrator', ],
    u'student': [u'Student', u'Learner', ]
    # There is also a special role u'any' that ignores role check
}
# Session key marking an authenticated LTI launch.
LTI_SESSION_KEY = u'lti_authenticated'
# Recognized LTI request types.
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
class LTIOAuthDataStore(oauth.OAuthDataStore):
    # pylint: disable=abstract-method
    """
    OAuth data store backed by the consumer dictionary from settings.

    Largely taken from reference implementation
    for app engine at https://code.google.com/p/ims-dev/
    """

    def __init__(self, consumers):
        """
        Create OAuth store.

        :param consumers: mapping of consumer key -> dict with 'secret'
            (and optionally 'cert') entries
        """
        oauth.OAuthDataStore.__init__(self)
        self.consumers = consumers

    def lookup_consumer(self, key):
        """
        Return an ``oauth.OAuthConsumer`` for *key*, or ``None`` when the
        key is unknown or its configuration is missing a secret.
        """
        if not self.consumers:
            # FIX: added the missing space between the two string halves
            # (was "settings.Have you created...").
            log.critical("No consumers defined in settings. "
                         "Have you created a configuration file?")
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        secret = consumer.get('secret', None)
        if not secret:
            # FIX: added the missing space ("secretin settings" previously).
            log.critical('Consumer %s, is missing secret '
                         'in settings file, and needs correction.', key)
            return None
        return oauth.OAuthConsumer(key, secret)

    def lookup_cert(self, key):
        """
        Return the client certificate configured for *key*, or ``None``.
        """
        if not self.consumers:
            log.critical("No consumers defined in settings. "
                         "Have you created a configuration file?")
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        return consumer.get('cert', None)

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """
        Lookup nonce should check if nonce was already used
        by this consumer in the past.
        Reusing nonce is bad: http://cwe.mitre.org/data/definitions/323.html
        Not implemented.
        """
        return None
class LTIException(Exception):
    """Base exception for LTI-specific error conditions."""

    pass
class LTINotInSessionException(LTIException):
    """Raised when no authenticated LTI session is present."""

    pass
class LTIRoleException(LTIException):
    """Raised when the LTI user does not have the required role."""

    pass
class LTIPostMessageException(LTIException):
    """
    Raised when posting a signed message/outcome to the LTI consumer
    fails.  (The previous docstring was a copy-paste of
    LTIRoleException's and described the wrong condition.)
    """
    pass
def _post_patched_request(consumers, lti_key, body,
                          url, method, content_type):
    """
    Send a signed OAuth request, patching header capitalization.

    Authorization header needs to be capitalized for some LTI clients;
    this function monkey-patches ``httplib2.Http._normalize_headers`` so
    the header is capitalized for the duration of the request, then
    restores the original implementation.

    :param consumers: consumers from config
    :param lti_key: key used to look up the consumer and its secret
    :param body: body of the call
    :param url: outcome url
    :param method: HTTP method
    :param content_type: value for the Content-Type header
    :return: (response, content) tuple
    """
    # pylint: disable=too-many-locals, too-many-arguments
    oauth_store = LTIOAuthDataStore(consumers)
    oauth_server = oauth.OAuthServer(oauth_store)
    oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
    lti_consumer = oauth_store.lookup_consumer(lti_key)
    lti_cert = oauth_store.lookup_cert(lti_key)
    secret = lti_consumer.secret
    consumer = oauth2.Consumer(key=lti_key, secret=secret)
    client = oauth2.Client(consumer)
    if lti_cert:
        client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
        log.debug("cert %s", lti_cert)
    import httplib2
    http = httplib2.Http
    # pylint: disable=protected-access
    normalize = http._normalize_headers

    def my_normalize(self, headers):
        """ This function patches Authorization header """
        ret = normalize(self, headers)
        if 'authorization' in ret:
            ret['Authorization'] = ret.pop('authorization')
        log.debug("headers")
        log.debug(headers)
        return ret

    http._normalize_headers = my_normalize
    try:
        response, content = client.request(
            url,
            method,
            body=body,
            headers={'Content-Type': content_type})
    finally:
        # FIX: restore the original implementation even when the request
        # raises, so other httplib2 users never see the patched method.
        http._normalize_headers = normalize
    log.debug("key %s", lti_key)
    log.debug("secret %s", secret)
    log.debug("url %s", url)
    log.debug("response %s", response)
    log.debug("content %s", format(content))
    return response, content
def post_message(consumers, lti_key, url, body):
    """
    Posts a signed message to LTI consumer
    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param url: post url
    :param body: xml body
    :return: success
    """
    (_, content) = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        'POST',
        'application/xml',
    )
    # LTI 1.1 reports the outcome inside the POX response body.
    success = "<imsx_codeMajor>success</imsx_codeMajor>" in content
    log.debug("is success %s", success)
    return success
def post_message2(consumers, lti_key, url, body,
                  method='POST', content_type='application/xml'):
    """
    Posts a signed message to LTI consumer using LTI 2.0 format
    :param: consumers: consumers from config
    :param: lti_key: key to find appropriate consumer
    :param: url: post url
    :param: body: xml body
    :return: success
    """
    # pylint: disable=too-many-arguments
    response, _ = _post_patched_request(
        consumers, lti_key, body, url, method, content_type)
    # LTI 2.0 signals the outcome via the HTTP status code.
    ok = response.status == 200
    log.debug("is success %s", ok)
    return ok
def verify_request_common(consumers, url, method, headers, params):
    """
    Verifies that request is valid
    :param consumers: consumers from config file
    :param url: request url
    :param method: request method
    :param headers: request headers
    :param params: request params
    :return: is request valid
    """
    log.debug("consumers %s", consumers)
    log.debug("url %s", url)
    log.debug("method %s", method)
    log.debug("headers %s", headers)
    log.debug("params %s", params)
    oauth_server = oauth.OAuthServer(LTIOAuthDataStore(consumers))
    oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
    oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
    # Check header for SSL before selecting the url
    if headers.get('X-Forwarded-Proto', 'http') == 'https':
        url = url.replace('http:', 'https:', 1)
    oauth_request = oauth.OAuthRequest.from_request(
        method, url, headers=dict(headers), parameters=params)
    if not oauth_request:
        log.info('Received non oauth request on oauth protected page')
        raise LTIException('This page requires a valid oauth session '
                           'or request')
    try:
        # pylint: disable=protected-access
        consumer = oauth_server._get_consumer(oauth_request)
        oauth_server._check_signature(oauth_request, consumer, None)
    except oauth.OAuthError:
        # Rethrow our own for nice error handling (don't print
        # error message as it will contain the key
        raise LTIException("OAuth error: Please check your key and secret")
    return True
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.

    :param message_identifier_id: identifier echoed back by the consumer
    :param operation: POX operation name (e.g. 'replaceResult'); 'Request'
        is appended to build the body element name
    :param lis_result_sourcedid: sourcedid identifying the result row
    :param score: score value, or None to omit the <result> element
    :return: XML string
    """
    root = etree.Element(u'imsx_POXEnvelopeRequest',
                         xmlns=u'http://www.imsglobal.org/services/'
                               u'ltiv1p1/xsd/imsoms_v1p0')
    header = etree.SubElement(root, 'imsx_POXHeader')
    header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
    version = etree.SubElement(header_info, 'imsx_version')
    version.text = 'V1.0'
    message_identifier = etree.SubElement(header_info,
                                          'imsx_messageIdentifier')
    message_identifier.text = message_identifier_id
    body = etree.SubElement(root, 'imsx_POXBody')
    xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
    record = etree.SubElement(xml_request, 'resultRecord')
    guid = etree.SubElement(record, 'sourcedGUID')
    sourcedid = etree.SubElement(guid, 'sourcedId')
    sourcedid.text = lis_result_sourcedid
    if score is not None:
        result = etree.SubElement(record, 'result')
        result_score = etree.SubElement(result, 'resultScore')
        language = etree.SubElement(result_score, 'language')
        language.text = 'en'
        text_string = etree.SubElement(result_score, 'textString')
        # FIX: str(score) instead of score.__str__() -- identical result,
        # idiomatic form.
        text_string.text = str(score)
    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(root, encoding='utf-8'))
    log.debug("XML Response: \n%s", ret)
    return ret
| |
import py
from rpython.memory.gctransform.test.test_transform import rtype, rtype_and_transform, getops
from rpython.memory.gctransform.test.test_transform import LLInterpedTranformerTests
from rpython.memory.gctransform.refcounting import RefcountingGCTransformer
from rpython.rtyper.lltypesystem import lltype
from rpython.translator.translator import TranslationContext, graphof
from rpython.translator.c.gc import RefcountingGcPolicy
from rpython.conftest import option
class TestLLInterpedRefcounting(LLInterpedTranformerTests):
    # Run the shared ll-interp transformer tests under the refcounting GC.
    gcpolicy = RefcountingGcPolicy

    def test_llinterp_refcounted_graph_with_del(self):
        # Checks that __del__ fires under the refcounting transformer: the
        # temporary instance built in h(0) must be collected (incrementing
        # delcounter.dels) by the time f() reads the counter.
        from rpython.annotator.model import SomeInteger

        class D:
            pass
        delcounter = D()
        delcounter.dels = 0

        class C:
            def __del__(self):
                delcounter.dels += 1
        c = C()
        c.x = 1

        def h(x):
            if x:
                return c
            else:
                d = C()
                d.x = 2
                return d

        def g(x):
            return h(x).x

        def f(x):
            r = g(x)
            return r + delcounter.dels
        llinterp, graph = self.llinterpreter_for_transformed_graph(f, [SomeInteger()])
        res = llinterp.eval_graph(graph, [1])
        # h(1) returns the prebuilt c (x == 1); nothing was freed.
        assert res == 1
        res = llinterp.eval_graph(graph, [0])
        # 2 (d.x) + 1 recorded __del__: the temporary from h(0) was freed.
        assert res == 3

    def test_raw_instance_flavor(self):
        # crashes for now because X() is not initialized with zeroes when
        # it is allocated, but it's probably illegal to try to store
        # references into a raw-malloced instance
        py.test.skip("a probably-illegal test")

        class State:
            pass
        state = State()

        class Y:
            def __del__(self):
                state.freed_counter += 1

        class X:
            _alloc_flavor_ = 'raw'

        def g():
            x = X()
            x.y = Y()
            return x

        def f():
            from rpython.rlib.objectmodel import free_non_gc_object
            state.freed_counter = 0
            x = g()
            assert state.freed_counter == 0
            x.y = None
            assert state.freed_counter == 1
            free_non_gc_object(x)
            # for now we have no automatic decref when free_non_gc_object() is
            # called
        llinterp, graph = self.llinterpreter_for_transformed_graph(f, [])
        llinterp.eval_graph(graph, [])
def test_simple_barrier():
    # Storing a GC pointer into t.s twice must go through the write barrier:
    # after the transform each store shows up as a getfield (read old value
    # for decref) plus bare_setfield operations.
    S = lltype.GcStruct("S", ('x', lltype.Signed))
    T = lltype.GcStruct("T", ('s', lltype.Ptr(S)))

    def f():
        s1 = lltype.malloc(S)
        s1.x = 1
        s2 = lltype.malloc(S)
        s2.x = 2
        t = lltype.malloc(T)
        t.s = s1
        t.s = s2
        return t
    t, transformer = rtype_and_transform(f, [], RefcountingGCTransformer,
                                         check=False)
    graph = graphof(t, f)
    ops = getops(graph)
    assert len(ops['getfield']) == 2
    assert len(ops['bare_setfield']) == 4
def test_arraybarrier():
    # Same as test_simple_barrier but for array items: storing a GC pointer
    # into a[0] twice yields getarrayitem/bare_setarrayitem pairs, plus the
    # bare_setfields from initializing the two S structs.
    S = lltype.GcStruct("S", ('x', lltype.Signed))
    A = lltype.GcArray(lltype.Ptr(S))

    def f():
        s1 = lltype.malloc(S)
        s1.x = 1
        s2 = lltype.malloc(S)
        s2.x = 2
        a = lltype.malloc(A, 1)
        a[0] = s1
        a[0] = s2
    t, transformer = rtype_and_transform(f, [], RefcountingGCTransformer,
                                         check=False)
    graph = graphof(t, f)
    ops = getops(graph)
    assert len(ops['getarrayitem']) == 2
    assert len(ops['bare_setarrayitem']) == 2
    assert len(ops['bare_setfield']) == 2
def make_deallocator(TYPE,
                     attr="static_deallocation_funcptr_for_type",
                     cls=RefcountingGCTransformer):
    """Rtype a trivial graph that allocates TYPE, run the GC transform,
    and return (deallocator graph or None, the TranslationContext)."""
    if TYPE._is_varsize():
        def f():
            return lltype.malloc(TYPE, 1)
    else:
        def f():
            return lltype.malloc(TYPE)
    ctx = TranslationContext()
    ctx.buildannotator().build_types(f, [])
    ctx.buildrtyper().specialize()
    xform = cls(ctx)
    funcptr = getattr(xform, attr)(TYPE)
    xform.transform_graph(graphof(ctx, f))
    xform.finish(backendopt=False)
    if option.view:
        ctx.view()
    if funcptr:
        return funcptr._obj.graph, ctx
    return None, ctx
def test_deallocator_simple():
    # A struct with no GC fields deallocates with a single gc_free.
    S = lltype.GcStruct("S", ('x', lltype.Signed))
    dgraph, t = make_deallocator(S)
    ops = [op
           for block in dgraph.iterblocks()
           for op in block.operations
           if op.opname != 'same_as']  # XXX filter same_as noise
    assert len(ops) == 1
    assert ops[0].opname == 'gc_free'
def test_deallocator_less_simple():
    # Two GC pointer fields: the deallocator must decref each (one
    # getfield + direct_call per pointer field) before freeing S itself.
    TPtr = lltype.Ptr(lltype.GcStruct("T", ('a', lltype.Signed)))
    S = lltype.GcStruct(
        "S",
        ('x', lltype.Signed),
        ('y', TPtr),
        ('z', TPtr),
    )
    dgraph, t = make_deallocator(S)
    ops = getops(dgraph)
    assert len(ops['direct_call']) == 2
    assert len(ops['getfield']) == 2
    assert len(ops['gc_free']) == 1
def test_deallocator_array():
    # A struct with GC pointer fields, a pointer to a GC array, and an
    # inlined varsized array tail: deallocation must walk the interior
    # array items (getinteriorfield/getinteriorarraysize) as well.
    TPtr = lltype.Ptr(lltype.GcStruct("T", ('a', lltype.Signed)))
    GcA = lltype.GcArray(('x', TPtr), ('y', TPtr))
    A = lltype.Array(('x', TPtr), ('y', TPtr))
    APtr = lltype.Ptr(GcA)
    S = lltype.GcStruct('S', ('t', TPtr), ('x', lltype.Signed), ('aptr', APtr),
                        ('rest', A))
    dgraph, t = make_deallocator(S)
    ops = getops(dgraph)
    assert len(ops['direct_call']) == 4
    assert len(ops['getfield']) == 2
    assert len(ops['getinteriorfield']) == 2
    assert len(ops['getinteriorarraysize']) == 1
    assert len(ops['gc_free']) == 1
def test_deallocator_with_destructor():
    # Attaching RTTI with a destructor to S must not break static
    # deallocator generation; this only checks the transform completes.
    S = lltype.GcStruct("S", ('x', lltype.Signed), rtti=True)

    def f(s):
        s.x = 1

    def type_info_S(p):
        return lltype.getRuntimeTypeInfo(S)
    qp = lltype.functionptr(lltype.FuncType([lltype.Ptr(S)],
                                            lltype.Ptr(lltype.RuntimeTypeInfo)),
                            "type_info_S",
                            _callable=type_info_S)
    dp = lltype.functionptr(lltype.FuncType([lltype.Ptr(S)],
                                            lltype.Void),
                            "destructor_funcptr",
                            _callable=f)
    # Return values are intentionally unused; the calls register the RTTI
    # and build the deallocator as side effects.
    pinf = lltype.attachRuntimeTypeInfo(S, qp, destrptr=dp)
    graph, t = make_deallocator(S)
def test_caching_dynamic_deallocator():
    # S1 embeds S as its first field and registers its destructor through
    # S's query pointer, so S and S1 must share one cached dynamic
    # deallocator, while the unrelated T must get its own.
    S = lltype.GcStruct("S", ('x', lltype.Signed), rtti=True)
    S1 = lltype.GcStruct("S1", ('s', S), ('y', lltype.Signed), rtti=True)
    T = lltype.GcStruct("T", ('x', lltype.Signed), rtti=True)

    def f_S(s):
        s.x = 1

    def f_S1(s1):
        s1.s.x = 1
        s1.y = 2

    def f_T(s):
        s.x = 1

    def type_info_S(p):
        return lltype.getRuntimeTypeInfo(S)

    def type_info_T(p):
        return lltype.getRuntimeTypeInfo(T)
    qp = lltype.functionptr(lltype.FuncType([lltype.Ptr(S)],
                                            lltype.Ptr(lltype.RuntimeTypeInfo)),
                            "type_info_S",
                            _callable=type_info_S)
    dp = lltype.functionptr(lltype.FuncType([lltype.Ptr(S)],
                                            lltype.Void),
                            "destructor_funcptr",
                            _callable=f_S)
    pinf = lltype.attachRuntimeTypeInfo(S, qp, destrptr=dp)
    dp = lltype.functionptr(lltype.FuncType([lltype.Ptr(S)],
                                            lltype.Void),
                            "destructor_funcptr",
                            _callable=f_S1)
    # NOTE(review): S1 is attached using S's query pointer qp (typed
    # Ptr(S)) -- this is what makes S and S1 share a deallocator below.
    pinf = lltype.attachRuntimeTypeInfo(S1, qp, destrptr=dp)
    qp = lltype.functionptr(lltype.FuncType([lltype.Ptr(T)],
                                            lltype.Ptr(lltype.RuntimeTypeInfo)),
                            "type_info_S",
                            _callable=type_info_T)
    dp = lltype.functionptr(lltype.FuncType([lltype.Ptr(T)],
                                            lltype.Void),
                            "destructor_funcptr",
                            _callable=f_T)
    pinf = lltype.attachRuntimeTypeInfo(T, qp, destrptr=dp)

    def f():
        pass
    t = TranslationContext()
    t.buildannotator().build_types(f, [])
    t.buildrtyper().specialize()
    transformer = RefcountingGCTransformer(t)
    p_S = transformer.dynamic_deallocation_funcptr_for_type(S)
    p_S1 = transformer.dynamic_deallocation_funcptr_for_type(S1)
    p_T = transformer.dynamic_deallocation_funcptr_for_type(T)
    assert p_S is not p_T
    assert p_S is p_S1
def test_dynamic_deallocator():
    # c may be an A or a B instance at runtime, so freeing it requires a
    # dynamic deallocator dispatched through the instance's RTTI.
    class A(object):
        pass

    class B(A):
        pass

    def f(x):
        a = A()
        a.x = 1
        b = B()
        b.x = 2
        b.y = 3
        if x:
            c = a
        else:
            c = b
        return c.x
    t, transformer = rtype_and_transform(
        f, [int], RefcountingGCTransformer, check=False)
    fgraph = graphof(t, f)
    s_instance = t.annotator.bookkeeper.valueoftype(A)
    TYPE = t.rtyper.getrepr(s_instance).lowleveltype.TO
    p = transformer.dynamic_deallocation_funcptr_for_type(TYPE)
    t.rtyper.specialize_more_blocks()
def test_recursive_structure():
    # A struct pointing to its own type: deallocator generation must
    # terminate instead of recursing forever through the forward reference.
    F = lltype.GcForwardReference()
    S = lltype.GcStruct('abc', ('x', lltype.Ptr(F)))
    F.become(S)

    def f():
        s1 = lltype.malloc(S)
        s2 = lltype.malloc(S)
        s1.x = s2
    t, transformer = rtype_and_transform(
        f, [], RefcountingGCTransformer, check=False)
def test_dont_decref_nongc_pointers():
    # Only the GC pointer field 'y' is decreffed (one direct_call); the
    # raw (non-GC) pointer field 'x' must be left alone.
    S = lltype.GcStruct('S',
                        ('x', lltype.Ptr(lltype.Struct('T', ('x', lltype.Signed)))),
                        ('y', lltype.Ptr(lltype.GcStruct('Y', ('x', lltype.Signed))))
                        )

    def f():
        pass
    graph, t = make_deallocator(S)
    ops = getops(graph)
    assert len(ops['direct_call']) == 1
| |
#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client("api_key", "api_secret")
def test_exact_amount():
    """Test Exact amount returned"""
    # Earliest available kline -- answers the limit=1 startTime=0 probe.
    first_available_res = [
        [
            1500004800000,
            "0.00005000",
            "0.00005300",
            "0.00001000",
            "0.00004790",
            "663152.00000000",
            1500004859999,
            "30.55108144",
            43,
            "559224.00000000",
            "25.65468144",
            "83431971.04346950",
        ]
    ]
    row = [
        1519892340000,
        "0.00099400",
        "0.00099810",
        "0.00099400",
        "0.00099810",
        "4806.04000000",
        1519892399999,
        "4.78553253",
        154,
        "1785.14000000",
        "1.77837524",
        "0",
    ]
    # 500 klines in the first page, then an empty page to end pagination.
    first_res = [row] * 500
    second_res = []
    with requests_mock.mock() as m:
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1&startTime=0&symbol=BNBBTC",
            json=first_available_res,
        )
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519862400000&symbol=BNBBTC",
            json=first_res,
        )
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519892400000&symbol=BNBBTC",
            json=second_res,
        )
        klines = client.get_historical_klines(
            symbol="BNBBTC", interval=Client.KLINE_INTERVAL_1MINUTE, start_str="1st March 2018"
        )
        assert len(klines) == 500
def test_start_and_end_str():
    """Test start_str and end_str work correctly with string"""
    first_available_res = [
        [
            1500004800000,
            "0.00005000",
            "0.00005300",
            "0.00001000",
            "0.00004790",
            "663152.00000000",
            1500004859999,
            "30.55108144",
            43,
            "559224.00000000",
            "25.65468144",
            "83431971.04346950",
        ]
    ]
    row = [
        1519892340000,
        "0.00099400",
        "0.00099810",
        "0.00099400",
        "0.00099810",
        "4806.04000000",
        1519892399999,
        "4.78553253",
        154,
        "1785.14000000",
        "1.77837524",
        "0",
    ]
    # Single page of 300 klines bounded by startTime/endTime.
    first_res = [row] * 300
    with requests_mock.mock() as m:
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1&startTime=0&symbol=BNBBTC",
            json=first_available_res,
        )
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519862400000&endTime=1519880400000&symbol=BNBBTC",
            json=first_res,
        )
        klines = client.get_historical_klines(
            symbol="BNBBTC",
            interval=Client.KLINE_INTERVAL_1MINUTE,
            start_str="1st March 2018",
            end_str="1st March 2018 05:00:00",
        )
        assert len(klines) == 300
def test_start_and_end_timestamp():
    """Test start_str and end_str work correctly with integer timestamp"""
    first_available_res = [
        [
            1500004800000,
            "0.00005000",
            "0.00005300",
            "0.00001000",
            "0.00004790",
            "663152.00000000",
            1500004859999,
            "30.55108144",
            43,
            "559224.00000000",
            "25.65468144",
            "83431971.04346950",
        ]
    ]
    row = [
        1519892340000,
        "0.00099400",
        "0.00099810",
        "0.00099400",
        "0.00099810",
        "4806.04000000",
        1519892399999,
        "4.78553253",
        154,
        "1785.14000000",
        "1.77837524",
        "0",
    ]
    # Same fixture as the string-date test, but start/end given as epoch ms.
    first_res = [row] * 300
    with requests_mock.mock() as m:
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1&startTime=0&symbol=BNBBTC",
            json=first_available_res,
        )
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519862400000&endTime=1519880400000&symbol=BNBBTC",
            json=first_res,
        )
        klines = client.get_historical_klines(
            symbol="BNBBTC",
            interval=Client.KLINE_INTERVAL_1MINUTE,
            start_str=1519862400000,
            end_str=1519880400000,
        )
        assert len(klines) == 300
def test_historical_kline_generator():
    """Test kline historical generator"""
    first_available_res = [
        [
            1500004800000,
            "0.00005000",
            "0.00005300",
            "0.00001000",
            "0.00004790",
            "663152.00000000",
            1500004859999,
            "30.55108144",
            43,
            "559224.00000000",
            "25.65468144",
            "83431971.04346950",
        ]
    ]
    row = [
        1519892340000,
        "0.00099400",
        "0.00099810",
        "0.00099400",
        "0.00099810",
        "4806.04000000",
        1519892399999,
        "4.78553253",
        154,
        "1785.14000000",
        "1.77837524",
        "0",
    ]
    first_res = [row] * 300
    with requests_mock.mock() as m:
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1&startTime=0&symbol=BNBBTC",
            json=first_available_res,
        )
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519862400000&endTime=1519880400000&symbol=BNBBTC",
            json=first_res,
        )
        klines = client.get_historical_klines_generator(
            symbol="BNBBTC",
            interval=Client.KLINE_INTERVAL_1MINUTE,
            start_str=1519862400000,
            end_str=1519880400000,
        )
        # Exactly 300 klines, then the generator is exhausted.
        for _ in range(300):
            assert len(next(klines)) > 0
        with pytest.raises(StopIteration):
            next(klines)
def test_historical_kline_generator_empty_response():
    """Test kline historical generator if an empty list is returned from API"""
    first_available_res = [
        [
            1500004800000,
            "0.00005000",
            "0.00005300",
            "0.00001000",
            "0.00004790",
            "663152.00000000",
            1500004859999,
            "30.55108144",
            43,
            "559224.00000000",
            "25.65468144",
            "83431971.04346950",
        ]
    ]
    with requests_mock.mock() as m:
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1&startTime=0&symbol=BNBBTC",
            json=first_available_res,
        )
        # Kline page is empty -> the generator must stop immediately.
        m.get(
            "https://api.binance.com/api/v3/klines?interval=1m&limit=1000&startTime=1519862400000&endTime=1519880400000&symbol=BNBBTC",
            json=[],
        )
        klines = client.get_historical_klines_generator(
            symbol="BNBBTC",
            interval=Client.KLINE_INTERVAL_1MINUTE,
            start_str=1519862400000,
            end_str=1519880400000,
        )
        with pytest.raises(StopIteration):
            next(klines)
| |
import os
import base64
import jinja2
import string
import sys
import urllib2
import urlparse
import xmlrpclib
from xos.config import Config
from core.models import Service, ServiceController, ServiceControllerResource, LoadableModule, LoadableModuleResource, XOS
from xos.logger import Logger, logging
from django.utils import timezone
logger = Logger(level=logging.INFO)
def add_unique(list, item):
    """Append *item* to *list* only if it is not already present.

    NOTE: the parameter name shadows the ``list`` builtin; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    if item not in list:  # idiomatic form of "not item in list"
        list.append(item)
class XOSBuilder(object):
    """Builds XOS docker images from onboarded loadable-module resources:
    downloads resource files under build_dir and generates install scripts
    and Dockerfiles that copy them into the target images."""

    # Resource kinds baked into the UI image.
    UI_KINDS=["models", "admin", "admin_template", "django_library", "rest_service", "rest_tenant", "tosca_custom_types", "tosca_resource","public_key"]
    # Resource kinds baked into a controller's own synchronizer image.
    SYNC_CONTROLLER_KINDS=["synchronizer", "private_key", "public_key"]
    # Resource kinds baked into every synchronizer image.
    SYNC_ALLCONTROLLER_KINDS=["models", "django_library"]

    def __init__(self):
        # Base image for synchronizer builds.
        self.source_sync_image = "xosproject/xos-synchronizer-openstack"
        # All downloads and generated files land under this directory.
        self.build_dir = "/opt/xos/BUILD/"
        # Set when a previously-downloaded resource has gone missing.
        self.build_tainted = False

    # stuff that has to do with downloading
def get_base_dest_dir(self, scr):
xos_base = "opt/xos"
service_name = scr.loadable_module.name
base_dirs = {"models": "%s/services/%s/" % (xos_base, service_name),
"admin": "%s/services/%s/" % (xos_base, service_name),
"admin_template": "%s/services/%s/templates/" % (xos_base, service_name),
"django_library": "%s/services/%s/" % (xos_base, service_name),
"synchronizer": "%s/synchronizers/%s/" % (xos_base, service_name),
"tosca_custom_types": "%s/tosca/custom_types/" % (xos_base),
"tosca_resource": "%s/tosca/resources/" % (xos_base),
"rest_service": "%s/api/service/" % (xos_base),
"rest_tenant": "%s/api/tenant/" % (xos_base),
"private_key": "%s/services/%s/keys/" % (xos_base, service_name),
"public_key": "%s/services/%s/keys/" % (xos_base, service_name)}
dest_dir = base_dirs[scr.kind]
return dest_dir
def get_dest_dir(self, scr):
dest_dir = self.get_base_dest_dir(scr)
if scr.subdirectory:
dest_dir = os.path.join(dest_dir, scr.subdirectory)
return dest_dir
def get_build_fn(self, scr):
dest_dir = self.get_dest_dir(scr)
dest_fn = os.path.split(urlparse.urlsplit(scr.full_url).path)[-1]
return os.path.join(dest_dir, dest_fn)
def get_download_fn(self, scr):
dest_fn = self.get_build_fn(scr)
return os.path.join(self.build_dir, dest_fn)
def read_manifest(self, scr, fn):
manifest = []
manifest_lines = file(fn).readlines()
manifest_lines = [x.strip() for x in manifest_lines]
manifest_lines = [x for x in manifest_lines if x]
for line in manifest_lines:
url_parts = urlparse.urlsplit(scr.full_url)
new_path = os.path.join(os.path.join(*os.path.split(url_parts.path)[:-1]),line)
url = urlparse.urlunsplit( (url_parts.scheme, url_parts.netloc, new_path, url_parts.query, url_parts.fragment) )
build_fn = os.path.join(self.get_dest_dir(scr), line)
download_fn = os.path.join(self.build_dir, build_fn)
manifest.append( (url, download_fn, build_fn) )
return manifest
def download_file(self, url, dest_fn):
logger.info("Download %s to %s" % (url, dest_fn))
if not os.path.exists(os.path.dirname(dest_fn)):
os.makedirs(os.path.dirname(dest_fn))
obj = urllib2.urlopen(url)
file(dest_fn,"w").write(obj.read())
# make python files executable
if dest_fn.endswith(".py"): # and contents.startswith("#!"):
os.chmod(dest_fn, 0755)
def download_resource(self, scr):
if scr.format == "manifest":
manifest_fn = self.get_download_fn(scr)
self.download_file(scr.full_url, manifest_fn)
manifest = self.read_manifest(scr, manifest_fn)
for (url, download_fn, build_fn) in manifest:
self.download_file(url, download_fn)
else:
self.download_file(scr.full_url, self.get_download_fn(scr))
# XXX docker creates a new container and commits it for every single COPY
# line in the dockerfile. This causes services with many files (for example,
# vsg) to take ~ 10-15 minutes to build the docker file. So instead we'll copy
# the whole build directory, and then run a script that copies the files
# we want.
# def get_docker_lines(self, scr):
# if scr.format == "manifest":
# manifest_fn = self.get_download_fn(scr)
# manifest = self.read_manifest(scr, manifest_fn)
# lines = []
# for (url, download_fn, build_fn) in manifest:
# script.append("mkdir -p
# #lines.append("COPY %s /%s" % (build_fn, build_fn))
# return lines
# else:
# build_fn = self.get_build_fn(scr)
# #return ["COPY %s /%s" % (build_fn, build_fn)]
# def get_controller_docker_lines(self, controller, kinds):
# need_service_init_py = False
# dockerfile=[]
# for scr in controller.loadable_module_resources.all():
# if scr.kind in kinds:
# lines = self.get_docker_lines(scr)
# dockerfile = dockerfile + lines
# if scr.kind in ["admin", "models"]:
# need_service_init_py = True
#
# if need_service_init_py:
# file(os.path.join(self.build_dir, "opt/xos/empty__init__.py"),"w").write("")
# dockerfile.append("COPY opt/xos/empty__init__.py /opt/xos/services/%s/__init__.py" % controller.name)
#
# return dockerfile
def get_script_lines(self, scr):
if scr.format == "manifest":
manifest_fn = self.get_download_fn(scr)
manifest = self.read_manifest(scr, manifest_fn)
lines = []
for (url, download_fn, build_fn) in manifest:
lines.append("mkdir -p /%s" % os.path.dirname(build_fn))
lines.append("cp /build/%s /%s" % (build_fn, build_fn))
return lines
else:
build_fn = self.get_build_fn(scr)
return ["mkdir -p /%s" % os.path.dirname(build_fn),
"cp /build/%s /%s" % (build_fn, build_fn)]
def get_controller_script_lines(self, controller, kinds):
need_service_init_py = False
script=[]
inits=[]
for scr in list(controller.loadable_module_resources.all()):
if not (scr.kind in kinds):
continue
# Check and see if the resource we're trying to install has
# disappeared. This may happen if the onboarding synchronizer
# container has been destroyed and restarted. In this case, flag
# the resource for re-download, and set the build_tainted bit
# so we can throw an exception after we've evaluated all
# resources.
download_fn = self.get_download_fn(scr)
if not os.path.exists(download_fn):
logger.info("File %s is missing; dirtying the resource" % download_fn)
scr.backend_status = "2 - download_fn is missing"
scr.updated = timezone.now()
scr.save(update_fields=['backend_status', 'updated'])
self.build_tainted = True
continue
lines = self.get_script_lines(scr)
script = script + lines
# compute the set of __init__.py files that we will need
if scr.kind in ["admin", "models", "rest_service", "rest_tenant"]:
dir = self.get_base_dest_dir(scr)
add_unique(inits, dir)
if scr.subdirectory:
for part in scr.subdirectory.split("/"):
dir = os.path.join(dir, part)
add_unique(inits, dir)
for init in inits:
script.append("echo > %s" % os.path.join("/",init,"__init__.py"))
return script
def check_controller_unready(self, controller):
unready_resources=[]
for scr in controller.loadable_module_resources.all():
if (not scr.backend_status) or (not scr.backend_status.startswith("1")):
unready_resources.append(scr)
return unready_resources
# stuff that has to do with building
def create_xos_app_data(self, name, script, app_list, migration_list):
    """Write the xosbuilder app/migration list files into the build tree
    and append the install-script lines that copy them into place.

    Args:
        name: prefix for the generated list files (e.g. "ui").
        script: list of shell-script lines; extended in place.
        app_list: Django app names to record, or None/empty to skip.
        migration_list: migration names to record, or None/empty to skip.
    """
    dest_dir = os.path.join(self.build_dir, "opt/xos/xos")
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    if app_list:
        script.append("mkdir -p /opt/xos/xos")
        script.append("cp /build/opt/xos/xos/%s_xosbuilder_app_list /opt/xos/xos/xosbuilder_app_list" % name)
        # was: file(...).write(...) -- the Py2-only builtin leaked the
        # handle; open() in a with-block closes it deterministically.
        # Also substitute %s before joining rather than after.
        with open(os.path.join(self.build_dir, "opt/xos/xos/%s_xosbuilder_app_list" % name), "w") as f:
            f.write("\n".join(app_list) + "\n")
    if migration_list:
        script.append("mkdir -p /opt/xos/xos")
        script.append("cp /build/opt/xos/xos/%s_xosbuilder_migration_list /opt/xos/xos/xosbuilder_migration_list" % name)
        with open(os.path.join(self.build_dir, "opt/xos/xos/%s_xosbuilder_migration_list" % name), "w") as f:
            f.write("\n".join(migration_list) + "\n")
def create_ui_dockerfile(self):
    """Generate Dockerfile.UI and install-xos.sh for the UI image.

    Collects UI-kind install-script lines from every onboarded
    LoadableModule (skipping, with a warning, any module that still has
    unready resources), records the app/migration lists, and writes the
    build artifacts into self.build_dir.

    Returns:
        dict with keys "dockerfile_fn" and "docker_image_name".

    Raises:
        Exception: if the build was flagged as tainted.
    """
    self.build_tainted = False
    xos = XOS.objects.all()[0]
    dockerfile_fn = "Dockerfile.UI"
    app_list = []
    migration_list = []
    dockerfile = ["FROM %s" % xos.source_ui_image]
    script = []
    for controller in LoadableModule.objects.all():
        if self.check_controller_unready(controller):
            logger.warning("Loadable Module %s has unready resources" % str(controller))
            continue
        script = script + self.get_controller_script_lines(controller, self.UI_KINDS)
        if controller.loadable_module_resources.filter(kind="models").exists():
            app_list.append("services." + controller.name)
            migration_list.append(controller.name)
    self.create_xos_app_data("ui", script, app_list, migration_list)
    # was: file(...).write(...) -- Py2-only builtin that leaked the
    # handle; open() in a with-block closes it deterministically.
    with open(os.path.join(self.build_dir, "install-xos.sh"), "w") as f:
        f.write("\n".join(script) + "\n")
    dockerfile.append("COPY . /build/")
    dockerfile.append("RUN bash /build/install-xos.sh")
    with open(os.path.join(self.build_dir, dockerfile_fn), "w") as f:
        f.write("\n".join(dockerfile) + "\n")
    if self.build_tainted:
        raise Exception("Build was tainted due to errors")
    return {"dockerfile_fn": dockerfile_fn,
            "docker_image_name": "xosproject/xos-ui"}
def create_synchronizer_dockerfile(self, controller):
    """Generate the Dockerfile and install script for one synchronizer.

    Args:
        controller: the LoadableModule whose synchronizer image to build.

    Returns:
        None when the controller has no synchronizer resources (or they
        produce no script lines); otherwise a dict with keys
        "dockerfile_fn" and "docker_image_name".

    Raises:
        Exception: if the build was flagged as tainted.
    """
    self.build_tainted = False
    if not controller.loadable_module_resources.filter(kind="synchronizer").exists():
        # it doesn't have a synchronizer, therefore it doesn't need a dockerfile
        return None
    # bake in the synchronizer from this controller
    sync_lines = self.get_controller_script_lines(controller, self.SYNC_CONTROLLER_KINDS)
    if self.build_tainted:
        raise Exception("Build was tainted due to errors")
    # If there's no sync_lines for this ServiceController, then it must not
    # have a synchronizer.
    if not sync_lines:
        return None
    dockerfile_fn = "Dockerfile.%s" % controller.name
    dockerfile = ["FROM %s" % self.source_sync_image]
    script = []
    # Bake in models from this controller as well as the others.
    # It's important to bake all services in, because some services'
    # synchronizers may depend on models from another service.
    app_list = []
    for c in LoadableModule.objects.all():
        script = script + self.get_controller_script_lines(c, self.SYNC_ALLCONTROLLER_KINDS)
        if c.loadable_module_resources.filter(kind="models").exists():
            app_list.append("services." + c.name)
    self.create_xos_app_data(controller.name, script, app_list, None)
    script = script + sync_lines
    # was: file(...).write(...) -- Py2-only builtin that leaked the
    # handle; open() in a with-block closes it deterministically.
    with open(os.path.join(self.build_dir, "install-%s.sh" % controller.name), "w") as f:
        f.write("\n".join(script) + "\n")
    dockerfile.append("COPY . /build/")
    dockerfile.append("RUN bash /build/install-%s.sh" % controller.name)
    with open(os.path.join(self.build_dir, dockerfile_fn), "w") as f:
        f.write("\n".join(dockerfile) + "\n")
    if self.build_tainted:
        raise Exception("Build was tainted due to errors")
    return {"dockerfile_fn": dockerfile_fn,
            "docker_image_name": "xosproject/xos-synchronizer-%s" % controller.name}
def create_docker_compose(self):
    """Render docker-compose.yml for the XOS UI and synchronizer containers.

    Builds a container description for the UI container and, unless the
    deployment is frontend-only, one per onboarded ServiceController that
    has a synchronizer, then renders the Jinja2 compose template to
    /opt/xos/synchronizers/onboarding/docker-compose/docker-compose.yml.
    """
    xos = XOS.objects.all()[0]

    volume_list = [{"host_path": volume.host_path,
                    "container_path": volume.container_path,
                    "read_only": volume.read_only}
                   for volume in xos.volumes.all()]

    if xos.extra_hosts:
        extra_hosts = [x.strip() for x in xos.extra_hosts.split(",")]
    else:
        extra_hosts = []

    containers = {}
    containers["xos_ui"] = \
        {"image": "xosproject/xos-ui",
         "command": "python /opt/xos/manage.py runserver 0.0.0.0:%d --insecure --makemigrations" % xos.ui_port,
         "ports": {"%d" % xos.ui_port: "%d" % xos.ui_port},
         "external_links": ["%s:%s" % (xos.db_container_name, "xos_db")],
         "extra_hosts": extra_hosts,
         "volumes": volume_list}
    if xos.no_start:
        # keep the container alive (~10 days) without starting the UI
        containers["xos_ui"]["command"] = "sleep 864000"

    if not xos.frontend_only:
        for c in ServiceController.objects.all():
            if self.check_controller_unready(c):
                logger.warning("Controller %s has unready resources" % str(c))
                continue
            if c.loadable_module_resources.filter(kind="synchronizer").exists():
                if c.synchronizer_run and c.synchronizer_config:
                    command = 'bash -c "sleep 120; cd /opt/xos/synchronizers/%s; python ./%s -C %s"' % (c.name, c.synchronizer_run, c.synchronizer_config)
                else:
                    command = 'bash -c "sleep 120; cd /opt/xos/synchronizers/%s; bash ./run.sh"' % c.name
                containers["xos_synchronizer_%s" % c.name] = \
                    {"image": "xosproject/xos-synchronizer-%s" % c.name,
                     "command": command,
                     "external_links": ["%s:%s" % (xos.db_container_name, "xos_db")],
                     "extra_hosts": extra_hosts,
                     "volumes": volume_list}
                if c.no_start:
                    containers["xos_synchronizer_%s" % c.name]["command"] = "sleep 864000"

    # renamed from `vars`/`buffer`, which shadowed builtins
    template_vars = {"containers": containers}
    template_loader = jinja2.FileSystemLoader("/opt/xos/synchronizers/onboarding/templates/")
    template_env = jinja2.Environment(loader=template_loader)
    template = template_env.get_template("docker-compose.yml.j2")
    rendered = template.render(template_vars)

    compose_dir = "/opt/xos/synchronizers/onboarding/docker-compose"
    if not os.path.exists(compose_dir):
        os.makedirs(compose_dir)
    # was: file(...).write(...) -- Py2-only builtin that leaked the
    # handle; open() in a with-block closes it deterministically.
    with open(os.path.join(compose_dir, "docker-compose.yml"), "w") as f:
        f.write(rendered)
# def build_xos(self):
# dockerfiles=[]
# dockerfiles.append(self.create_ui_dockerfile())
#
# for controller in ServiceController.objects.all():
# dockerfiles.append(self.create_synchronizer_dockerfile(controller))
| |
# Copyright 2012 NTT Data. All Rights Reserved.
# Copyright 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import tempfile
import ddt
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
import six
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@ddt.ddt
class LibvirtUtilsTestCase(test.NoDBTestCase):
@mock.patch('nova.utils.execute')
def test_copy_image_local(self, mock_execute):
    """A host-local copy shells out to a single recursive cp."""
    src, dest = 'src', 'dest'
    libvirt_utils.copy_image(src, dest)
    mock_execute.assert_called_once_with('cp', '-r', src, dest)
@mock.patch('nova.virt.libvirt.volume.remotefs.SshDriver.copy_file')
def test_copy_image_remote_ssh(self, mock_rem_fs_remove):
    """With transport=ssh a remote copy goes through the SshDriver."""
    self.flags(remote_filesystem_transport='ssh', group='libvirt')
    libvirt_utils.copy_image('src', 'dest', host='host')
    mock_rem_fs_remove.assert_called_once_with(
        'src', 'host:dest',
        on_completion=None, on_execute=None, compression=True)
@mock.patch('nova.virt.libvirt.volume.remotefs.RsyncDriver.copy_file')
def test_copy_image_remote_rsync(self, mock_rem_fs_remove):
    """With transport=rsync a remote copy goes through the RsyncDriver."""
    self.flags(remote_filesystem_transport='rsync', group='libvirt')
    libvirt_utils.copy_image('src', 'dest', host='host')
    mock_rem_fs_remove.assert_called_once_with(
        'src', 'host:dest',
        on_completion=None, on_execute=None, compression=True)
@mock.patch('os.path.exists', return_value=True)
def test_disk_type_from_path(self, mock_exists):
    """Paths under /dev map to lvm, rbd: URIs to rbd, others to None."""
    # device-node paths are treated as LVM volumes
    for dev_path in ('/dev/b', '/dev/blah/blah'):
        self.assertEqual(
            'lvm', libvirt_utils.get_disk_type_from_path(dev_path))
    # rbd URIs are detected by their prefix
    self.assertEqual(
        'rbd', libvirt_utils.get_disk_type_from_path('rbd:pool/instance'))
    # anything else has no detectable type
    self.assertIsNone(
        libvirt_utils.get_disk_type_from_path('/myhome/disk.config'))
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isdir', return_value=True)
def test_disk_type_ploop(self, mock_isdir, mock_exists):
    """A directory containing DiskDescriptor.xml is detected as ploop."""
    path = '/some/path'
    d_type = libvirt_utils.get_disk_type_from_path(path)
    mock_isdir.assert_called_once_with(path)
    mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path)
    self.assertEqual('ploop', d_type)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_disk_backing(self, mock_execute, mock_exists):
    """qemu-img info output with no backing-file line yields None."""
    path = '/myhome/disk.config'
    template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
    output = template_output % ({
        'path': path,
    })
    mock_execute.return_value = (output, '')
    d_backing = libvirt_utils.get_disk_backing_file(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertIsNone(d_backing)
def _test_disk_size(self, mock_execute, path, expected_size):
    """Helper: assert get_disk_size(path) == expected_size and that
    qemu-img info was invoked exactly once for path."""
    self.assertEqual(expected_size, libvirt_utils.get_disk_size(path))
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
@mock.patch('os.path.exists', return_value=True)
def test_disk_size(self, mock_exists):
    """The byte count in "(N bytes)" is parsed for both M and K sizes."""
    path = '/myhome/disk.config'
    template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
    for i in range(128):
        # human-readable sizes only dress up the fake output; the
        # parser is expected to use the parenthesised byte count (i)
        size_b = i * 65336
        size_kb = size_b / 1024
        size_mb = size_kb / 1024
        for v_size in ("%sM" % (size_mb), "%sK" % (size_kb)):
            fake_output = template_output % ({
                'v_size': v_size,
                'vsize_b': i,
                'path': path,
            })
            with mock.patch('nova.utils.execute',
                            return_value=(fake_output, '')) as mock_execute:
                self._test_disk_size(mock_execute, path, i)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_canon(self, mock_execute, mock_exists):
    """Canonical qemu-img info output is parsed into the info fields;
    unknown lines ("blah BLAH: bb") are ignored."""
    path = "disk.config"
    example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertEqual('disk.config', image_info.image)
    self.assertEqual('raw', image_info.file_format)
    self.assertEqual(67108864, image_info.virtual_size)
    # "96K" is normalized to an integer byte count
    self.assertEqual(98304, image_info.disk_size)
    self.assertEqual(65536, image_info.cluster_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_canon2(self, mock_execute, mock_exists):
    """Plain-integer sizes are accepted, the file format is lower-cased
    and the backing file line is captured."""
    path = "disk.config"
    example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertEqual('disk.config', image_info.image)
    # "QCOW2" in the output is normalized to lower case
    self.assertEqual('qcow2', image_info.file_format)
    self.assertEqual(67108844, image_info.virtual_size)
    self.assertEqual(963434, image_info.disk_size)
    self.assertEqual(65536, image_info.cluster_size)
    self.assertEqual('/var/lib/nova/a328c7998805951a_2',
                     image_info.backing_file)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_ploop(self, mock_execute, mock_isdir, mock_exists):
    """For a ploop directory, qemu-img info is run against the
    root.hds file inside it."""
    path = "/var/lib/nova"
    example_output = """image: root.hds
file format: parallels
virtual size: 3.0G (3221225472 bytes)
disk size: 706M
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info',
                                         os.path.join(path, 'root.hds'),
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_isdir.assert_called_once_with(path)
    # existence is checked for the directory and for DiskDescriptor.xml
    self.assertEqual(2, mock_exists.call_count)
    self.assertEqual(path, mock_exists.call_args_list[0][0][0])
    self.assertEqual(os.path.join(path, 'DiskDescriptor.xml'),
                     mock_exists.call_args_list[1][0][0])
    self.assertEqual('root.hds', image_info.image)
    self.assertEqual('parallels', image_info.file_format)
    self.assertEqual(3221225472, image_info.virtual_size)
    # "706M" is normalized to an integer byte count
    self.assertEqual(740294656, image_info.disk_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_backing_file_actual(self,
                                  mock_execute, mock_exists):
    """When the backing-file line carries an "(actual path: ...)"
    annotation, the actual path wins; snapshots are parsed too."""
    path = "disk.config"
    example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID        TAG                 VM SIZE                DATE       VM CLOCK
1     d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertEqual('disk.config', image_info.image)
    self.assertEqual('raw', image_info.file_format)
    self.assertEqual(67108864, image_info.virtual_size)
    self.assertEqual(98304, image_info.disk_size)
    self.assertEqual(1, len(image_info.snapshots))
    # the "actual path" value, not the declared one, is returned
    self.assertEqual('/b/3a988059e51a_2',
                     image_info.backing_file)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_convert(self, mock_execute, mock_exists):
    """A virtual size with no explicit byte count ("64M") is converted
    to bytes; unknown trailing lines are ignored."""
    path = "disk.config"
    example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID        TAG                 VM SIZE                DATE       VM CLOCK
1        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
3        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
4        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertEqual('disk.config', image_info.image)
    self.assertEqual('raw', image_info.file_format)
    # 64M == 67108864 bytes
    self.assertEqual(67108864, image_info.virtual_size)
    self.assertEqual(98304, image_info.disk_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_snaps(self, mock_execute, mock_exists):
    """Each row of the snapshot table becomes an entry in
    image_info.snapshots."""
    path = "disk.config"
    example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID        TAG                 VM SIZE                DATE       VM CLOCK
1        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
3        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
4        d9a9784a500742a7bb95627bb3aace38    0 2012-08-20 10:52:46 00:00:00.000
"""
    mock_execute.return_value = (example_output, '')
    image_info = images.qemu_img_info(path)
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
    self.assertEqual('disk.config', image_info.image)
    self.assertEqual('raw', image_info.file_format)
    self.assertEqual(67108864, image_info.virtual_size)
    self.assertEqual(98304, image_info.disk_size)
    self.assertEqual(3, len(image_info.snapshots))
def test_valid_hostname_normal(self):
    """An ordinary dotted hostname is accepted."""
    hostname = "hello.world.com"
    self.assertTrue(libvirt_utils.is_valid_hostname(hostname))
def test_valid_hostname_ipv4addr(self):
    """A dotted-quad IPv4 address is accepted as a hostname."""
    addr = "10.0.2.1"
    self.assertTrue(libvirt_utils.is_valid_hostname(addr))
def test_valid_hostname_ipv6addr(self):
    """A colon-separated IPv6 address is accepted as a hostname."""
    addr = "240:2ac3::2"
    self.assertTrue(libvirt_utils.is_valid_hostname(addr))
def test_valid_hostname_bad(self):
    """Shell metacharacters make a hostname invalid."""
    bogus = "foo/?com=/bin/sh"
    self.assertFalse(libvirt_utils.is_valid_hostname(bogus))
@mock.patch('nova.utils.execute')
def test_create_image(self, mock_execute):
    """create_image shells out to qemu-img create with format and size."""
    cases = [('raw', '/some/path', '10G'),
             ('qcow2', '/some/stuff', '1234567891234')]
    for disk_format, target, size in cases:
        libvirt_utils.create_image(disk_format, target, size)
    expected_args = [(('qemu-img', 'create', '-f', disk_format,
                       target, size),)
                     for disk_format, target, size in cases]
    self.assertEqual(expected_args, mock_execute.call_args_list)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_create_cow_image(self, mock_execute, mock_exists):
    """Creating a COW image first inspects the backing file with
    qemu-img info, then runs qemu-img create with backing_file set."""
    mock_execute.return_value = ('stdout', None)
    libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
    expected_args = [(('env', 'LC_ALL=C', 'LANG=C',
                       'qemu-img', 'info', '/some/path'),
                      {'prlimit': images.QEMU_IMG_LIMITS}),
                     (('qemu-img', 'create', '-f', 'qcow2',
                       '-o', 'backing_file=/some/path',
                       '/the/new/cow'),)]
    self.assertEqual(expected_args, mock_execute.call_args_list)
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
           'default_eph_format': None,
           'expected_fs_type': 'some_fs_type'},
          {'fs_type': None,
           'default_eph_format': None,
           'expected_fs_type': disk.FS_FORMAT_EXT4},
          {'fs_type': None,
           'default_eph_format': 'eph_format',
           'expected_fs_type': 'eph_format'})
def test_create_ploop_image(self, fs_type,
                            default_eph_format,
                            expected_fs_type):
    """The ploop fs type comes from the explicit fs_type argument,
    falling back to CONF.default_ephemeral_format, then ext4."""
    with mock.patch('nova.utils.execute') as mock_execute:
        self.flags(default_ephemeral_format=default_eph_format)
        libvirt_utils.create_ploop_image('expanded', '/some/path',
                                         '5G', fs_type)
        mock_execute.assert_has_calls([
            mock.call('mkdir', '-p', '/some/path'),
            mock.call('ploop', 'init', '-s', '5G',
                      '-f', 'expanded', '-t', expected_fs_type,
                      '/some/path/root.hds',
                      run_as_root=True, check_exit_code=True),
            mock.call('chmod', '-R', 'a+r', '/some/path',
                      run_as_root=True, check_exit_code=True)])
def test_pick_disk_driver_name(self):
    """kvm/qemu always use the 'qemu' driver; uml/lxc use none,
    regardless of whether the target is a block device."""
    # virt_type -> ((is_block_dev, expected_driver), ...)
    type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
                'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
                'uml': ([True, None], [False, None], [None, None]),
                'lxc': ([True, None], [False, None], [None, None])}
    # NOTE(aloga): Xen is tested in test_pick_disk_driver_name_xen
    version = 1005001
    for (virt_type, checks) in type_map.items():
        self.flags(virt_type=virt_type, group='libvirt')
        for (is_block_dev, expected_result) in checks:
            result = libvirt_utils.pick_disk_driver_name(version,
                                                         is_block_dev)
            self.assertEqual(result, expected_result)
@mock.patch('nova.utils.execute')
def test_pick_disk_driver_name_xen(self, mock_execute):
    """Xen driver choice depends on the hypervisor version, whether
    blktap is present (tap-ctl) and whether legacy xend answers."""
    def side_effect(*args, **kwargs):
        # emulate 'tap-ctl check' / 'xend status' according to the
        # blktap/xend flags stashed on the mock for this iteration;
        # None means the binary is absent entirely (OSError)
        if args == ('tap-ctl', 'check'):
            if mock_execute.blktap is True:
                return ('ok\n', '')
            elif mock_execute.blktap is False:
                return ('some error\n', '')
            else:
                raise OSError(2, "No such file or directory")
        elif args == ('xend', 'status'):
            if mock_execute.xend is True:
                return ('', '')
            elif mock_execute.xend is False:
                raise processutils.ProcessExecutionError("error")
            else:
                raise OSError(2, "No such file or directory")
        raise Exception('Unexpected call')
    mock_execute.side_effect = side_effect
    self.flags(virt_type="xen", group='libvirt')
    versions = [4000000, 4001000, 4002000, 4003000, 4005000]
    for version in versions:
        # block dev
        result = libvirt_utils.pick_disk_driver_name(version, True)
        self.assertEqual(result, "phy")
        self.assertFalse(mock_execute.called)
        mock_execute.reset_mock()
        # file dev
        for blktap in True, False, None:
            mock_execute.blktap = blktap
            for xend in True, False, None:
                mock_execute.xend = xend
                result = libvirt_utils.pick_disk_driver_name(version,
                                                             False)
                # qemu backend supported only by libxl which is
                # production since xen 4.2. libvirt use libxl if
                # xend service not started.
                if version >= 4002000 and xend is not True:
                    self.assertEqual(result, 'qemu')
                elif blktap:
                    if version == 4000000:
                        self.assertEqual(result, 'tap')
                    else:
                        self.assertEqual(result, 'tap2')
                else:
                    self.assertEqual(result, 'file')
                # default is_block_dev False
                self.assertEqual(result,
                                 libvirt_utils.pick_disk_driver_name(version))
                mock_execute.reset_mock()
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_get_disk_size(self, mock_execute, mock_exists):
    """disk.get_disk_size returns the byte count parsed from the
    "virtual size: ... (N bytes)" line of qemu-img info."""
    path = '/some/path'
    example_output = """image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M
"""
    mock_execute.return_value = (example_output, '')
    self.assertEqual(4592640, disk.get_disk_size('/some/path'))
    mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path,
                                         prlimit=images.QEMU_IMG_LIMITS)
    mock_exists.assert_called_once_with(path)
def test_copy_image(self):
    """Round-trip: copy_image duplicates a small file's contents."""
    dst_fd, dst_path = tempfile.mkstemp()
    try:
        os.close(dst_fd)
        src_fd, src_path = tempfile.mkstemp()
        try:
            with os.fdopen(src_fd, 'w') as src_file:
                src_file.write('canary')
            libvirt_utils.copy_image(src_path, dst_path)
            with open(dst_path, 'r') as dst_file:
                self.assertEqual(dst_file.read(), 'canary')
        finally:
            os.unlink(src_path)
    finally:
        os.unlink(dst_path)
def test_write_to_file(self):
    """write_to_file stores exactly the string handed to it."""
    handle, path = tempfile.mkstemp()
    try:
        os.close(handle)
        libvirt_utils.write_to_file(path, 'hello')
        with open(path, 'r') as f:
            self.assertEqual(f.read(), 'hello')
    finally:
        os.unlink(path)
def test_write_to_file_with_umask(self):
    """The umask argument masks off mode bits on the created file."""
    handle, path = tempfile.mkstemp()
    try:
        os.close(handle)
        # remove the file so write_to_file creates it under the umask
        os.unlink(path)
        libvirt_utils.write_to_file(path, 'hello', umask=0o277)
        with open(path, 'r') as f:
            self.assertEqual(f.read(), 'hello')
        self.assertEqual(os.stat(path).st_mode & 0o277, 0)
    finally:
        os.unlink(path)
@mock.patch.object(utils, 'execute')
def test_chown(self, mock_execute):
    """chown runs the chown command with run_as_root=True."""
    target, owner = '/some/path', 'soren'
    libvirt_utils.chown(target, owner)
    mock_execute.assert_called_once_with('chown', owner, target,
                                         run_as_root=True)
@mock.patch.object(utils, 'execute')
def test_chown_for_id_maps(self, mock_execute):
    """UID and GID maps are flattened into nova-idmapshift
    start:target:count range strings (start defaults to 0 when unset)."""
    id_maps = [vconfig.LibvirtConfigGuestUIDMap(),
               vconfig.LibvirtConfigGuestUIDMap(),
               vconfig.LibvirtConfigGuestGIDMap(),
               vconfig.LibvirtConfigGuestGIDMap()]
    id_maps[0].target = 10000
    id_maps[0].count = 2000
    id_maps[1].start = 2000
    id_maps[1].target = 40000
    id_maps[1].count = 2000
    id_maps[2].target = 10000
    id_maps[2].count = 2000
    id_maps[3].start = 2000
    id_maps[3].target = 40000
    id_maps[3].count = 2000
    libvirt_utils.chown_for_id_maps('/some/path', id_maps)
    execute_args = ('nova-idmapshift', '-i',
                    '-u', '0:10000:2000,2000:40000:2000',
                    '-g', '0:10000:2000,2000:40000:2000',
                    '/some/path')
    mock_execute.assert_called_once_with(*execute_args, run_as_root=True)
def _do_test_extract_snapshot(self, mock_execute, src_format='qcow2',
                              dest_format='raw', out_format='raw'):
    """Helper: run extract_snapshot and assert the qemu-img convert
    invocation; '-c' is expected only when snapshot_compression is on
    and the destination format is qcow2."""
    libvirt_utils.extract_snapshot('/path/to/disk/image', src_format,
                                   '/extracted/snap', dest_format)
    qemu_img_cmd = ('qemu-img', 'convert', '-f',
                    src_format, '-O', out_format)
    if CONF.libvirt.snapshot_compression and dest_format == "qcow2":
        qemu_img_cmd += ('-c',)
    qemu_img_cmd += ('/path/to/disk/image', '/extracted/snap')
    mock_execute.assert_called_once_with(*qemu_img_cmd)
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_raw(self, mock_execute):
    """Default extraction: qcow2 source to raw destination."""
    self._do_test_extract_snapshot(mock_execute)
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_iso(self, mock_execute):
    """iso destination still converts with the raw output format."""
    self._do_test_extract_snapshot(mock_execute, dest_format='iso')
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_qcow2(self, mock_execute):
    """qcow2 destination converts with the qcow2 output format."""
    self._do_test_extract_snapshot(mock_execute,
                                   dest_format='qcow2', out_format='qcow2')
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_qcow2_and_compression(self, mock_execute):
    """With snapshot_compression enabled, qcow2 output adds '-c'."""
    self.flags(snapshot_compression=True, group='libvirt')
    self._do_test_extract_snapshot(mock_execute,
                                   dest_format='qcow2', out_format='qcow2')
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_parallels(self, mock_execute):
    """A ploop destination maps to the 'parallels' qemu output format."""
    self._do_test_extract_snapshot(mock_execute,
                                   src_format='raw',
                                   dest_format='ploop',
                                   out_format='parallels')
def test_load_file(self):
    """load_file returns exactly what write_to_file wrote."""
    handle, path = tempfile.mkstemp()
    try:
        os.close(handle)
        # write_to_file has its own test; reuse it as the fixture here
        libvirt_utils.write_to_file(path, 'hello')
        self.assertEqual(libvirt_utils.load_file(path), 'hello')
    finally:
        os.unlink(path)
def test_file_open(self):
    """file_open yields a readable handle on an existing file."""
    handle, path = tempfile.mkstemp()
    try:
        os.close(handle)
        # write_to_file has its own test; reuse it as the fixture here
        libvirt_utils.write_to_file(path, 'hello')
        with libvirt_utils.file_open(path, 'r') as fp:
            self.assertEqual(fp.read(), 'hello')
    finally:
        os.unlink(path)
def test_get_fs_info(self):
    """get_fs_info derives total/free/used from os.statvfs counts."""
    class FakeStatResult(object):
        # minimal statvfs result: 4096-byte fragments, 2000 blocks
        # total, 1000 free, 900 available to unprivileged users
        def __init__(self):
            self.f_bsize = 4096
            self.f_frsize = 4096
            self.f_blocks = 2000
            self.f_bfree = 1000
            self.f_bavail = 900
            self.f_files = 2000
            self.f_ffree = 1000
            self.f_favail = 900
            self.f_flag = 4096
            self.f_namemax = 255
            # note: attribute on the fake result, not the test case
            self.path = None
    def fake_statvfs(path):
        # record the path on the test case so it can be asserted below
        self.path = path
        return FakeStatResult()
    self.stub_out('os.statvfs', fake_statvfs)
    fs_info = libvirt_utils.get_fs_info('/some/file/path')
    self.assertEqual('/some/file/path', self.path)
    self.assertEqual(8192000, fs_info['total'])   # f_frsize * f_blocks
    self.assertEqual(3686400, fs_info['free'])    # f_frsize * f_bavail
    self.assertEqual(4096000, fs_info['used'])    # f_frsize * (f_blocks - f_bfree)
@mock.patch('nova.virt.images.fetch_to_raw')
def test_fetch_image(self, mock_images):
    """fetch_image forwards straight to images.fetch_to_raw."""
    # renamed from `context`, which shadowed the imported module
    ctxt = 'opaque context'
    target = '/tmp/targetfile'
    image_id = '4'
    libvirt_utils.fetch_image(ctxt, target, image_id)
    mock_images.assert_called_once_with(ctxt, image_id, target)
@mock.patch('nova.virt.images.fetch')
def test_fetch_initrd_image(self, mock_images):
    """fetch_raw_image forwards to images.fetch with the same args."""
    request_ctxt = context.RequestContext(project_id=123,
                                          project_name="aubergine",
                                          user_id=456,
                                          user_name="pie")
    target = '/tmp/targetfile'
    image_id = '4'
    libvirt_utils.fetch_raw_image(request_ctxt, target, image_id)
    mock_images.assert_called_once_with(request_ctxt, image_id, target)
def test_fetch_raw_image(self):
    """fetch_to_raw converts qcow2 downloads to raw, leaves raw images
    alone, and rejects images that declare a backing file."""
    def fake_execute(*cmd, **kwargs):
        self.executes.append(cmd)
        return None, None
    def fake_rename(old, new):
        self.executes.append(('mv', old, new))
    def fake_unlink(path):
        self.executes.append(('rm', path))
    def fake_rm_on_error(path, remove=None):
        self.executes.append(('rm', '-f', path))
    def fake_qemu_img_info(path):
        # infer the fake image's format/backing file from its filename
        class FakeImgInfo(object):
            pass
        file_format = path.split('.')[-1]
        if file_format == 'part':
            file_format = path.split('.')[-2]
        elif file_format == 'converted':
            file_format = 'raw'
        if 'backing' in path:
            backing_file = 'backing'
        else:
            backing_file = None
        FakeImgInfo.file_format = file_format
        FakeImgInfo.backing_file = backing_file
        FakeImgInfo.virtual_size = 1
        return FakeImgInfo()
    self.stub_out('nova.utils.execute', fake_execute)
    self.stub_out('os.rename', fake_rename)
    self.stub_out('os.unlink', fake_unlink)
    self.stub_out('nova.virt.images.fetch', lambda *_, **__: None)
    self.stub_out('nova.virt.images.qemu_img_info', fake_qemu_img_info)
    self.stub_out('oslo_utils.fileutils.delete_if_exists',
                  fake_rm_on_error)
    # Since the remove param of fileutils.remove_path_on_error()
    # is initialized at load time, we must provide a wrapper
    # that explicitly resets it to our fake delete_if_exists()
    old_rm_path_on_error = fileutils.remove_path_on_error
    f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
    self.stub_out('oslo_utils.fileutils.remove_path_on_error', f)
    context = 'opaque context'
    image_id = '4'
    # qcow2 download: converted to raw, then moved into place
    target = 't.qcow2'
    self.executes = []
    expected_commands = [('qemu-img', 'convert', '-O', 'raw',
                          '-f', 'qcow2',
                          't.qcow2.part', 't.qcow2.converted'),
                         ('rm', 't.qcow2.part'),
                         ('mv', 't.qcow2.converted', 't.qcow2')]
    images.fetch_to_raw(context, image_id, target)
    self.assertEqual(self.executes, expected_commands)
    # already raw: simply renamed into place
    target = 't.raw'
    self.executes = []
    expected_commands = [('mv', 't.raw.part', 't.raw')]
    images.fetch_to_raw(context, image_id, target)
    self.assertEqual(self.executes, expected_commands)
    # image with a backing file: rejected, partial download removed
    target = 'backing.qcow2'
    self.executes = []
    expected_commands = [('rm', '-f', 'backing.qcow2.part')]
    self.assertRaises(exception.ImageUnacceptable,
                      images.fetch_to_raw, context, image_id, target)
    self.assertEqual(self.executes, expected_commands)
    del self.executes
def test_get_disk_backing_file(self):
    """Only the basename of the backing file is returned; an
    "(actual path: ...)" annotation takes precedence when present."""
    with_actual_path = False
    def fake_execute(*args, **kwargs):
        # the closure flag switches the fake qemu-img output between
        # plain and annotated backing-file lines
        if with_actual_path:
            return ("some: output\n"
                    "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
                    "...: ...\n"), ''
        else:
            return ("some: output\n"
                    "backing file: /foo/bar/baz\n"
                    "...: ...\n"), ''
    def return_true(*args, **kwargs):
        return True
    self.stub_out('nova.utils.execute', fake_execute)
    self.stub_out('os.path.exists', return_true)
    out = libvirt_utils.get_disk_backing_file('')
    self.assertEqual(out, 'baz')
    with_actual_path = True
    out = libvirt_utils.get_disk_backing_file('')
    self.assertEqual(out, 'c')
def test_get_instance_path_at_destination(self):
    """With no usable migrate_data (None or {}) the path falls back to
    instances_path/<uuid>; LibvirtLiveMigrateData supplies an explicit
    relative path instead."""
    instance = fake_instance.fake_instance_obj(None, name='fake_inst',
                                               uuid=uuids.instance)
    migrate_data = None
    inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
        instance, migrate_data)
    expected_path = os.path.join(CONF.instances_path, instance['uuid'])
    self.assertEqual(expected_path, inst_path_at_dest)
    migrate_data = {}
    inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
        instance, migrate_data)
    expected_path = os.path.join(CONF.instances_path, instance['uuid'])
    self.assertEqual(expected_path, inst_path_at_dest)
    migrate_data = objects.LibvirtLiveMigrateData(
        instance_relative_path='fake_relative_path')
    inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
        instance, migrate_data)
    expected_path = os.path.join(CONF.instances_path, 'fake_relative_path')
    self.assertEqual(expected_path, inst_path_at_dest)
def test_get_arch(self):
    """The architecture image property maps to the Architecture enum."""
    image_meta = objects.ImageMeta.from_dict(
        {'properties': {'architecture': "X86_64"}})
    self.assertEqual(obj_fields.Architecture.X86_64,
                     libvirt_utils.get_arch(image_meta))
def test_update_mtime_error(self):
    """A failing touch is logged as a warning, not raised."""
    with mock.patch.object(libvirt_utils, 'execute',
                           side_effect=processutils.ProcessExecutionError), \
            mock.patch.object(libvirt_utils.LOG, 'warning') as mock_log:
        libvirt_utils.update_mtime(mock.sentinel.path)
        self.assertTrue(mock_log.called)
    def test_is_mounted(self):
        """is_mounted() checks the mountpoint and, when given, the source.

        With no source it reduces to ``os.path.ismount``.  With a source,
        /proc/mounts (read via ``open``, patched below) must contain an
        entry whose source matches.
        """
        mount_path = "/var/lib/nova/mnt"
        source = "192.168.0.1:/nova"
        # /proc/mounts variant where the expected source is mounted at
        # the expected mountpoint.
        proc_with_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.1:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Variant where the mountpoint exists but with a different source
        # (192.168.0.2 instead of .1).
        proc_wrong_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.2:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Variant with no entry for the mountpoint at all.
        proc_without_mnt = """/dev/sda3 / xfs rw,seclabel,,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
"""
        with mock.patch.object(os.path, 'ismount') as mock_ismount:
            # is_mounted(mount_path) with no source is equivalent to
            # os.path.ismount(mount_path)
            mock_ismount.return_value = False
            self.assertFalse(libvirt_utils.is_mounted(mount_path))
            mock_ismount.return_value = True
            self.assertTrue(libvirt_utils.is_mounted(mount_path))

            # Source is given, and matches source in /proc/mounts
            proc_mnt = mock.mock_open(read_data=proc_with_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertTrue(libvirt_utils.is_mounted(mount_path, source))

            # Source is given, and doesn't match source in /proc/mounts
            proc_mnt = mock.mock_open(read_data=proc_wrong_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))

            # Source is given, and mountpoint isn't present in /proc/mounts
            # Note that this shouldn't occur, as os.path.ismount should have
            # previously returned False in this case.
            proc_umnt = mock.mock_open(read_data=proc_without_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_umnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))
| |
import os
import tempfile
import shutil
from django.test import TransactionTestCase
from django.db import IntegrityError
from django.contrib.auth.models import Group
from django.core.files.uploadedfile import UploadedFile
from django.core.exceptions import ValidationError
from rest_framework.exceptions import ValidationError as DRF_ValidationError
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.models import Coverage
from hs_core.hydroshare.utils import resource_post_create_actions, \
get_resource_file_name_and_extension
from hs_core.views.utils import remove_folder, move_or_rename_file_or_folder
from hs_file_types.models import GeoRasterLogicalFile, GeoRasterFileMetaData, GenericLogicalFile
from utils import assert_raster_file_type_metadata
from hs_geo_raster_resource.models import OriginalCoverage, CellInformation, BandInformation
class RasterFileTypeMetaData(MockIRODSTestCaseMixin, TransactionTestCase):
def setUp(self):
super(RasterFileTypeMetaData, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Raster File Metadata'
)
self.temp_dir = tempfile.mkdtemp()
self.raster_file_name = 'small_logan.tif'
self.raster_zip_file_name = 'logan_vrt_small.zip'
self.invalid_raster_file_name = 'raster_tif_invalid.tif'
self.invalid_raster_zip_file_name = 'bad_small_vrt.zip'
self.raster_file = 'hs_file_types/tests/{}'.format(self.raster_file_name)
self.raster_zip_file = 'hs_file_types/tests/{}'.format(self.raster_zip_file_name)
self.invalid_raster_file = 'hs_file_types/tests/{}'.format(self.invalid_raster_file_name)
self.invalid_raster_zip_file = 'hs_file_types/tests/{}'.format(
self.invalid_raster_zip_file_name)
target_temp_raster_file = os.path.join(self.temp_dir, self.raster_file_name)
shutil.copy(self.raster_file, target_temp_raster_file)
self.raster_file_obj = open(target_temp_raster_file, 'r')
target_temp_raster_file = os.path.join(self.temp_dir, self.raster_zip_file_name)
shutil.copy(self.raster_zip_file, target_temp_raster_file)
self.raster_zip_file_obj = open(target_temp_raster_file, 'r')
target_temp_raster_file = os.path.join(self.temp_dir, self.invalid_raster_file_name)
shutil.copy(self.invalid_raster_file, target_temp_raster_file)
self.invalid_raster_file_obj = open(target_temp_raster_file, 'r')
target_temp_raster_file = os.path.join(self.temp_dir, self.invalid_raster_zip_file_name)
shutil.copy(self.invalid_raster_zip_file, target_temp_raster_file)
self.invalid_raster_zip_file_obj = open(target_temp_raster_file, 'r')
def tearDown(self):
super(RasterFileTypeMetaData, self).tearDown()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_tif_set_file_type_to_geo_raster(self):
# here we are using a valid raster tif file for setting it
# to Geo Raster file type which includes metadata extraction
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# check that there is one GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 1)
# set the tif file to GeoRasterFile type
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test extracted raster file type metadata
assert_raster_file_type_metadata(self)
# there should not be any file level keywords at this point
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertTrue(isinstance(logical_file, GeoRasterLogicalFile))
self.assertTrue(logical_file.metadata, GeoRasterFileMetaData)
# TODO: not sure why there would be file level keywords - commented out as the test is
# failing
# self.assertEqual(logical_file.metadata.keywords, [])
self.composite_resource.delete()
def test_zip_set_file_type_to_geo_raster(self):
# here we are using a valid raster zip file for setting it
# to Geo Raster file type which includes metadata extraction
self.raster_file_obj = open(self.raster_zip_file, 'r')
self._create_composite_resource()
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# check that the resource file is not associated with any logical file
# self.assertEqual(res_file.has_logical_file, False)
# set the zip file to GeoRasterFile type
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test the resource now has 3 files (one vrt file and 2 tif files)
self.assertEqual(self.composite_resource.files.all().count(), 3)
tif_files = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')
self.assertEqual(len(tif_files), 2)
vrt_files = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.vrt')
self.assertEqual(len(vrt_files), 1)
# check that the logicalfile is associated with 3 files
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.dataset_name, 'logan_vrt_small')
self.assertEqual(logical_file.has_metadata, True)
self.assertEqual(logical_file.files.all().count(), 3)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
# check that we put the 3 files in a new folder (small_logan)
for res_file in self.composite_resource.files.all():
file_path, base_file_name, _ = get_resource_file_name_and_extension(res_file)
expected_file_path = "{}/data/contents/logan_vrt_small/{}"
expected_file_path = expected_file_path.format(self.composite_resource.short_id,
base_file_name)
self.assertEqual(file_path, expected_file_path)
# check that there is no GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# test that size property of the logical file is equal to sun of size of all files
# that are part of the logical file
self.assertEqual(logical_file.size, sum([f.size for f in logical_file.files.all()]))
# test extracted metadata for the file type
# geo raster file type should have all the metadata elements
self.assertEqual(logical_file.metadata.has_all_required_elements(), True)
# there should be 1 coverage element - box type
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertEqual(logical_file.metadata.spatial_coverage.type, 'box')
box_coverage = logical_file.metadata.spatial_coverage
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 42.050028785767275)
self.assertEqual(box_coverage.value['eastlimit'], -111.5773750264389)
self.assertEqual(box_coverage.value['southlimit'], 41.98745777902698)
self.assertEqual(box_coverage.value['westlimit'], -111.65768822411239)
# testing extended metadata element: original coverage
ori_coverage = logical_file.metadata.originalCoverage
self.assertNotEqual(ori_coverage, None)
self.assertEqual(ori_coverage.value['northlimit'], 4655492.446916306)
self.assertEqual(ori_coverage.value['eastlimit'], 452174.01909127034)
self.assertEqual(ori_coverage.value['southlimit'], 4648592.446916306)
self.assertEqual(ori_coverage.value['westlimit'], 445574.01909127034)
self.assertEqual(ori_coverage.value['units'], 'meter')
self.assertEqual(ori_coverage.value['projection'],
'NAD83 / UTM zone 12N')
# testing extended metadata element: cell information
cell_info = logical_file.metadata.cellInformation
self.assertEqual(cell_info.rows, 230)
self.assertEqual(cell_info.columns, 220)
self.assertEqual(cell_info.cellSizeXValue, 30.0)
self.assertEqual(cell_info.cellSizeYValue, 30.0)
self.assertEqual(cell_info.cellDataType, 'Float32')
# testing extended metadata element: band information
self.assertEqual(logical_file.metadata.bandInformations.count(), 1)
band_info = logical_file.metadata.bandInformations.first()
self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
self.assertEqual(band_info.maximumValue, '2880.00708008')
self.assertEqual(band_info.minimumValue, '2274.95898438')
self.composite_resource.delete()
def test_set_file_type_to_geo_raster_invalid_file_1(self):
# here we are using an invalid raster tif file for setting it
# to Geo Raster file type which should fail
self.raster_file_obj = open(self.invalid_raster_file, 'r')
self._create_composite_resource()
self._test_invalid_file()
self.composite_resource.delete()
def test_set_file_type_to_geo_raster_invalid_file_2(self):
# here we are using a raster tif file for setting it
# to Geo Raster file type which already been previously set to this file type - should fail
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with generic logical file
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# set tif file to GeoRasterFileType
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# check that the resource file is associated with a logical file
res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.tif')[0]
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
# trying to set this tif file again to geo raster file type should raise
# ValidationError
with self.assertRaises(ValidationError):
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
self.composite_resource.delete()
def test_set_file_type_to_geo_raster_invalid_file_3(self):
# here we are using an invalid raster zip file for setting it
# to Geo Raster file type - should fail
self.raster_file_obj = open(self.invalid_raster_zip_file, 'r')
self._create_composite_resource()
self._test_invalid_file()
self.composite_resource.delete()
def test_metadata_CRUD(self):
# this is test metadata related to GeoRasterLogicalFile
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# extract metadata by setting to geo raster file type
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
res_file = self.composite_resource.files.first()
# test that we can update raster specific metadata at the file level
# test that we can update dataset_name of the logical file object
logical_file = res_file.logical_file
self.assertEqual(logical_file.dataset_name, 'small_logan')
logical_file.dataset_name = "big_logan"
logical_file.save()
logical_file = res_file.logical_file
self.assertEqual(logical_file.dataset_name, 'big_logan')
# delete default original coverage metadata
self.assertNotEquals(logical_file.metadata.originalCoverage, None)
logical_file.metadata.originalCoverage.delete()
# create new original coverage metadata with meaningful value
value = {"northlimit": 12, "projection": "transverse_mercator", "units": "meter",
"southlimit": 10,
"eastlimit": 23, "westlimit": 2}
logical_file.metadata.create_element('originalcoverage', value=value)
self.assertEquals(logical_file.metadata.originalCoverage.value, value)
# multiple original coverage elements are not allowed - should raise exception
with self.assertRaises(IntegrityError):
logical_file.metadata.create_element('originalcoverage', value=value)
# delete default cell information element
self.assertNotEquals(logical_file.metadata.cellInformation, None)
logical_file.metadata.cellInformation.delete()
# create new cell information metadata with meaningful value
logical_file.metadata.create_element('cellinformation', name='cellinfo',
cellDataType='Float32',
rows=1660, columns=985, cellSizeXValue=30.0,
cellSizeYValue=30.0,
)
cell_info = logical_file.metadata.cellInformation
self.assertEquals(cell_info.rows, 1660)
self.assertEquals(cell_info.columns, 985)
self.assertEquals(cell_info.cellSizeXValue, 30.0)
self.assertEquals(cell_info.cellSizeYValue, 30.0)
self.assertEquals(cell_info.cellDataType, 'Float32')
# multiple cell Information elements are not allowed - should raise exception
with self.assertRaises(IntegrityError):
logical_file.metadata.create_element('cellinformation', name='cellinfo',
cellDataType='Float32',
rows=1660, columns=985,
cellSizeXValue=30.0, cellSizeYValue=30.0,
)
# delete default band information element
self.assertNotEquals(logical_file.metadata.bandInformations, None)
logical_file.metadata.bandInformations.first().delete()
# create band information element with meaningful value
logical_file.metadata.create_element('bandinformation', name='bandinfo',
variableName='diginal elevation',
variableUnit='meter',
method='this is method',
comment='this is comment',
maximumValue=1000, minimumValue=0,
noDataValue=-9999)
band_info = logical_file.metadata.bandInformations.first()
self.assertEquals(band_info.name, 'bandinfo')
self.assertEquals(band_info.variableName, 'diginal elevation')
self.assertEquals(band_info.variableUnit, 'meter')
self.assertEquals(band_info.method, 'this is method')
self.assertEquals(band_info.comment, 'this is comment')
self.assertEquals(band_info.maximumValue, '1000')
self.assertEquals(band_info.minimumValue, '0')
self.assertEquals(band_info.noDataValue, '-9999')
# multiple band information elements are allowed
logical_file.metadata.create_element('bandinformation', name='bandinfo',
variableName='diginal elevation2',
variableUnit='meter',
method='this is method',
comment='this is comment',
maximumValue=1000, minimumValue=0,
noDataValue=-9999)
self.assertEquals(logical_file.metadata.bandInformations.all().count(), 2)
# test metadata delete
# original coverage deletion is not allowed
with self.assertRaises(ValidationError):
logical_file.metadata.delete_element('originalcoverage',
logical_file.metadata.originalCoverage.id)
# cell information deletion is not allowed
with self.assertRaises(ValidationError):
logical_file.metadata.delete_element('cellinformation',
logical_file.metadata.cellInformation.id)
# band information deletion is not allowed
with self.assertRaises(ValidationError):
logical_file.metadata.delete_element('bandinformation',
logical_file.metadata.bandInformations.first().id)
# test metadata update
# update original coverage element
value_2 = {"northlimit": 12.5, "projection": "transverse_mercator", "units": "meter",
"southlimit": 10.5,
"eastlimit": 23.5, "westlimit": 2.5}
logical_file.metadata.update_element('originalcoverage',
logical_file.metadata.originalCoverage.id,
value=value_2)
self.assertEquals(logical_file.metadata.originalCoverage.value, value_2)
# update cell info element
logical_file.metadata.update_element('cellinformation',
logical_file.metadata.cellInformation.id,
name='cellinfo', cellDataType='Double',
rows=166, columns=98,
cellSizeXValue=3.0, cellSizeYValue=3.0,
)
cell_info = logical_file.metadata.cellInformation
self.assertEquals(cell_info.rows, 166)
self.assertEquals(cell_info.columns, 98)
self.assertEquals(cell_info.cellSizeXValue, 3.0)
self.assertEquals(cell_info.cellSizeYValue, 3.0)
self.assertEquals(cell_info.cellDataType, 'Double')
# update band info element
logical_file.metadata.update_element('bandinformation',
logical_file.metadata.bandInformations.first().id,
name='bandinfo',
variableName='precipitation',
variableUnit='mm/h',
method='this is method2',
comment='this is comment2',
maximumValue=1001, minimumValue=1,
noDataValue=-9998
)
band_info = logical_file.metadata.bandInformations.first()
self.assertEquals(band_info.name, 'bandinfo')
self.assertEquals(band_info.variableName, 'precipitation')
self.assertEquals(band_info.variableUnit, 'mm/h')
self.assertEquals(band_info.method, 'this is method2')
self.assertEquals(band_info.comment, 'this is comment2')
self.assertEquals(band_info.maximumValue, '1001')
self.assertEquals(band_info.minimumValue, '1')
self.assertEquals(band_info.noDataValue, '-9998')
# test extra_metadata for the logical file
# there should be no key/value metadata at this point
self.assertEqual(logical_file.metadata.extra_metadata, {})
# create key/vale metadata
logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2'}
logical_file.metadata.save()
self.assertEqual(logical_file.metadata.extra_metadata,
{'key1': 'value 1', 'key2': 'value 2'})
# update key/value metadata
logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2',
'key 3': 'value3'}
logical_file.metadata.save()
self.assertEqual(logical_file.metadata.extra_metadata,
{'key1': 'value 1', 'key2': 'value 2', 'key 3': 'value3'})
# delete key/value metadata
logical_file.metadata.extra_metadata = {}
logical_file.metadata.save()
self.assertEqual(logical_file.metadata.extra_metadata, {})
self.composite_resource.delete()
def test_file_metadata_on_file_delete(self):
# test that when any file in GeoRasterFileType is deleted
# all metadata associated with GeoRasterFileType is deleted
# test for both .tif and .vrt delete
# test with deleting of 'tif' file
self._test_file_metadata_on_file_delete(ext='.tif')
# test with deleting of 'vrt' file
self._test_file_metadata_on_file_delete(ext='.vrt')
self.composite_resource.delete()
def test_file_metadata_on_logical_file_delete(self):
# test that when the GeoRasterFileType is deleted
# all metadata associated with GeoRasterFileType is deleted
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that we have one logical file of type GeoRasterFileType as a result
# of metadata extraction
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# test that we have the metadata elements
# there should be 2 Coverage objects - one at the resource level and
# the other one at the file type level
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 1)
self.assertEqual(logical_file.metadata.coverages.all().count(), 1)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(CellInformation.objects.count(), 1)
self.assertEqual(BandInformation.objects.count(), 1)
# delete the logical file
logical_file.logical_delete(self.user)
# test that we have no logical file of type GeoRasterFileType
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
# test that all metadata deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(CellInformation.objects.count(), 0)
self.assertEqual(BandInformation.objects.count(), 0)
self.composite_resource.delete()
def test_file_metadata_on_resource_delete(self):
# test that when the composite resource is deleted
# all metadata associated with GeoRasterFileType is deleted
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that we have one logical file of type GeoRasterFileType as a result
# of metadata extraction
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
# test that we have the metadata elements
# there should be 2 Coverage objects - one at the resource level and
# the other one at the file type level
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(CellInformation.objects.count(), 1)
self.assertEqual(BandInformation.objects.count(), 1)
# delete resource
hydroshare.delete_resource(self.composite_resource.short_id)
# test that we have no logical file of type GeoRasterFileType
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
# test that all metadata deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(CellInformation.objects.count(), 0)
self.assertEqual(BandInformation.objects.count(), 0)
def test_logical_file_delete(self):
# test that when an instance GeoRasterFileType is deleted
# all files associated with GeoRasterFileType is deleted
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that we have one logical file of type GeoRasterFileType as a result
# of metadata extraction
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
logical_file = GeoRasterLogicalFile.objects.first()
self.assertEqual(logical_file.files.all().count(), 2)
self.assertEqual(self.composite_resource.files.all().count(), 2)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
# delete the logical file using the custom delete function - logical_delete()
logical_file.logical_delete(self.user)
self.assertEqual(self.composite_resource.files.all().count(), 0)
self.composite_resource.delete()
def test_content_file_delete(self):
# test that when any file that is part of an instance GeoRasterFileType is deleted
# all files associated with GeoRasterFileType is deleted
# test deleting of tif file
self._content_file_delete('.tif')
# test deleting of vrt file
self._content_file_delete('.vrt')
self.composite_resource.delete()
def test_raster_file_type_folder_delete(self):
# when a file is set to georasterlogical file type
# system automatically creates folder using the name of the file
# that was used to set the file type
# Here we need to test that when that folder gets deleted, all files
# in that folder gets deleted, the logicalfile object gets deleted and
# the associated metadata objects get deleted
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that we have one logical file of type GeoRasterFileType as a result
# of metadata extraction
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
# should have one GeoRasterFileMetadata object
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
# there should be 2 content files
self.assertEqual(self.composite_resource.files.count(), 2)
# test that there are metadata associated with the logical file
self.assertNotEqual(Coverage.objects.count(), 0)
self.assertNotEqual(OriginalCoverage.objects.count(), 0)
self.assertNotEqual(CellInformation.objects.count(), 0)
self.assertNotEqual(BandInformation.objects.count(), 0)
# delete the folder for the logical file
folder_path = "data/contents/small_logan"
remove_folder(self.user, self.composite_resource.short_id, folder_path)
# there should no content files
self.assertEqual(self.composite_resource.files.count(), 0)
# there should not be any GeoRaster logical file or metadata file
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
# test that all metadata associated with the logical file got deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(CellInformation.objects.count(), 0)
self.assertEqual(BandInformation.objects.count(), 0)
self.composite_resource.delete()
def test_file_rename_or_move(self):
# test that file can't be moved or renamed for any resource file
# that's part of the GeoRaster logical file object (LFO)
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test renaming of files that are associated with raster LFO - which should raise exception
self.assertEqual(self.composite_resource.files.count(), 2)
src_path = 'data/contents/small_logan/small_logan.tif'
tgt_path = "data/contents/small_logan/small_logan_1.tif"
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
src_path = 'data/contents/small_logan/small_logan.vrt'
tgt_path = "data/contents/small_logan/small_logan_1.vrt"
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
# test moving the files associated with geo raster LFO
src_path = 'data/contents/small_logan/small_logan.tif'
tgt_path = "data/contents/big_logan/small_logan.tif"
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
src_path = 'data/contents/small_logan/small_logan.vrt'
tgt_path = "data/contents/big_logan/small_logan.vrt"
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.composite_resource.delete()
def _create_composite_resource(self):
uploaded_file = UploadedFile(file=self.raster_file_obj,
name=os.path.basename(self.raster_file_obj.name))
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Raster File Type Metadata',
files=(uploaded_file,)
)
# set the logical file
resource_post_create_actions(resource=self.composite_resource, user=self.user,
metadata=self.composite_resource.metadata)
def _test_file_metadata_on_file_delete(self, ext):
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
# test that we have one logical file of type GeoRasterFileType
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# there should be 1 coverage element of type spatial
self.assertEqual(logical_file.metadata.coverages.all().count(), 1)
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertNotEqual(logical_file.metadata.originalCoverage, None)
self.assertNotEqual(logical_file.metadata.cellInformation, None)
self.assertNotEqual(logical_file.metadata.bandInformations, None)
# there should be 2 coverage objects - one at the resource level
# and the other one at the file type level
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(CellInformation.objects.count(), 1)
self.assertEqual(BandInformation.objects.count(), 1)
# delete content file specified by extension (ext parameter)
res_file_tif = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, ext)[0]
hydroshare.delete_resource_file(self.composite_resource.short_id,
res_file_tif.id,
self.user)
# test that we don't have logical file of type GeoRasterFileType
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
self.assertEqual(GeoRasterFileMetaData.objects.count(), 0)
# test that all metadata deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(CellInformation.objects.count(), 0)
self.assertEqual(BandInformation.objects.count(), 0)
def _content_file_delete(self, ext):
# test that when any file that is part of an instance GeoRasterFileType is deleted
# all files associated with GeoRasterFileType is deleted
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource()
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
# extract metadata from the tif file
GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
self.assertEqual(self.composite_resource.files.all().count(), 2)
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
# delete the content file specified by the ext (file extension param)
res_file_tif = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, ext)[0]
hydroshare.delete_resource_file(self.composite_resource.short_id,
res_file_tif.id,
self.user)
self.assertEqual(self.composite_resource.files.all().count(), 0)
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0)
def _test_invalid_file(self):
    """Helper: verify that an invalid raster file cannot be promoted to a
    GeoRasterFileType and that the failed attempt leaves the resource
    untouched.

    Expects the composite resource (with exactly one invalid .tif file)
    to have been created by the caller.
    """
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # trying to set this invalid tif file to geo raster file type should raise
    # ValidationError
    with self.assertRaises(ValidationError):
        GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that the invalid file did not get deleted
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    # check that the resource file is still associated with the generic
    # logical file (the failed promotion must not have changed it)
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
| |
"""
Copyright (C) 2018 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import contextlib
import os
import math
try:
import xml.etree.ElementTree as ET
except:
import elementtree.ElementTree as ET
import sunspec.core.pics as pics
import sunspec.core.smdx as smdx
import sunspec.core.suns as suns
import sunspec.core.util as util
from sunspec.core.util import SunSpecError
fspath = getattr(os, 'fspath', str)
# Module-level search path list used when resolving PICS and SMDX files
# (a util.PathList when configured); None means no extra search paths.
file_pathlist = None
@contextlib.contextmanager
def fresh_file_pathlist(*paths):
    """Temporarily replace the module-level file path list.

    Installs a brand-new ``util.PathList`` seeded with *paths* as the
    module-level ``file_pathlist`` and restores the previous value on exit.

    Parameters:
        *paths :
            Path-like objects to add to the new path list.

    Yields:
        The newly installed ``util.PathList`` instance.
    """
    global file_pathlist
    original_pathlist = file_pathlist
    # Build and populate the replacement *before* installing it: the
    # original code replaced the global first, so an exception while adding
    # a path left the module pathlist half-built with no restore.
    replacement = util.PathList()
    for path in paths:
        replacement.add(fspath(path))
    file_pathlist = replacement
    try:
        yield replacement
    finally:
        file_pathlist = original_pathlist
# Largest number of registers requested in a single Modbus read; used by
# Model.load() to split a model into multiple read requests.
MAX_READ_COUNT = 125
class Device(object):
    """A SunSpec device: an ordered collection of model instances.

    Parameters:
        addr :
            Modbus base address of device.

    Raises:
        SunSpecError: Any error encountered in device processing.

    Attributes:
        base_addr
            Modbus base address of the device.
        models_list
            List of model objects present in the device in the order in which
            they appear in the device.
        models
            Dictionary of model object lists representing model types present
            in the device indexed by model id. The elements are model lists to
            allow more than one model of the same model type to be present in
            the device.
    """

    def __init__(self, addr=suns.SUNS_BASE_ADDR_DEFAULT):
        self.base_addr = addr
        self.models_list = []
        self.models = {} # dict of model arrays to support more than one instance of a model

    def add_model(self, model):
        """Add a model object to the device.

        Parameters:
            model :
                Model object to add to the device.
        """
        # models maps model id -> list of instances; a device may contain
        # several instances of the same model type
        models = self.models.get(model.id)
        if models is None:
            self.models[model.id] = []
            models = self.models.get(model.id)
        models.append(model)
        self.models_list.append(model)

    def from_pics(self, element=None, filename=None, pathlist=None):
        """The PICS information for the device can be either an Element Tree
        element for a device from a document already being processed or the file
        name of document in the file system. Populates the device based on the
        elements within the device element.

        Parameters:
            element :
                Element Tree device element.
            filename :
                File name of the PICS document.
            pathlist :
                Pathlist object containing alternate paths to the PICS document.
        """
        global file_pathlist
        pics_data = ''
        try:
            if element is None:
                # locate the document: supplied path list, then the module
                # path list, then the local directory
                # try supplied path list
                if pathlist is not None:
                    try:
                        pics_data = pathlist.read(filename)
                    # NOTE(review): catching NameError looks odd -- presumably
                    # the PathList.read API signals a missing file this way;
                    # confirm against sunspec.core.util.
                    except NameError:
                        pass
                # try device file path list
                if not pics_data and file_pathlist is not None:
                    try:
                        pics_data = file_pathlist.read(filename)
                    except NameError:
                        pass
                # try local directory
                if not pics_data:
                    f = open(filename, 'r')
                    pics_data = f.read()
                    f.close()
                root = ET.fromstring(pics_data)
                if root.tag != pics.PICS_ROOT:
                    raise SunSpecError("Unexpected root element: %s" % (root.tag))
                d = root.find(pics.PICS_DEVICE)
                if d is None:
                    raise SunSpecError("No '{}' elements found in '{}' element".format(pics.PICS_DEVICE, root.tag))
            else:
                d = element
                if d.tag != pics.PICS_DEVICE:
                    raise SunSpecError("Unexpected device tag: '%s'" % (d.tag))
            # NOTE(review): attrib.get returns a *string* when the attribute
            # is present, so the '+ 2' below would raise TypeError (caught by
            # the outer except and re-raised as SunSpecError) -- confirm
            # expected attribute type handling.
            self.base_addr = d.attrib.get(pics.PICS_ATTR_BASE_ADDR, pics.PICS_BASE_ADDR_DEFAULT)
            addr = self.base_addr + 2
            for m in d.findall('*'):
                # NOTE(review): findall never yields None; this check is dead.
                if m is None:
                    raise SunSpecError("No '{}' elements found in '{}' element".format(pics.PICS_MODEL, d.tag))
                if m.tag != pics.PICS_MODEL:
                    raise SunSpecError("Unexpected '{}' element in '{}' element".format(m.tag, d.tag))
                model_id = m.attrib.get(pics.PICS_ATTR_ID)
                if model_id is None:
                    raise SunSpecError('Module id error')
                model_len = m.attrib.get(pics.PICS_ATTR_LEN)
                if model_len is not None:
                    # raise SunSpecError('Module len error in model %d' % (model_id))
                    model_len = int(model_len)
                # move address past model id and length (even though address is not real in the case)
                model = Model(self, model_id, addr + 2, model_len)
                try:
                    model.load()
                except Exception as e:
                    # record the load failure but keep processing the PICS data
                    model.load_error = str(e)
                model.from_pics(m)
                self.add_model(model)
                addr += model.len + 2
        except Exception as e:
            # any failure is re-raised uniformly as a SunSpecError
            raise SunSpecError('Error loading PICS: %s' % str(e))

    # Disabled legacy implementation kept as a dead string literal.
    """
    def to_pics(self, pretty_print=False, single_repeating=True):
        attr = {pics.PICS_ATTR_VERSION: str(pics.PICS_VERSION)}
        root = ET.Element(pics.PICS_ROOT)
        e = ET.SubElement(root, pics.PICS_DEVICE, attrib=attr)
        for model in self.models_list:
            model.to_pics(e, single_repeating=single_repeating)
        if pretty_print:
            util.indent(root)
        return ET.tostring(root)
    """

    def to_pics(self, parent, single_repeating=True):
        """Adds the device and all elements within the device to the parent
        element. If *single_repeating* is True, only the first repeating block
        for each model is added to the document.

        Parameters:
            parent :
                Element Tree element on which to place the device element.
            single_repeating :
                Flag to indicate whether to include a single or all repeating
                blocks within each model in the PICS document.
        """
        attr = {pics.PICS_ATTR_VERSION: str(pics.PICS_VERSION)}
        e = ET.SubElement(parent, pics.PICS_DEVICE, attrib=attr)
        for model in self.models_list:
            model.to_pics(e, single_repeating=single_repeating)

    def not_equal(self, device):
        """Determines if the specified device instance is not equal based on all
        the device attribute values including models, blocks and points. If not
        equal, returns a string indicating why the device is not equal. Returns
        False if the device is equal.

        Parameters:
            device :
                Device to compare.
        """
        # compare model-by-model, in device order
        if len(self.models_list) != len(device.models_list):
            return 'Devices not equal - model counts: %d %d' % (len(self.models_list), len(device.models_list))
        for i in range(len(self.models_list)):
            s = self.models_list[i].not_equal(device.models_list[i])
            if s:
                return 'Devices not equal - %s' % (s)
        return False

    def __str__(self):
        # concatenation of the string forms of all models, in order
        device_str = ''
        for model in self.models_list:
            device_str += str(model)
        return device_str
class Block(object):
    """A block instance within a model.

    Parameters:
        model :
            Model associated with the block.
        addr :
            Modbus address of the first point in the block.
        blen :
            Length of the block in Modbus registers.
        block_type :
            The :const:`sunspec.core.device.BlockType` instance associated with
            the block.
        index :
            Block instance index for the block type within the model.

    Attributes:
        model
            Model associated with the block.
        block_type
            The :const:`sunspec.core.device.BlockType` instance associated with
            the block.
        addr
            Modbus address of the first point in the block.
        len
            Length of the block in Modbus registers.
        type
            Block type, either :const:`sunspec.core.suns.SUNS_BLOCK_FIXED` or
            :const:`sunspec.core.suns.SUNS_BLOCK_REPEATING`.
        index
            Block instance index for the block type within the model.
        points_list
            List of non-scale factor points in the block ordered by offset.
        points
            Dictionary of non-scale factor points in the block indexed by point
            id.
        points_sf
            Dictionary of scale factor points in the block indexed by point id.
    """

    def __init__(self, model, addr, blen, block_type, index=1):
        self.model = model
        self.block_type = block_type
        self.addr = addr
        self.len = blen
        self.type = block_type.type
        self.index = index
        self.points_list = []
        self.points = {}
        self.points_sf = {}

    def from_pics(self, element):
        """Sets the block contents based on an element tree model type element
        contained in a SunSpec PICS document.

        Parameters:
            element :
                Element Tree model element.
        """
        for child in element.findall('*'):
            if child.tag != pics.PICS_POINT:
                raise SunSpecError("Unexpected '{}' element in '{}' element".format(child.tag, element.tag))
            pid = child.attrib.get(pics.PICS_ATTR_ID)
            # a point id may name either a regular or a scale factor point
            target = self.points.get(pid)
            if target is None:
                target = self.points_sf.get(pid)
            if target is not None:
                target.from_pics(child)
        # scale factor values can only be resolved once every point in the
        # block has been read
        for target in self.points_list:
            if target.sf_point is not None:
                target.value_sf = target.sf_point.value_base

    def to_pics(self, parent):
        """Adds the block and all elements within the block to the parent
        element.

        Parameters:
            parent :
                Element Tree element on which to place the block element.
        """
        attr = {}
        if self.index > 1:
            attr[pics.PICS_ATTR_INDEX] = str(self.index)
        if self.block_type.type == suns.SUNS_BLOCK_REPEATING:
            attr[pics.PICS_ATTR_TYPE] = pics.PICS_TYPE_REPEATING
        block_element = ET.SubElement(parent, pics.PICS_BLOCK, attrib=attr)
        # iterate the block *type* point list so the original point order of
        # the definition is preserved in the document
        for pt in self.block_type.points_list:
            candidate = self.points.get(pt.id)
            if candidate is None:
                candidate = self.points_sf.get(pt.id)
            if candidate is not None:
                candidate.to_pics(block_element)

    def not_equal(self, block):
        """Determines if the specified block instance is not equal based on all
        the block attribute values including points. If not equal, returns a
        string indicating why the block is not equal. Returns False if the block
        is equal.

        Parameters:
            block :
                Block to compare.
        """
        mismatch = self.block_type.not_equal(block.block_type)
        if mismatch:
            return 'block {} not equal - block type not equal: {}'.format(self.block_type.type, mismatch)
        for point in self.points_list:
            mismatch = point.not_equal(block.points.get(point.point_type.id))
            if mismatch:
                return 'block %d not equal - %s' % (self.index, mismatch)
        return False

    def __str__(self):
        parts = ['Block: type: %s index: %d:\n' % (self.type, self.index)]
        for point in self.points_list:
            parts.append('  ' + str(point) + '\n')
        return ''.join(parts)
class Point(object):
    """A point (register value) instance within a block.

    Parameters:
        block :
            Block associated with the point.
        point_type :
            The :const:`sunspec.core.device.PointType` instance associated with
            the point.
        addr :
            The Modbus address of the point.
        sf_point :
            Scale factor point associated with the point if present.
        value :
            Initial value for the *value_base* attribute of the point.

    Attributes:
        block
            Block associated with the point.
        point_type
            The :const:`sunspec.core.device.PointType` instance associated with
            the point.
        addr
            Modbus address of the point.
        sf_point
            Scale factor point associated with the point if present.
        impl
            Indication if the point is implemented. A value of True indicates
            the point is implemented. Intended to be used for cases when no
            initial value is given for the point but the implementation status
            is known as in SunSpec PICS documents.
        value_base
            Value of the point without the point scale factor applied.
        value_sf
            Scale factor point value.
        dirty
            Indication if the point has been written to the physical device
            since the last update of the point. A value of True indicates that
            the point has not been written since the last update.
        value
            Value of the point with the scale factor applied.
    """

    def __init__(self, block=None, point_type=None, addr=None, sf_point=None, value=None):
        self.block = block
        self.point_type = point_type
        self.addr = addr
        self.sf_point = sf_point
        # implemented until a PICS document marks it otherwise
        self.impl = True
        self.value_base = value
        self.value_sf = None
        self.dirty = False

    # use older property format to support earlier python 2.x versions
    def value_getter(self):
        """Return the point value with the scale factor applied (if any)."""
        if self.value_sf:
            return self.value_base * math.pow(10, self.value_sf)
        else:
            return self.value_base

    def value_setter(self, v):
        """Set the point value, removing the scale factor if one applies, and
        mark the point dirty for a later device write. When no scale factor
        applies, the value is converted via point_type.to_value()."""
        if self.value_sf:
            self.value_base = int(round(float(v) / math.pow(10, self.value_sf)))
        else:
            self.value_base = self.point_type.to_value(v)
        self.dirty = True

    value = property(value_getter, value_setter, None)

    def from_pics(self, element):
        """Sets the point contents based on an element tree point element
        contained in a SunSpec PICS document.

        Parameters:
            element :
                Element Tree point element.
        """
        impl = True
        impl_attr = element.attrib.get(pics.PICS_ATTR_IMPLEMENTED)
        if impl_attr:
            if impl_attr == pics.PICS_IMPLEMENTED_FALSE:
                impl = False
        value = None
        if impl:
            if element.text:
                value = self.point_type.to_value(element.text)
                # the value itself may encode "not implemented"
                self.impl = self.point_type.is_impl(value)
        else:
            self.impl = False
        if self.impl and value is not None:
            self.value_base = value

    def to_pics(self, parent):
        """Adds the point to the parent element.

        Parameters:
            parent :
                Element Tree element on which to place the point element.
        """
        attr = {pics.PICS_ATTR_ID: str(self.point_type.id)}
        # NOTE(review): 'self.value' applies the scale factor; assumes
        # value_base is set whenever value_sf is -- confirm.
        if self.value is None:
            attr[pics.PICS_ATTR_IMPLEMENTED] = str(pics.PICS_IMPLEMENTED_FALSE)
        else:
            if self.point_type.access != suns.SUNS_ACCESS_R:
                # reverse lookup of the PICS name for read-write access
                access = [key for key, value in pics.pics_access_types.items() if value == suns.SUNS_ACCESS_RW][0]
                attr[pics.PICS_ATTR_ACCESS] = str(access)
        e = ET.SubElement(parent, pics.PICS_POINT, attrib=attr)
        if self.value_base is not None:
            # strip trailing NULs that can appear in fixed-length strings
            e.text = str(self.value_base).rstrip('\0')

    def not_equal(self, point):
        """ Determines if the specified point instance is not equal based on all
        the point attribute values. If not equal, returns a string indicating
        why the point is not equal. Returns False if the point is equal.

        Parameters:
            point :
                Point to compare.
        """
        s = self.point_type.not_equal(point.point_type)
        if s:
            return 'point {} not equal - point type not equal: {}'.format(self.point_type.id, s)
        # values differ when either side has a value and they are unequal;
        # same rule for the scale factor values
        # (leftover debug print statements removed here)
        if (((self.value_base is not None or point.value_base is not None) and (self.value_base != point.value_base)) or
                ((self.value_sf is not None or point.value_sf is not None) and (self.value_sf != point.value_sf))):
            return 'point {} not equal: {} {} - {} {}'.format(self.point_type.id, self.value_base, self.value_sf, point.value_base, point.value_sf)
        return False

    def __str__(self):
        point_str = 'Point: id = %s impl = %s addr = %s value_base = %s' % (self.point_type.id, str(self.impl), self.addr, str(self.value_base))
        if self.sf_point is not None:
            point_str += ' sf_value = %s' % (str(self.sf_point.value_base))
        return point_str
class ScaleFactor(object):
    """Constant scale factor stand-in.

    Exposes only the ``value_base`` attribute that consumers of real scale
    factor points read, allowing a fixed numeric scale factor from a model
    definition to be used where a scale factor Point is expected.
    """

    def __init__(self, value=None):
        self.value_base = value
class Model(object):
    """A model instance within a device.

    Parameters:
        device :
            Device associated with the model.
        mid :
            Model id.
        addr :
            Modbus address of the first point in the model.
        mlen :
            Length of the model in Modbus registers.
        index :
            Model instance index for the model type within the device.

    Raises:
        SunSpecError: Any error encountered in device processing.

    Attributes:
        device
            Device instance that contains the model instance.
        id
            Model id. The model id maps to a SunSpec model type definition.
        index
            Model instance index for the model type within the device. Model
            instance indexes start at 1 for the first model type instance.
        model_type
            The :const:`sunspec.core.device.ModelType` instance associated with
            the model.
        addr
            Modbus address of the first point in the model.
        len
            Length of the model in Modbus registers.
        points_list
            List of fixed block non-scale factor points ordered by offset.
        points
            Dictionary of fixed block non-scale factor points indexed by point
            id.
        points_sf
            Dictionary of fixed block scale factor points indexed by point id.
        blocks
            List of blocks contained in the model instance. Block 0 is the fixed
            block if present and blocks 1 to n are the repeating block
            instances.
    """

    def __init__(self, device=None, mid=None, addr=0, mlen=0, index=1):
        self.device = device
        self.id = int(mid)
        self.index = index
        self.model_type = None
        self.addr = addr        # modbus address of first point in the model
        self.len = int(mlen)    # register count of the point elements in the model
        self.points_list = []   # fixed block non-scale factor points list ordered by offset
        self.points = {}        # fixed block non-scale factor points
        self.points_sf = {}     # fixed block scale factor points
        self.blocks = []
        self.load_error = None  # set by Device.from_pics when load() fails
        self.read_blocks = []   # start addresses of the Modbus read requests

    def load(self, block_class=Block, point_class=Point):
        """Loads the model instance with blocks and points based on the SunSpec
        model type definition.

        Parameters:
            block_class :
                Block class to use to create block instances.
            point_class :
                Point class to use to create point instances.
        """
        last_read_addr = self.addr
        self.read_blocks.append(last_read_addr)
        self.model_type = model_type_get(self.id)
        if self.model_type is not None:
            if self.len == 0:
                # no length reported by the device: use the definition length
                self.len = self.model_type.len
            end_addr = self.addr + self.len
            index = 0
            # model type always has a fixed block defined
            block_type = self.model_type.fixed_block
            block_addr = self.addr
            block_len = int(block_type.len)
            # adjustment for legacy common model len = 65
            if self.id == 1 and self.len == 65:
                block_len = self.len
            while True:
                block_end = block_addr + block_len
                if end_addr < block_end:
                    raise SunSpecError(
                        'Block (length {}, ending at {}) would exceed end of '
                        'model reported by device ({}).'
                        .format(block_len, block_end, end_addr)
                    )
                block = block_class(self, block_addr, block_len, block_type, index)
                self.blocks.append(block)
                for point_type in block_type.points_list:
                    if point_type.type != suns.SUNS_TYPE_PAD:
                        point_addr = int(block_addr) + int(point_type.offset)
                        point = point_class(block=block, point_type=point_type, addr=str(point_addr))
                        # partition the model into Modbus read requests that
                        # stay within the MAX_READ_COUNT register limit
                        if point_addr + point.point_type.len - last_read_addr > MAX_READ_COUNT:
                            last_read_addr = point_addr
                            self.read_blocks.append(last_read_addr)
                        if point_type.type == suns.SUNS_TYPE_SUNSSF:
                            block.points_sf[point_type.id] = point
                        else:
                            block.points_list.append(point)
                            block.points[point_type.id] = point
                # resolve scale factor addresses for repeating block
                for point in block.points_list:
                    if point.point_type.sf is not None and point.sf_point is None:
                        # check for constant scale factor
                        try:
                            sf_value = int(point.point_type.sf)
                            point.sf_point = ScaleFactor(sf_value)
                        except Exception:
                            pass
                        # try local repeating block first
                        if point.sf_point is None:
                            point.sf_point = block.points_sf.get(point.point_type.sf)
                        if point.sf_point is None:
                            # if repeating block, try fixed block
                            if index > 0:
                                point.sf_point = self.blocks[0].points_sf.get(point.point_type.sf)
                        if point.sf_point is None:
                            # ### what state should model be left in on exception
                            # fix: the '%' arguments were not parenthesized
                            # (so this raise crashed with a TypeError) and it
                            # referenced a nonexistent 'pid' attribute
                            raise SunSpecError('Unable to resolve scale factor point %s for point %s in model %s' %
                                               (point.point_type.sf, point.point_type.id, self.id))
                block_addr += block_len
                block_type = self.model_type.repeating_block
                if block_type is None:
                    # ### check for extra registers?
                    break
                index += 1
                block_len = int(block_type.len)
                if end_addr == block_end:
                    break
        else:
            raise SunSpecError('Unknown model type - id: %s' % str(self.id))
        # expose fixed block points at model level if present
        try:
            self.points_list = self.blocks[0].points_list
            self.points = self.blocks[0].points
            self.points_sf = self.blocks[0].points_sf
        except IndexError:
            pass

    def from_pics(self, element):
        """ Sets the model contents based on an element tree model type element
        contained in a SunSpec PICS document.

        Parameters:
            element :
                Element Tree model element.
        """
        # update index if present; converted to int so it stays comparable
        # with the integer indexes assigned at construction (the original
        # stored the raw attribute string)
        index = element.attrib.get(pics.PICS_ATTR_INDEX)
        if index is not None:
            self.index = int(index)
        for b in element.findall('*'):
            if b.tag != pics.PICS_BLOCK:
                raise SunSpecError("Unexpected '{}' element in '{}' element".format(b.tag, element.tag))
            block_type = pics.pics_block_types.get(b.attrib.get(pics.PICS_ATTR_TYPE, pics.PICS_TYPE_FIXED))
            if block_type is None:
                raise SunSpecError('Unknown block type')
            if block_type == suns.SUNS_BLOCK_FIXED:
                if len(self.blocks) > 0:
                    self.blocks[0].from_pics(b)
            elif block_type == suns.SUNS_BLOCK_REPEATING:
                block_index = b.attrib.get(pics.PICS_ATTR_INDEX)
                # if no index specified, apply to all repeating blocks
                if block_index is None:
                    if len(self.blocks) > 1:
                        for block in self.blocks[1:]:
                            block.from_pics(b)
                else:
                    block_index = int(block_index)
                    # fix: bound check was 'len < block_index', letting
                    # block_index == len fall through to an IndexError
                    if block_index >= len(self.blocks):
                        raise SunSpecError('Block index out of range: %s' % (str(block_index)))
                    self.blocks[block_index].from_pics(b)
            else:
                raise SunSpecError('Internal block type error')

    def to_pics(self, parent, single_repeating=True):
        """ Adds the model and all elements within the model to the parent
        element. If *single_repeating* is True, only the first repeating block
        is added to the document.

        Parameters:
            parent :
                Element Tree element on which to place the model element.
            single_repeating :
                Flag to indicate whether to include a single or all repeating
                blocks within the model in the PICS document.
        """
        attr = {pics.PICS_ATTR_ID: str(self.id), pics.PICS_ATTR_LEN: str(self.len)}
        if self.index != 1:
            attr[pics.PICS_ATTR_INDEX] = str(self.index)
        e = ET.SubElement(parent, pics.PICS_MODEL, attrib=attr)
        for block in self.blocks:
            if not single_repeating or block.index <= 1:
                block.to_pics(e)

    def not_equal(self, model):
        """ Determines if the specified model instance is not equal based on all
        the model attribute values including blocks and points. If not equal,
        returns a string indicating why the model is not equal. Returns False if
        the model is equal.

        Parameters:
            model :
                Model to compare.
        """
        if len(self.blocks) != len(model.blocks):
            return 'model %s not equal - block counts: %d %d' % (self.model_type.id, len(self.blocks), len(model.blocks))
        s = self.model_type.not_equal(model.model_type)
        if s:
            return 'model {} not equal - model id not equal: {}'.format(self.model_type.id, s)
        for i in range(len(self.blocks)):
            s = self.blocks[i].not_equal(model.blocks[i])
            if s:
                return 'model {} not equal - {}'.format(self.model_type.id, s)
        return False

    def __str__(self):
        model_str = 'Model %s:\n' % self.id
        for point in self.points_list:
            model_str += '  ' + str(point) + '\n'
        for block in self.blocks[1:]:
            model_str += str(block)
        return model_str
# Default on-disk location of the bundled SMDX model definitions
# (../models/smdx relative to this module).
model_type_path_default = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'models', 'smdx')
# Cache of loaded ModelType objects, populated by model_type_get().
model_types = {}
def model_type_get(model_id):
    """Return the ModelType for *model_id*, loading and caching the SMDX
    model definition on first use.

    Resolution order: the module-level file path list, the file name as
    given, then the default bundled model directory.

    Parameters:
        model_id :
            SunSpec model id (int or str).

    Raises:
        SunSpecError: If the model file cannot be found or parsed.
    """
    global file_pathlist
    global model_types

    key = str(model_id)
    model_type = model_types.get(key)
    if model_type is None:
        smdx_data = ''
        # create model file name
        filename = smdx.model_id_to_filename(model_id)
        # check in file path list if set
        if file_pathlist is not None:
            try:
                smdx_data = file_pathlist.read(filename)
            # NOTE(review): NameError presumably signals "not found" in the
            # PathList.read API -- confirm against sunspec.core.util
            except NameError:
                pass
        if not smdx_data:
            if not os.path.exists(filename):
                filename = os.path.join(model_type_path_default, filename)
            if os.path.exists(filename):
                try:
                    # 'with' guarantees the handle is closed on read errors
                    with open(filename, 'r') as f:
                        smdx_data = f.read()
                except Exception as e:
                    raise SunSpecError('Error loading model {} at {}: {}'.format(model_id, filename, str(e)))
        if smdx_data:
            root = ET.fromstring(smdx_data)
            # load model type
            try:
                model_type = ModelType()
                model_type.from_smdx(root)
                # fix: cache under the same str() key used for the lookup
                # above; the original stored under model_type.id (an int),
                # so the cache never hit and every call re-parsed the file
                model_types[key] = model_type
            except Exception as e:
                raise SunSpecError('Error loading model {} at {}: {}'.format(model_id, filename, str(e)))
        else:
            raise SunSpecError('Model file for model %s not found' % (str(model_id)))
    return model_type
def check_for_models(pathlist):
    """Sanity check that the SunSpec model definitions are reachable.

    Attempts to load the common model (1); raises with installation guidance
    when it cannot be loaded.

    Parameters:
        pathlist :
            Currently unused; model resolution goes through the module-level
            file path list and default path. Kept for interface compatibility.

    Raises:
        Exception: If the common model (1) cannot be loaded.
    """
    # The common model (1) should be accessible.
    try:
        model_type_get(1)
    # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
    # not swallowed
    except Exception:
        raise Exception(
            'Unable to open common model (1). '
            'Make certain model repository is available. '
            'Please reference the README.md.'
        )
class ModelType(object):
    """SunSpec model type definition loaded from an SMDX document.

    Parameters:
        mid :
            Model id that identifies a specific SunSpec model type definition.

    Attributes:
        id
            Model id that identifies a specific SunSpec model type definition.
        len
            Length in Modbus registers of the model type as specified in the
            model definition.
        label
            Label string as specified in the model definition.
        description
            Description string as specified in the model definition.
        notes
            Notes string as specified in the model definition.
        fixed_block
            Fixed block type as specified in the model definition if present.
        repeating_block
            Repeating block type as specified in the model definition if
            present.
    """

    def __init__(self, mid=None):
        self.id = mid
        self.len = None
        self.name = None
        self.label = None
        self.description = None
        self.notes = None
        self.fixed_block = None
        self.repeating_block = None
        # symbol definitions keyed by the (symbol id, point id) tuple
        self.symbols = {}

    def from_smdx(self, element):
        """ Sets the model type attributes based on an element tree model type
        element contained in an SMDX model definition.

        Parameters:
            element :
                Element Tree model type element.
        """
        # NOTE(review): smdx_data is never used in this method (dead local).
        smdx_data = ''
        # only the first model element is processed (note the 'break' below)
        for m in element.findall(smdx.SMDX_MODEL):
            # NOTE(review): 'if True' is vestigial (findall already filters
            # by tag), which makes the 'else' raise below unreachable.
            if True:
                self.id = int(m.attrib.get(smdx.SMDX_ATTR_ID))
                self.len = m.attrib.get(smdx.SMDX_ATTR_LEN)
                self.name = m.attrib.get(smdx.SMDX_ATTR_NAME)
                if self.name is None:
                    self.name = 'model_' + str(self.id)
                if self.len is None:
                    raise SunSpecError('Module len error')
                self.len = int(self.len)
                for b in m.findall(smdx.SMDX_BLOCK):
                    block = BlockType(model_type=self)
                    block.from_smdx(b)
                    if block.type == suns.SUNS_BLOCK_FIXED:
                        if self.fixed_block is None:
                            self.fixed_block = block
                        else:
                            raise SunSpecError('Duplicate fixed block type definition')
                    elif block.type == suns.SUNS_BLOCK_REPEATING:
                        if self.repeating_block is None:
                            self.repeating_block = block
                        else:
                            raise SunSpecError('Duplicate repeating block type definition')
                break
            else:
                raise SunSpecError('Unexpected smdx element: %s' % m.tag)
        # merge the strings section (label/description/notes) for this model id
        for s in element.findall(smdx.SMDX_STRINGS):
            if s.attrib.get(smdx.SMDX_ATTR_ID) == str(self.id):
                m = s.find(smdx.SMDX_MODEL)
                if m is not None:
                    for e in m.findall('*'):
                        if e.tag == smdx.SMDX_LABEL:
                            self.label = e.text
                        elif e.tag == smdx.SMDX_DESCRIPTION:
                            self.description = e.text
                        elif e.tag == smdx.SMDX_NOTES:
                            self.notes = e.text
                # attach per-point strings to the matching point type
                for e in s.findall(smdx.SMDX_POINT):
                    pid = e.attrib.get(smdx.SMDX_ATTR_ID)
                    point_type = None
                    if self.fixed_block is not None:
                        point_type = self.fixed_block.points.get(pid)
                    if point_type is None and self.repeating_block is not None:
                        point_type = self.repeating_block.points.get(pid)
                    if point_type:
                        point_type.from_smdx(e, strings=True)
        # a model type always ends up with a fixed block, even if empty
        if self.fixed_block is None:
            self.fixed_block = BlockType(suns.SUNS_BLOCK_FIXED, model_type=self)

    def symbol_add(self, symbol, point_id):
        # key is the (symbol id, point id) tuple
        self.symbols[symbol.id, point_id] = symbol

    def symbol_get(self, sid, point_id):
        # returns None when no symbol is registered for the pair
        return self.symbols.get((sid, point_id))

    def not_equal(self, model_type):
        """ Determines if the specified model type instance is not equal based
        on all the model type attribute values including blocks and points. If
        not equal, returns a string indicating why the model type is not equal.
        Returns False if the model type is equal.

        Parameters:
            model_type :
                Model type to compare.
        """
        # identity short-circuit
        if self == model_type:
            return False
        if model_type is None:
            return "ModelType is None"
        if self.id != model_type.id:
            return "ModelType attribute 'id' not equal: {} {}".format(str(self.id), str(model_type.id))
        if self.len != model_type.len:
            return "ModelType attribute 'len' not equal: {} {}".format(str(self.len), str(model_type.len))
        if self.label != model_type.label:
            return "ModelType attribute 'label' not equal: {} {}".format(str(self.label), str(model_type.label))
        if self.description != model_type.description:
            return "ModelType attribute 'description' not equal: {} {}".format(str(self.description), str(model_type.description))
        if self.notes != model_type.notes:
            return "ModelType attribute 'notes' not equal: {} {}".format(str(self.notes), str(model_type.notes))
        if self.fixed_block is not None:
            not_equal = self.fixed_block.not_equal(model_type.fixed_block)
            if not_equal:
                return not_equal
        elif model_type.fixed_block is not None:
            return "ModelType fixed block is None"
        if self.repeating_block is not None:
            not_equal = self.repeating_block.not_equal(model_type.repeating_block)
            if not_equal:
                return not_equal
        elif model_type.repeating_block is not None:
            return "ModelType repeating block is None"
        return False

    def __str__(self):
        s = 'ModelType: id = {} len = {}\n'.format(self.id, self.len)
        if self.fixed_block:
            s += str(self.fixed_block)
        if self.repeating_block:
            s += str(self.repeating_block)
        return s
class BlockType(object):
    """Block type definition within a SunSpec model definition.

    Parameters:
        btype :
            Block type as specified in the model definition. Valid values are
            sunspec.core.suns.SUNS_BLOCK_FIXED or
            sunspec.core.suns.SUNS_BLOCK_REPEATING.
        blen : Block length in Modbus registers.

    Attributes:
        type
            Block type as specified in the model definition. Valid values are
            sunspec.core.suns.SUNS_BLOCK_FIXED or
            sunspec.core.suns.SUNS_BLOCK_REPEATING.
        len
            Block length in Modbus registers.
        points_list
            List containing the points in the block in offset order.
        points
            Dictionary containing the points in the block indexed by point id.
    """

    def __init__(self, btype=None, blen=0, name=None, model_type=None):
        self.model_type = model_type
        self.type = btype
        self.len = blen
        self.name = name
        self.points_list = []
        self.points = {}

    def from_smdx(self, element):
        """ Sets the block type attributes based on an element tree block type
        element contained in an SMDX model definition.

        Parameters:
            element :
                Element Tree block type element.
        """
        btype = element.attrib.get(smdx.SMDX_ATTR_TYPE, smdx.SMDX_ATTR_TYPE_FIXED)
        if btype not in (smdx.SMDX_ATTR_TYPE_FIXED, smdx.SMDX_ATTR_TYPE_REPEATING):
            raise SunSpecError('Invalid block type')
        self.type = smdx.smdx_block_types.get(btype)
        self.len = element.attrib.get(smdx.SMDX_ATTR_LEN)
        if self.len is None:
            raise SunSpecError('Block len error')
        self.name = element.attrib.get(smdx.SMDX_ATTR_NAME)
        if self.name is None:
            self.name = self.type
        # process points
        for point_element in element.findall(smdx.SMDX_POINT):
            pt = PointType(block_type=self)
            pt.from_smdx(point_element)
            if self.points.get(pt.id) is not None:
                ET.dump(point_element)
                raise SunSpecError('Duplicate point definition: %s' % (pt.id))
            self.points_list.append(pt)
            self.points[pt.id] = pt

    def not_equal(self, block_type):
        """ Determines if the specified block type instance is not equal based
        on all the block type attribute values including points. If not equal,
        returns a string indicating why the block type is not equal. Returns
        False if the block type is equal.

        Parameters:
            block_type :
                Block type to compare.
        """
        # identity short-circuit
        if self == block_type:
            return False
        if block_type is None:
            return "BlockType '%s' is none" % (str(self.type))
        if self.type != block_type.type:
            return "BlockType attribute 'type' not equal: {} {}".format(str(self.type), str(block_type.type))
        if self.len != block_type.len:
            return "BlockType attribute 'len' not equal: {} {}".format(str(self.len), str(block_type.len))
        if len(self.points) != len(block_type.points):
            return "BlockType '%s' point count not equal" % (str(self.type))
        for pid, ptype in self.points.items():
            mismatch = ptype.not_equal(block_type.points.get(pid))
            if mismatch:
                return mismatch
        return False

    def __str__(self):
        parts = ['BlockType: type = {} len = {}\n'.format(self.type, self.len)]
        for p in self.points_list:
            parts.append('  %s\n' % (str(p)))
        return ''.join(parts)
class PointType(object):
    """A point definition within a SunSpec model block type.
    Parameters:
        pid :
            Point id as specified in the model definition.
        offset :
            Point offset within the block as specified in the model definition.
        ptype :
            Point type as specified in the model definition. Valid values are
            defined in sunspec.core.suns.SUNS_TYPE_*.
        plen :
            Point length in Modbus registers for points that have a type of
            'string'.
        mandatory :
            Mandatory indication as specified in the model definition. Valid
            values are sunspec.core.suns.SUNS_MANDATORY_TRUE or
            sunspec.core.suns.SUNS_MANDATORY_FALSE.
        access :
            Point access setting as specified in the model definition. Valid
            values are sunspec.core.suns.SUNS_ACCESS_R or
            sunspec.core.suns.SUNS_ACCESS_RW.
        sf :
            Id of the scale factor point associated with the point or None if
            the point does not have a scale factor.
        block_type :
            The BlockType containing this point definition, or None.
    Attributes:
        id, offset, type, len, mandatory, access, sf :
            As the parameters above.
        units :
            Units string from the model definition, or None.
        label, description, notes :
            Documentation strings from the model definition.
        value_default :
            Default value for a point instance if no value specified.
        is_impl, data_to, to_data, to_value :
            Conversion/implementation-test callables taken from
            sunspec.core.suns.suns_point_type_info for this point's type.
        symbols :
            List of Symbol instances attached to this point.
    """
    def __init__(self, pid=None, offset=None, ptype=None, plen=None, mandatory=None, access=None, sf=None,
                 block_type=None):
        self.block_type = block_type  # owning BlockType; skipped in comparisons
        self.id = pid
        self.offset = offset
        self.type = ptype
        self.len = plen
        self.mandatory = mandatory
        self.access = access
        self.units = None
        self.sf = sf
        self.label = None
        self.description = None
        self.notes = None
        self.value_default = None
        # conversion callables are filled in from suns.suns_point_type_info
        self.is_impl = None
        self.data_to = None
        self.to_data = None
        self.to_value = None
        self.symbols = []
    def from_smdx(self, element, strings=False):
        """ Sets the point attributes based on an element tree point element
        contained in an SMDX model definition.
        Parameters:
            element :
                Element Tree point type element.
            strings :
                Indicates if *element* is a subelement of the 'strings'
                definition within the model definition.
        """
        if strings is False:
            self.id = element.attrib.get(smdx.SMDX_ATTR_ID)
            offset = element.attrib.get(smdx.SMDX_ATTR_OFFSET)
            ptype = element.attrib.get(smdx.SMDX_ATTR_TYPE)
            plen = element.attrib.get(smdx.SMDX_ATTR_LEN)
            mandatory = element.attrib.get(smdx.SMDX_ATTR_MANDATORY, smdx.SMDX_MANDATORY_FALSE)
            access = element.attrib.get(smdx.SMDX_ATTR_ACCESS, smdx.SMDX_ACCESS_R)
            self.units = element.attrib.get(smdx.SMDX_ATTR_UNITS)
            if self.id is None:
                raise SunSpecError('Missing point id attribute')
            if offset is None:
                raise SunSpecError('Missing offset attribute for point: %s' % self.id)
            # Bug fix: convert only after the presence check above so a missing
            # offset raises SunSpecError instead of TypeError from int(None).
            self.offset = int(offset)
            if ptype is None:
                raise SunSpecError('Missing type attribute for point: %s' % self.id)
            if ptype == smdx.SMDX_TYPE_STRING and plen is None:
                raise SunSpecError('Missing len attribute for point: %s' % self.id)
            self.type = smdx.smdx_point_types.get(ptype)
            if self.type is None:
                raise SunSpecError('Unknown point type: %s' % ptype)
            self.mandatory = smdx.smdx_mandatory_types.get(mandatory)
            if self.mandatory is None:
                raise SunSpecError('Unknown mandatory type: %s' % mandatory)
            self.access = smdx.smdx_access_types.get(access)
            if self.access is None:
                raise SunSpecError('Unknown access type: %s' % access)
            self.sf = element.attrib.get(smdx.SMDX_ATTR_SF)
            # default len and conversion callables come from the type info table;
            # an explicit len attribute (string points) overrides the default
            info = suns.suns_point_type_info.get(self.type)
            if info is not None:
                self.len, self.is_impl, self.data_to, self.to_data, self.to_value, self.value_default = info
            if plen is not None:
                self.len = int(plen)
        for e in element.findall('*'):
            if e.tag == smdx.SMDX_LABEL:
                self.label = e.text
            elif e.tag == smdx.SMDX_DESCRIPTION:
                self.description = e.text
            elif e.tag == smdx.SMDX_NOTES:
                self.notes = e.text
            elif e.tag == smdx.SMDX_SYMBOL:
                sid = e.attrib.get(smdx.SMDX_ATTR_ID)
                # symbols are shared at the model level; reuse one if present
                symbol = self.block_type.model_type.symbol_get(
                    sid=sid,
                    point_id=self.id,
                )
                if symbol is None:
                    symbol = Symbol()
                    symbol.from_smdx(e, strings)
                    self.block_type.model_type.symbol_add(
                        symbol=symbol,
                        point_id=self.id,
                    )
                if self.symbol_get(sid) is None:
                    self.symbols.append(symbol)
                # NOTE(review): this re-parses the element even for symbols just
                # created above (redundant but harmless); for pre-existing
                # symbols it refreshes their strings -- confirm this is intended.
                symbol.from_smdx(e, strings)
    def symbol_get(self, sid):
        """Return the attached Symbol with id *sid*, or None if not found."""
        for symbol in self.symbols:
            if symbol.id == sid:
                return symbol
    def not_equal(self, point_type):
        """ Determines if the specified point type instance is not equal based
        on all the point type attribute values. If not equal, returns string
        indicating why the point type is not equal. Returns False if the point
        type is equal.
        Parameters:
            point_type :
                Point type to compare.
        """
        if self == point_type:
            return False
        if point_type is None:
            return "PointType '%s' is None" % (str(self.id))
        if len(self.__dict__) != len(point_type.__dict__):
            # Bug fix: the original format string had three placeholders but
            # only one argument, raising TypeError instead of returning a
            # difference description.
            return "PointType '%s' attribute count not equal: %s %s" % (
                str(self.id), len(self.__dict__), len(point_type.__dict__))
        for k, v in self.__dict__.items():
            if k != 'block_type':
                value = point_type.__dict__.get(k)
                # NOTE(review): attributes where either side is None are
                # skipped -- only set-vs-set mismatches are reported. Behavior
                # preserved from the original; confirm this is intended.
                if v is not None and value is not None and v != value:
                    return "PointType '{}' attribute '{}' not equal: {} {}".format(str(self.id), str(k), str(v), str(value))
        return False
    def __str__(self):
        """Return a one-line summary of the point definition."""
        return 'PointType: id = %s offset = %d type = %s len = %d sf = %s access = %s mandatory = %s' % \
            (self.id, self.offset, self.type, self.len, self.sf, self.access, self.mandatory)
class Symbol(object):
    """A single enumerated symbol (id/value pair) attached to a point."""
    def __init__(self, sid=None):
        self.id = sid             # symbol identifier
        self.value = None         # value text from the model definition
        self.label = None         # human-readable label
        self.description = None   # long description
        self.notes = None         # free-form notes
    def from_smdx(self, element, strings=False):
        """Populate this symbol from an SMDX symbol element.

        When *strings* is True only the documentation strings are updated;
        otherwise the id and value are read from the element as well.
        """
        doc_tags = {
            smdx.SMDX_LABEL: 'label',
            smdx.SMDX_DESCRIPTION: 'description',
            smdx.SMDX_NOTES: 'notes',
        }
        for child in element.findall('*'):
            attr = doc_tags.get(child.tag)
            if attr is not None:
                setattr(self, attr, child.text)
        if strings is False:
            self.id = element.attrib.get(smdx.SMDX_ATTR_ID)
            self.value = element.text
            if self.id is None:
                raise SunSpecError('Missing point id attribute')
    def __str__(self):
        return 'Symbol: id = {} value = {}'.format(self.id, self.value)
| |
"""Support for KNX/IP climate devices."""
from typing import Optional, List
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_AUTO,
PRESET_ECO,
PRESET_SLEEP,
PRESET_AWAY,
PRESET_COMFORT,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
# --- configuration keys for the KNX climate platform ---
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_STEP = "setpoint_shift_step"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = "operation_mode_frost_protection_address"
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
# --- defaults ---
DEFAULT_NAME = "KNX Climate"
# setpoint shift step/limits -- presumably in temperature-degree steps as
# interpreted by xknx; confirm against xknx.devices.Climate.
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
# Map KNX operation modes to HA modes. This list might not be full.
OPERATION_MODES = {
    # Map DPT 201.105 HVAC control modes
    "Auto": HVAC_MODE_AUTO,
    "Heat": HVAC_MODE_HEAT,
    "Cool": HVAC_MODE_COOL,
    "Off": HVAC_MODE_OFF,
    "Fan only": HVAC_MODE_FAN_ONLY,
    "Dry": HVAC_MODE_DRY,
}
# Reverse lookup (HA mode -> KNX mode name); dict comprehension replaces the
# original dict((reversed(item) ...)) for readability -- same result.
OPERATION_MODES_INV = {ha_mode: knx_mode for knx_mode, ha_mode in OPERATION_MODES.items()}
PRESET_MODES = {
    # Map DPT 201.100 HVAC operating modes to HA presets
    "Frost Protection": PRESET_ECO,
    "Night": PRESET_SLEEP,
    "Standby": PRESET_AWAY,
    "Comfort": PRESET_COMFORT,
}
# Reverse lookup (HA preset -> KNX operating mode name).
PRESET_MODES_INV = {ha_preset: knx_mode for knx_mode, ha_preset in PRESET_MODES.items()}
# Voluptuous schema for manual platform configuration. Only the current
# temperature address and the target temperature state address are required;
# everything else is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        # setpoint shift tuning (step size and shift limits)
        vol.Optional(
            CONF_SETPOINT_SHIFT_STEP, default=DEFAULT_SETPOINT_SHIFT_STEP
        ): vol.All(float, vol.Range(min=0, max=2)),
        vol.Optional(
            CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
        ): vol.All(int, vol.Range(min=0, max=32)),
        vol.Optional(
            CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
        ): vol.All(int, vol.Range(min=-32, max=0)),
        # required group addresses
        vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
        vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
        # optional group addresses
        vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
        vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
        vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
        vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
        vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
        vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
        vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
        vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
        vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
        vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
        vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
        vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
        vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
        vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
        # restrict the selectable operation modes to a subset of OPERATION_MODES
        vol.Optional(CONF_OPERATION_MODES): vol.All(
            cv.ensure_list, [vol.In(OPERATION_MODES)]
        ),
        vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up climate(s) for KNX platform."""
    if discovery_info is None:
        # manual configuration via the platform schema
        async_add_entities_config(hass, config, async_add_entities)
    else:
        # devices discovered by the KNX integration
        async_add_entities_discovery(hass, discovery_info, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
    """Set up climates for KNX platform configured within platform."""
    knx_devices = hass.data[DATA_KNX].xknx.devices
    async_add_entities(
        [KNXClimate(knx_devices[name]) for name in discovery_info[ATTR_DISCOVER_DEVICES]]
    )
@callback
def async_add_entities_config(hass, config, async_add_entities):
    """Set up climate for KNX platform configured within platform."""
    import xknx
    # The ClimateMode companion device carries all operation/controller mode
    # group addresses; it is created unconditionally (addresses may be None).
    climate_mode = xknx.devices.ClimateMode(
        hass.data[DATA_KNX].xknx,
        name=config[CONF_NAME] + " Mode",
        group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
        group_address_operation_mode_state=config.get(
            CONF_OPERATION_MODE_STATE_ADDRESS
        ),
        group_address_controller_status=config.get(CONF_CONTROLLER_STATUS_ADDRESS),
        group_address_controller_status_state=config.get(
            CONF_CONTROLLER_STATUS_STATE_ADDRESS
        ),
        group_address_controller_mode=config.get(CONF_CONTROLLER_MODE_ADDRESS),
        group_address_controller_mode_state=config.get(
            CONF_CONTROLLER_MODE_STATE_ADDRESS
        ),
        group_address_operation_mode_protection=config.get(
            CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
        ),
        group_address_operation_mode_night=config.get(
            CONF_OPERATION_MODE_NIGHT_ADDRESS
        ),
        group_address_operation_mode_comfort=config.get(
            CONF_OPERATION_MODE_COMFORT_ADDRESS
        ),
        operation_modes=config.get(CONF_OPERATION_MODES),
    )
    hass.data[DATA_KNX].xknx.devices.add(climate_mode)
    # The Climate device proper: temperature, setpoint shift, on/off and
    # temperature limits; it references the mode device created above.
    climate = xknx.devices.Climate(
        hass.data[DATA_KNX].xknx,
        name=config[CONF_NAME],
        group_address_temperature=config[CONF_TEMPERATURE_ADDRESS],
        group_address_target_temperature=config.get(CONF_TARGET_TEMPERATURE_ADDRESS),
        group_address_target_temperature_state=config[
            CONF_TARGET_TEMPERATURE_STATE_ADDRESS
        ],
        group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
        group_address_setpoint_shift_state=config.get(
            CONF_SETPOINT_SHIFT_STATE_ADDRESS
        ),
        setpoint_shift_step=config[CONF_SETPOINT_SHIFT_STEP],
        setpoint_shift_max=config[CONF_SETPOINT_SHIFT_MAX],
        setpoint_shift_min=config[CONF_SETPOINT_SHIFT_MIN],
        group_address_on_off=config.get(CONF_ON_OFF_ADDRESS),
        group_address_on_off_state=config.get(CONF_ON_OFF_STATE_ADDRESS),
        min_temp=config.get(CONF_MIN_TEMP),
        max_temp=config.get(CONF_MAX_TEMP),
        mode=climate_mode,
    )
    hass.data[DATA_KNX].xknx.devices.add(climate)
    async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
    """Representation of a KNX climate device."""
    def __init__(self, device):
        """Initialize of a KNX climate device."""
        # device: xknx.devices.Climate (its mode attribute holds a ClimateMode)
        self.device = device
        self._unit_of_measurement = TEMP_CELSIUS
    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
    async def async_added_to_hass(self) -> None:
        """Register callbacks to update hass after device was changed."""
        async def after_update_callback(device):
            """Call after device was updated."""
            await self.async_update_ha_state()
        self.device.register_device_updated_cb(after_update_callback)
        # NOTE(review): assumes self.device.mode is always set; a Climate
        # device without a ClimateMode would raise AttributeError here --
        # confirm against the discovery path.
        self.device.mode.register_device_updated_cb(after_update_callback)
    @property
    def name(self) -> str:
        """Return the name of the KNX device."""
        return self.device.name
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self.hass.data[DATA_KNX].connected
    @property
    def should_poll(self) -> bool:
        """No polling needed within KNX."""
        return False
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.device.temperature.value
    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return self.device.temperature_step
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.device.target_temperature.value
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self.device.target_temperature_min
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self.device.target_temperature_max
    async def async_set_temperature(self, **kwargs) -> None:
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            # nothing to do when no temperature was supplied
            return
        await self.device.set_target_temperature(temperature)
        await self.async_update_ha_state()
    @property
    def hvac_mode(self) -> Optional[str]:
        """Return current operation ie. heat, cool, idle."""
        # on/off capability takes precedence: an "on" device reports HEAT
        if self.device.supports_on_off and not self.device.is_on:
            return HVAC_MODE_OFF
        if self.device.supports_on_off and self.device.is_on:
            return HVAC_MODE_HEAT
        if self.device.mode.supports_operation_mode:
            # KNX modes without an HA equivalent fall back to HEAT
            return OPERATION_MODES.get(
                self.device.mode.operation_mode.value, HVAC_MODE_HEAT
            )
        return None
    @property
    def hvac_modes(self) -> Optional[List[str]]:
        """Return the list of available operation modes."""
        _operations = [
            OPERATION_MODES.get(operation_mode.value)
            for operation_mode in self.device.mode.operation_modes
        ]
        if self.device.supports_on_off:
            _operations.append(HVAC_MODE_HEAT)
            _operations.append(HVAC_MODE_OFF)
        # drop KNX modes that have no HA equivalent (get() returned None)
        return [op for op in _operations if op is not None]
    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set operation mode."""
        if self.device.supports_on_off and hvac_mode == HVAC_MODE_OFF:
            await self.device.turn_off()
        elif self.device.supports_on_off and hvac_mode == HVAC_MODE_HEAT:
            await self.device.turn_on()
        elif self.device.mode.supports_operation_mode:
            from xknx.knx import HVACOperationMode
            knx_operation_mode = HVACOperationMode(OPERATION_MODES_INV.get(hvac_mode))
            await self.device.mode.set_operation_mode(knx_operation_mode)
            await self.async_update_ha_state()
    @property
    def preset_mode(self) -> Optional[str]:
        """Return the current preset mode, e.g., home, away, temp.
        Requires SUPPORT_PRESET_MODE.
        """
        if self.device.mode.supports_operation_mode:
            # unknown KNX operating modes fall back to PRESET_AWAY
            return PRESET_MODES.get(self.device.mode.operation_mode.value, PRESET_AWAY)
        return None
    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return a list of available preset modes.
        Requires SUPPORT_PRESET_MODE.
        """
        _presets = [
            PRESET_MODES.get(operation_mode.value)
            for operation_mode in self.device.mode.operation_modes
        ]
        return list(filter(None, _presets))
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode.
        This method must be run in the event loop and returns a coroutine.
        """
        if self.device.mode.supports_operation_mode:
            from xknx.knx import HVACOperationMode
            knx_operation_mode = HVACOperationMode(PRESET_MODES_INV.get(preset_mode))
            await self.device.mode.set_operation_mode(knx_operation_mode)
            await self.async_update_ha_state()
| |
# -*- encoding: utf-8 -*-
"""
Regression model.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import imp
from h2o.model.confusion_matrix import ConfusionMatrix
from h2o.utils.backward_compatibility import backwards_compatible
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, assert_satisfies, numeric
class MetricsBase(backwards_compatible()):
    """
    A parent class to house common metrics available for the various Metrics types.
    The methods here are available across different model categories.
    """
    def __init__(self, metric_json, on=None, algo=""):
        """
        :param metric_json: dict (or another MetricsBase to unwrap) holding the raw metrics.
        :param on: dataset the metrics were computed on -- "training_metrics",
            "validation_metrics", "cross_validation_metrics", or None (test data).
        :param algo: name of the algorithm the metrics are based off of.
        """
        super(MetricsBase, self).__init__()
        # Yep, it's messed up...
        # Unwrap when another MetricsBase was passed instead of the raw json.
        if isinstance(metric_json, MetricsBase): metric_json = metric_json._metric_json
        self._metric_json = metric_json
        # train and valid and xval are not mutually exclusive -- could have a test. train and
        # valid only make sense at model build time.
        self._on_train = False
        self._on_valid = False
        self._on_xval = False
        self._algo = algo
        if on == "training_metrics":
            self._on_train = True
        elif on == "validation_metrics":
            self._on_valid = True
        elif on == "cross_validation_metrics":
            self._on_xval = True
        elif on is None:
            pass
        else:
            raise ValueError("on expected to be train,valid,or xval. Got: " + str(on))
    @classmethod
    def make(cls, kvs):
        """Factory method to instantiate a MetricsBase object from the list of key-value pairs."""
        return cls(metric_json=dict(kvs))
    def __repr__(self):
        # FIXME !!! __repr__ should never print anything, but return a string
        self.show()
        return ""
    # TODO: convert to actual fields list
    def __getitem__(self, key):
        # Dict-style access to the raw metrics json; returns None for missing keys.
        return self._metric_json.get(key)
    @staticmethod
    def _has(dictionary, key):
        # True only when the key exists AND its value is not None.
        return key in dictionary and dictionary[key] is not None
    def show(self):
        """Display a short summary of the metrics."""
        metric_type = self._metric_json['__meta']['schema_type']
        # Schema types that carry each optional metric group:
        types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsBinomialGLM']
        types_w_clustering = ['ModelMetricsClustering']
        types_w_mult = ['ModelMetricsMultinomial']
        types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGLM']
        types_w_r2 = ['ModelMetricsRegressionGLM']
        types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
        types_w_mean_absolute_error = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
        types_w_logloss = types_w_bin + types_w_mult
        types_w_dim = ["ModelMetricsGLRM"]
        print()
        print(metric_type + ": " + self._algo)
        reported_on = "** Reported on {} data. **"
        if self._on_train:
            print(reported_on.format("train"))
        elif self._on_valid:
            print(reported_on.format("validation"))
        elif self._on_xval:
            print(reported_on.format("cross-validation"))
        else:
            print(reported_on.format("test"))
        print()
        # metrics common to every model category
        print("MSE: " + str(self.mse()))
        print("RMSE: " + str(self.rmse()))
        if metric_type in types_w_mean_absolute_error:
            print("MAE: " + str(self.mae()))
            print("RMSLE: " + str(self.rmsle()))
        if metric_type in types_w_r2:
            print("R^2: " + str(self.r2()))
        if metric_type in types_w_mean_residual_deviance:
            print("Mean Residual Deviance: " + str(self.mean_residual_deviance()))
        if metric_type in types_w_logloss:
            print("LogLoss: " + str(self.logloss()))
        if metric_type == 'ModelMetricsBinomial':
            # second element for first threshold is the actual mean per class error
            print("Mean Per-Class Error: %s" % self.mean_per_class_error()[0][1])
        if metric_type == 'ModelMetricsMultinomial':
            print("Mean Per-Class Error: " + str(self.mean_per_class_error()))
        if metric_type in types_w_glm:
            print("Null degrees of freedom: " + str(self.null_degrees_of_freedom()))
            print("Residual degrees of freedom: " + str(self.residual_degrees_of_freedom()))
            print("Null deviance: " + str(self.null_deviance()))
            print("Residual deviance: " + str(self.residual_deviance()))
            print("AIC: " + str(self.aic()))
        if metric_type in types_w_bin:
            print("AUC: " + str(self.auc()))
            print("Gini: " + str(self.gini()))
            self.confusion_matrix().show()
            self._metric_json["max_criteria_and_metric_scores"].show()
            if self.gains_lift():
                print(self.gains_lift())
        if metric_type in types_w_mult:
            self.confusion_matrix().show()
            self.hit_ratio_table().show()
        if metric_type in types_w_clustering:
            print("Total Within Cluster Sum of Square Error: " + str(self.tot_withinss()))
            print("Total Sum of Square Error to Grand Mean: " + str(self.totss()))
            print("Between Cluster Sum of Square Error: " + str(self.betweenss()))
            self._metric_json['centroid_stats'].show()
        if metric_type in types_w_dim:
            print("Sum of Squared Error (Numeric): " + str(self.num_err()))
            print("Misclassification Error (Categorical): " + str(self.cat_err()))
    def r2(self):
        """The R squared coefficient."""
        return self._metric_json["r2"]
    def logloss(self):
        """Log loss."""
        return self._metric_json["logloss"]
    def nobs(self):
        """The number of observations."""
        return self._metric_json["nobs"]
    def mean_residual_deviance(self):
        """The mean residual deviance for this set of metrics."""
        return self._metric_json["mean_residual_deviance"]
    def auc(self):
        """The AUC for this set of metrics."""
        return self._metric_json['AUC']
    def aic(self):
        """The AIC for this set of metrics."""
        return self._metric_json['AIC']
    def gini(self):
        """Gini coefficient."""
        return self._metric_json['Gini']
    def mse(self):
        """The MSE for this set of metrics."""
        return self._metric_json['MSE']
    def rmse(self):
        """The RMSE for this set of metrics."""
        return self._metric_json['RMSE']
    def mae(self):
        """The MAE for this set of metrics."""
        return self._metric_json['mae']
    def rmsle(self):
        """The RMSLE for this set of metrics."""
        return self._metric_json['rmsle']
    def residual_deviance(self):
        """The residual deviance if the model has it, otherwise None."""
        if MetricsBase._has(self._metric_json, "residual_deviance"):
            return self._metric_json["residual_deviance"]
        return None
    def residual_degrees_of_freedom(self):
        """The residual DoF if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "residual_degrees_of_freedom"):
            return self._metric_json["residual_degrees_of_freedom"]
        return None
    def null_deviance(self):
        """The null deviance if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "null_deviance"):
            return self._metric_json["null_deviance"]
        return None
    def null_degrees_of_freedom(self):
        """The null DoF if the model has residual deviance, otherwise None."""
        if MetricsBase._has(self._metric_json, "null_degrees_of_freedom"):
            return self._metric_json["null_degrees_of_freedom"]
        return None
    def mean_per_class_error(self):
        """The mean per class error."""
        return self._metric_json['mean_per_class_error']
    # Deprecated functions; left here for backward compatibility
    _bcim = {
        "giniCoef": lambda self, *args, **kwargs: self.gini(*args, **kwargs)
    }
class H2ORegressionModelMetrics(MetricsBase):
    """
    This class provides an API for inspecting the metrics returned by a regression model.
    It is possible to retrieve the R^2 (1 - MSE/variance) and MSE.
    All behavior is inherited from MetricsBase; this subclass only tags the
    metrics with the regression model category.
    """
    def __init__(self, metric_json, on=None, algo=""):
        super(H2ORegressionModelMetrics, self).__init__(metric_json, on, algo)
class H2OClusteringModelMetrics(MetricsBase):
    """Metrics reported by clustering models (sum-of-squares decomposition)."""
    def __init__(self, metric_json, on=None, algo=""):
        super(H2OClusteringModelMetrics, self).__init__(metric_json, on, algo)
    def tot_withinss(self):
        """The Total Within Cluster Sum-of-Square Error, or None if not present."""
        # dict.get yields None for both a missing key and an explicit None value
        return self._metric_json.get("tot_withinss")
    def totss(self):
        """The Total Sum-of-Square Error to Grand Mean, or None if not present."""
        return self._metric_json.get("totss")
    def betweenss(self):
        """The Between Cluster Sum-of-Square Error, or None if not present."""
        return self._metric_json.get("betweenss")
class H2OMultinomialModelMetrics(MetricsBase):
    """Metrics reported by multinomial classification models."""
    def __init__(self, metric_json, on=None, algo=""):
        super(H2OMultinomialModelMetrics, self).__init__(metric_json, on, algo)
    def confusion_matrix(self):
        """Returns a confusion matrix based of H2O's default prediction threshold for a dataset."""
        return self._metric_json['cm']['table']
    def hit_ratio_table(self):
        """Retrieve the Hit Ratios."""
        return self._metric_json['hit_ratio_table']
class H2OBinomialModelMetrics(MetricsBase):
"""
This class is essentially an API for the AUC object.
This class contains methods for inspecting the AUC for different criteria.
To input the different criteria, use the static variable `criteria`.
"""
def __init__(self, metric_json, on=None, algo=""):
    """
    Create a new Binomial Metrics object (essentially a wrapper around some json)
    :param metric_json: A blob of json holding all of the needed information
    :param on: Which dataset the metrics were computed on: "training_metrics",
        "validation_metrics", "cross_validation_metrics", or None for test data.
        (The original docstring documented on_train/on_valid/on_xval parameters
        that do not exist in the signature.)
    :param algo: The algorithm the metrics are based off of (e.g. deeplearning, gbm, etc.)
    :returns: A new H2OBinomialModelMetrics object.
    """
    super(H2OBinomialModelMetrics, self).__init__(metric_json, on, algo)
def F1(self, thresholds=None):
    """F1 score at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The F1 for the given set of thresholds.
    """
    return self.metric("f1", thresholds)
def F2(self, thresholds=None):
    """F2 score at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The F2 for this set of metrics and thresholds.
    """
    return self.metric("f2", thresholds)
def F0point5(self, thresholds=None):
    """F0.5 score at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The F0.5 for this set of metrics and thresholds.
    """
    return self.metric("f0point5", thresholds)
def accuracy(self, thresholds=None):
    """Accuracy at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The accuracy for this set of metrics and thresholds.
    """
    return self.metric("accuracy", thresholds)
def error(self, thresholds=None):
    """Classification error (one minus accuracy) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The error for this set of metrics and thresholds.
    """
    acc = self.metric("accuracy", thresholds)
    return 1 - acc
def precision(self, thresholds=None):
    """Precision at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The precision for this set of metrics and thresholds.
    """
    return self.metric("precision", thresholds)
def tpr(self, thresholds=None):
    """True Positive Rate at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The True Postive Rate.
    """
    return self.metric("tpr", thresholds)
def tnr(self, thresholds=None):
    """True Negative Rate at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The True Negative Rate.
    """
    return self.metric("tnr", thresholds)
def fnr(self, thresholds=None):
    """False Negative Rate at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The False Negative Rate.
    """
    return self.metric("fnr", thresholds)
def fpr(self, thresholds=None):
    """False Positive Rate at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The False Positive Rate.
    """
    return self.metric("fpr", thresholds)
def recall(self, thresholds=None):
    """Recall (alias for the True Positive Rate) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: Recall for this set of metrics and thresholds.
    """
    return self.metric("tpr", thresholds)
def sensitivity(self, thresholds=None):
    """Sensitivity (alias for the True Positive Rate) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: Sensitivity or True Positive Rate for this set of metrics and thresholds.
    """
    return self.metric("tpr", thresholds)
def fallout(self, thresholds=None):
    """Fallout (alias for the False Positive Rate) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The fallout (same as False Positive Rate) for this set of metrics and thresholds.
    """
    return self.metric("fpr", thresholds)
def missrate(self, thresholds=None):
    """Miss rate (alias for the False Negative Rate) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The miss rate (same as False Negative Rate).
    """
    return self.metric("fnr", thresholds)
def specificity(self, thresholds=None):
    """Specificity (alias for the True Negative Rate) at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The specificity (same as True Negative Rate).
    """
    return self.metric("tnr", thresholds)
def mcc(self, thresholds=None):
    """Absolute Matthews Correlation Coefficient at the given thresholds.

    :param thresholds: list of thresholds (e.g. [0.01, 0.5, 0.99]); when None,
        the thresholds stored in this set of metrics are used.
    :returns: The absolute MCC (a value between 0 and 1, 0 being totally dissimilar, 1 being identical).
    """
    return self.metric("absolute_mcc", thresholds)
def max_per_class_error(self, thresholds=None):
    """Maximum per-class error, i.e. 1 - min(per class accuracy).

    :param thresholds: a list of thresholds (e.g. [0.01, 0.5, 0.99]); when
        None, the thresholds stored in this set of metrics are used.
    :returns: a list of [threshold, 1 - min_per_class_accuracy] pairs.
    """
    # BUG FIX: ``self.metric`` returns a list of [threshold, score] pairs, so
    # the previous ``1 - self.metric(...)`` raised TypeError (int minus list).
    # Apply the complement element-wise, mirroring mean_per_class_error below.
    return [[x[0], 1 - x[1]]
            for x in self.metric("min_per_class_accuracy", thresholds=thresholds)]
def mean_per_class_error(self, thresholds=None):
    """Mean per-class error, i.e. 1 - mean(per class accuracy).

    :param thresholds: a list of thresholds (e.g. [0.01, 0.5, 0.99]); when
        None, the thresholds stored in this set of metrics are used.
    :returns: a list of [threshold, error] pairs.
    """
    # Complement each accuracy score while keeping its threshold.
    pairs = []
    for threshold, accuracy in self.metric("mean_per_class_accuracy", thresholds=thresholds):
        pairs.append([threshold, 1 - accuracy])
    return pairs
def metric(self, metric, thresholds=None):
    """Look up the given metric at each requested threshold.

    :param str metric: The desired metric.
    :param thresholds: a list of thresholds (e.g. [0.01, 0.5, 0.99]); when
        None, the single threshold that maximizes *metric* is used.
    :returns: a list of [threshold, score] pairs.
    """
    assert_is_type(thresholds, None, [numeric])
    if not thresholds:
        # Default to the threshold where this metric peaks.
        thresholds = [self.find_threshold_by_max_metric(metric)]
    score_table = self._metric_json['thresholds_and_metric_scores']
    return [[t, score_table[metric][self.find_idx_by_threshold(t)]]
            for t in thresholds]
def plot(self, type="roc", server=False):
    """Produce the desired metric plot.

    :param type: the type of metric plot (currently, only ROC supported).
    :param server: if True, generate plot inline using matplotlib's "Agg" backend.
    :returns: None
    """
    # TODO: add more types (i.e. cutoffs)
    assert_is_type(type, "roc")
    # check for matplotlib. exit if absent.
    try:
        # ``imp`` is deprecated (removed in Python 3.12); use importlib instead.
        import importlib.util
        if importlib.util.find_spec('matplotlib') is None:
            raise ImportError("matplotlib not found")
        import matplotlib
        if server:
            # The ``warn`` keyword was removed from matplotlib.use() in
            # matplotlib 3.1; plain use('Agg') works on all versions.
            matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    except ImportError:
        print("matplotlib is required for this function!")
        return
    if type == "roc":
        plt.xlabel('False Positive Rate (FPR)')
        plt.ylabel('True Positive Rate (TPR)')
        plt.title('ROC Curve')
        plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"]))
        plt.plot(self.fprs, self.tprs, 'b--')
        plt.axis([0, 1, 0, 1])
        if not server:
            plt.show()
@property
def fprs(self):
    """All false positive rates, one per threshold value.

    :returns: a list of false positive rates.
    """
    table = self._metric_json["thresholds_and_metric_scores"]
    return table["fpr"]
@property
def tprs(self):
    """All true positive rates, one per threshold value.

    :returns: a list of true positive rates.
    """
    table = self._metric_json["thresholds_and_metric_scores"]
    return table["tpr"]
def confusion_matrix(self, metrics=None, thresholds=None):
    """
    Get the confusion matrix for the specified metric
    :param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_mcc", "tnr", "fnr", "fpr",
        "tpr", "precision", "accuracy", "f0point5", "f2", "f1","mean_per_class_accuracy"}
    :param thresholds: A value (or list of values) between 0 and 1
    :returns: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix
        (if there is only one).
    """
    # make lists out of metrics and thresholds arguments
    # With neither argument supplied, default to the max-F1 threshold.
    if metrics is None and thresholds is None: metrics = ["f1"]
    if isinstance(metrics, list):
        metrics_list = metrics
    elif metrics is None:
        metrics_list = []
    else:
        metrics_list = [metrics]
    if isinstance(thresholds, list):
        thresholds_list = thresholds
    elif thresholds is None:
        thresholds_list = []
    else:
        thresholds_list = [thresholds]
    # error check the metrics_list and thresholds_list
    assert_is_type(thresholds_list, [numeric])
    assert_satisfies(thresholds_list, all(0 <= t <= 1 for t in thresholds_list))
    if not all(m.lower() in ["min_per_class_accuracy", "absolute_mcc", "precision", "recall", "specificity",
                            "accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"] for m in metrics_list):
        raise ValueError(
            "The only allowable metrics are min_per_class_accuracy, absolute_mcc, precision, accuracy, f0point5, "
            "f2, f1, mean_per_class_accuracy")
    # make one big list that combines the thresholds and metric-thresholds
    # Each requested metric contributes the threshold at which it is maximal.
    metrics_thresholds = [self.find_threshold_by_max_metric(m) for m in metrics_list]
    for mt in metrics_thresholds:
        thresholds_list.append(mt)
    thresh2d = self._metric_json['thresholds_and_metric_scores']
    actual_thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
    cms = []
    for t in thresholds_list:
        idx = self.find_idx_by_threshold(t)
        row = thresh2d.cell_values[idx]
        # Columns 11-14 of the thresholds table hold the raw counts:
        # true negatives, false negatives, false positives, true positives.
        tns = row[11]
        fns = row[12]
        fps = row[13]
        tps = row[14]
        p = tps + fns   # total actual positives
        n = tns + fps   # total actual negatives
        c0 = n - fps    # correctly classified negatives (== tns)
        c1 = p - tps    # misclassified positives (== fns)
        if t in metrics_thresholds:
            # Label the matrix with the metric that selected this threshold.
            m = metrics_list[metrics_thresholds.index(t)]
            table_header = "Confusion Matrix (Act/Pred) for max " + m + " @ threshold = " + str(
                actual_thresholds[idx])
        else:
            table_header = "Confusion Matrix (Act/Pred) @ threshold = " + str(actual_thresholds[idx])
        cms.append(ConfusionMatrix(cm=[[c0, fps], [c1, tps]], domains=self._metric_json['domain'],
                                   table_header=table_header))
    # Unwrap a singleton list for caller convenience.
    if len(cms) == 1:
        return cms[0]
    else:
        return cms
def find_threshold_by_max_metric(self, metric):
    """Find the threshold at which the given metric is maximal.

    :param metric: A string in {"min_per_class_accuracy", "absolute_mcc",
        "precision", "recall", "specificity", "accuracy", "f0point5", "f2",
        "f1", "mean_per_class_accuracy"}.
    :returns: the threshold at which the given metric is maximal.
    """
    criteria_table = self._metric_json['max_criteria_and_metric_scores']
    # Rows are labelled "max <metric>"; column 1 holds the threshold.
    wanted = "max " + metric.lower()
    for row in criteria_table.cell_values:
        if row[0] == wanted:
            return row[1]
    raise ValueError("No metric " + str(metric.lower()))
def find_idx_by_threshold(self, threshold):
    """Retrieve the index in this metric's threshold list at which the given
    threshold is located.

    :param threshold: Find the index of this input threshold.
    :returns: the index
    :raises ValueError: if the threshold is not between 0 and 1.
    """
    assert_is_type(threshold, numeric)
    table = self._metric_json['thresholds_and_metric_scores']
    # First pass: an (almost) exact match within a relative tolerance.
    for idx, row in enumerate(table.cell_values):
        t = float(row[0])
        if abs(t - threshold) < 0.00000001 * max(t, threshold):
            return idx
    # No exact match: fall back to the nearest known threshold, if valid.
    if 0 <= threshold <= 1:
        known = [float(row[0]) for row in table.cell_values]
        gaps = [abs(t - threshold) for t in known]
        closest_idx = gaps.index(min(gaps))
        closest_threshold = known[closest_idx]
        print("Could not find exact threshold {0}; using closest threshold found {1}."
              .format(threshold, closest_threshold))
        return closest_idx
    raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold))
def gains_lift(self):
    """Retrieve the Gains/Lift table, or None if it is absent."""
    if 'gains_lift_table' not in self._metric_json:
        return None
    return self._metric_json['gains_lift_table']
class H2OAutoEncoderModelMetrics(MetricsBase):
    """Metrics for autoencoder models; adds nothing beyond :class:`MetricsBase`."""

    def __init__(self, metric_json, on=None, algo=""):
        super(H2OAutoEncoderModelMetrics, self).__init__(metric_json, on, algo)
class H2ODimReductionModelMetrics(MetricsBase):
    """Metrics for dimensionality-reduction models."""

    def __init__(self, metric_json, on=None, algo=""):
        super(H2ODimReductionModelMetrics, self).__init__(metric_json, on, algo)

    def num_err(self):
        """Sum of Squared Error over non-missing numeric entries, or None if not present."""
        if MetricsBase._has(self._metric_json, "numerr"):
            return self._metric_json["numerr"]
        return None

    def cat_err(self):
        """The Number of Misclassified categories over non-missing categorical entries, or None if not present."""
        if MetricsBase._has(self._metric_json, "caterr"):
            return self._metric_json["caterr"]
        return None
class H2OWordEmbeddingModelMetrics(MetricsBase):
    """Metrics for word-embedding models; adds nothing beyond :class:`MetricsBase`."""

    def __init__(self, metric_json, on=None, algo=""):
        super(H2OWordEmbeddingModelMetrics, self).__init__(metric_json, on, algo)
| |
# -*- coding: utf-8 -*-
r"""
spa.jwtcookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Based on werkzeug.contrib.securecookie, but serialized cookies are JSON Web
Tokens.
A cookie serialized as a JSON Web Token can be easily parsed and used in
client side code. This means that you can access session data from
javascript without a round trip to the server. The big caveat is that
your client side javascript should not have access to the secret key used to
sign the token. Two important implications of this:
1. Client side javascript cannot verify that the session data has not
been tampered with. It must trust it blindly.
2. Client side javascript cannot set new values in the session.
The primary use case is to make it easy for your JS code to access session
data like username, email address, or gravatar URL, that you might want to
show in the user interface.
The rest of this docstring is slightly adapted from Werkzeug's SecureCookie
class.
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from spa.jwtcookie import JWTCookie
>>> x = JWTCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = JWTCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `JWTCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from spa.jwtcookie import JWTCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return JWTCookie(secret_key=SECRET_KEY)
return JWTCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return JWTCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
"""
from __future__ import print_function
import datetime
from six.moves.http_cookies import SimpleCookie
from six.moves.urllib.parse import parse_qs
import re
import jwt
import utc
from werkzeug._compat import text_type
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.http import dump_cookie
class TokenTimestampError(Exception):
    """Raised when a token's timestamp claim is missing or too old."""
class JWTCookie(ModificationTrackingDict):
    """Represents a secure cookie.
    Example usage:
    >>> x = JWTCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True
    :param data: the initial data, as a dictionary.
    :param secret_key: the secret key. If not set `None` or not specified
        it has to be set before :meth:`serialize` is called.
    :param algorithm: A string indicating the algorithm to be used. Must be
        supported by the PyJWT library.
    """

    def __init__(self, data=None, secret_key=None, algorithm='HS256'):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None and not isinstance(secret_key, bytes):
            secret_key = bytes(secret_key, 'utf8')
        self.secret_key = secret_key
        self.algorithm = algorithm

    def __repr__(self):
        # Trailing '*' marks a cookie with unsaved modifications.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved. By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.
        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
        protection against session cookie theft.
        :param expires: an optional expiration date for the cookie (a
            :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        # NOTE: this mutates the dict itself -- the 'exp' and 'iat' JWT claims
        # become part of the session data that is signed and sent out.
        if expires:
            self['exp'] = expires
        self['iat'] = utc.now()
        return jwt.encode(self, self.secret_key, self.algorithm)

    @classmethod
    def unserialize(cls, string, secret_key, algorithm='HS256', expire_days=None):
        """Load the secure cookie from a serialized string.
        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :param algorithm: the JWT signing algorithm expected on the token.
        :param expire_days: if set, reject tokens whose 'iat' claim is older
            than this many days (raises :class:`TokenTimestampError`).
        :return: a new :class:`JWTCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        # Raises jwt.DecodeError on a bad signature or malformed token.
        items = jwt.decode(string, secret_key, algorithms=[algorithm])
        if expire_days:
            if 'iat' not in items:
                raise TokenTimestampError('No iat claim in token')
            issued_at = utc.fromtimestamp(items['iat'])
            time_passed = utc.now() - issued_at
            if time_passed > datetime.timedelta(days=expire_days):
                raise TokenTimestampError('Token is too old')
        return cls(items, secret_key, algorithm)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`JWTCookie` from a cookie in request. If the
        cookie is not set, a new :class:`JWTCookie` instance is
        returned.
        :param request: a request object that has a `cookies` attribute
            which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to decode the cookie.
            Always provide the value even though it has
            no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the JWTCookie in a cookie on response object. All parameters
        that are not described here are forwarded directly to
        :meth:`~BaseResponse.set_cookie`.
        :param response: a response object that has a
            :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
            stored information. If this is not provided
            the cookie `expires` date is used instead.
        :param force: if True, write the cookie even when unmodified.
        """
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
class JWTSessionMiddleware(object):
    """WSGI middleware that reads/writes the session as a JWT cookie.

    :param app: the wrapped WSGI application.
    :param secret_key: key used to sign and verify the session token.
    :param cookie_name: name of the cookie holding the token.
    :param wsgi_name: key under which the session dict is stored in environ.
    :param expire_days: reject tokens older than this many days.
    :param algorithm: JWT signing algorithm.
    :param exclude_pattern: optional regex; matching PATH_INFO values bypass
        session handling entirely.
    """

    def __init__(self, app, secret_key, cookie_name='session',
                 wsgi_name='jwtsession', expire_days=1,
                 algorithm='HS256', exclude_pattern=None):
        self.app = app
        self.secret_key = secret_key
        self.cookie_name = cookie_name
        self.wsgi_name = wsgi_name
        self.expire_days = expire_days
        self.algorithm = algorithm
        if exclude_pattern:
            self.exclude_pattern = re.compile(exclude_pattern)
        else:
            self.exclude_pattern = None

    def __call__(self, environ, start_response):
        if self.exclude_pattern and re.match(self.exclude_pattern,
                                             environ['PATH_INFO']):
            return self.app(environ, start_response)
        # on the way in: if environ includes our cookie, then deserialize it and
        # stick it back into environ as jwtsession. If environ doesn't include
        # one then make an empty one and stick that in.
        if 'HTTP_COOKIE' in environ:
            cookie = SimpleCookie(environ['HTTP_COOKIE'])
            if self.cookie_name in cookie:
                try:
                    session = JWTCookie.unserialize(
                        cookie[self.cookie_name].value,
                        self.secret_key,
                        self.algorithm,
                        expire_days=self.expire_days,
                    )
                except (jwt.DecodeError, TokenTimestampError):
                    # Invalid or expired token: start from a fresh session.
                    session = JWTCookie({}, self.secret_key, self.algorithm)
            else:
                session = JWTCookie({}, self.secret_key, self.algorithm)
        else:
            session = JWTCookie({}, self.secret_key, self.algorithm)
        environ[self.wsgi_name] = session
        # on the way out: serialize jwtsession and stick it into headers as
        # 'session'.
        def session_start_response(status, headers, exc_info=None):
            # Re-issue the cookie when the session changed, and also when it
            # is empty (refreshes/clears the client's cookie).
            if session.should_save or session == {}:
                # add our cookie to headers
                # NOTE(review): max_age is passed as a timedelta -- confirm the
                # installed werkzeug's dump_cookie accepts timedelta values.
                c = dump_cookie(self.cookie_name,
                                value=environ[self.wsgi_name].serialize(),
                                max_age=datetime.timedelta(days=self.expire_days))
                headers.append(('Set-Cookie', c))
            return start_response(status, headers, exc_info=exc_info)
        return self.app(environ, session_start_response)
class JWTSessionParamMiddleware(object):
    """
    This middleware supports setting session values from a query string
    parameter (signed as a JSON Web Token).
    This middleware must be used with some other middleware that actually
    provides the session functionality.

    :param app: the wrapped WSGI application.
    :param secret_key: key used to verify the token's signature.
    :param expire_days: ignore tokens whose 'iat' claim is older than this.
    :param algorithm: the JWT signing algorithm that tokens must use.
    :param qs_name: name of the query-string parameter carrying the token.
    :param wsgi_name: environ key of the session dict to update.
    """

    def __init__(self, app, secret_key, expire_days=7, algorithm='HS256',
                 qs_name='session_token', wsgi_name='jwtsession'):
        self.app = app
        self.secret_key = secret_key
        self.expire_days = expire_days
        self.algorithm = algorithm
        self.qs_name = qs_name
        self.wsgi_name = wsgi_name

    def __call__(self, environ, start_response):
        qs_params = {k: v[0] for k, v in
                     parse_qs(environ['QUERY_STRING']).items()}
        if self.qs_name not in qs_params:
            return self.app(environ, start_response)
        try:
            # SECURITY FIX: pin the accepted signing algorithm, as
            # JWTCookie.unserialize already does. Without ``algorithms=``,
            # old PyJWT versions accept whatever algorithm the (attacker
            # controlled) token header declares, and PyJWT >= 2.0 raises.
            session_vals = jwt.decode(qs_params[self.qs_name],
                                      key=self.secret_key,
                                      algorithms=[self.algorithm])
        except jwt.DecodeError:
            # silently drop malformed tokens
            return self.app(environ, start_response)
        if self.expire_days:
            if 'iat' not in session_vals:
                # We can't enforce token expiration if the token has no issued
                # at claim. So ignore the token.
                return self.app(environ, start_response)
            issued_at = utc.fromtimestamp(session_vals['iat'])
            if (utc.now() - issued_at).days > self.expire_days:
                # Token has an issued at claim, but it's too old. Ignore the
                # token.
                return self.app(environ, start_response)
        environ[self.wsgi_name].update(session_vals)
        return self.app(environ, start_response)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class BackupLongTermRetentionPoliciesOperations(object):
    """BackupLongTermRetentionPoliciesOperations operations.

    AutoRest-generated client operations; edits here are lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the request. Constant value: "2014-04-01".
    :ivar backup_long_term_retention_policy_name: The name of the backup long term retention policy. Constant value: "Default".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2014-04-01"
        self.backup_long_term_retention_policy_name = "Default"
        self.config = config

    def get(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Returns a database backup long term retention policy.

        :param resource_group_name: The name of the resource group that
            contains the resource. You can obtain this value from the Azure
            Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
            deserialized response
        :param operation_config: :ref:`Operation configuration
            overrides<msrest:optionsforoperations>`.
        :return: BackupLongTermRetentionPolicy or ClientRawResponse if
            raw=true
        :rtype: ~azure.mgmt.sql.models.BackupLongTermRetentionPolicy or
            ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/backupLongTermRetentionPolicies/{backupLongTermRetentionPolicyName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'backupLongTermRetentionPolicyName': self._serialize.url("self.backup_long_term_retention_policy_name", self.backup_long_term_retention_policy_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('BackupLongTermRetentionPolicy', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def create_or_update(
            self, resource_group_name, server_name, database_name, state, recovery_services_backup_policy_resource_id, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a database backup long term retention policy.

        :param resource_group_name: The name of the resource group that
            contains the resource. You can obtain this value from the Azure
            Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database
        :type database_name: str
        :param state: The status of the backup long term retention policy.
            Possible values include: 'Disabled', 'Enabled'
        :type state: str or
            ~azure.mgmt.sql.models.BackupLongTermRetentionPolicyState
        :param recovery_services_backup_policy_resource_id: The azure recovery
            services backup protection policy resource id
        :type recovery_services_backup_policy_resource_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
            deserialized response
        :return: An instance of AzureOperationPoller that returns
            BackupLongTermRetentionPolicy or ClientRawResponse if raw=true
        :rtype:
            ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.BackupLongTermRetentionPolicy]
            or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        parameters = models.BackupLongTermRetentionPolicy(state=state, recovery_services_backup_policy_resource_id=recovery_services_backup_policy_resource_id)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/backupLongTermRetentionPolicies/{backupLongTermRetentionPolicyName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'backupLongTermRetentionPolicyName': self._serialize.url("self.backup_long_term_retention_policy_name", self.backup_long_term_retention_policy_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'BackupLongTermRetentionPolicy')
        # Construct and send request
        # Issues the initial PUT that starts the long-running operation.
        def long_running_send():
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        # Polls the operation-status link returned by the service.
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        # Deserializes the final (or raw) response of the operation.
        def get_long_running_output(response):
            if response.status_code not in [200, 201, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('BackupLongTermRetentionPolicy', response)
            if response.status_code == 201:
                deserialized = self._deserialize('BackupLongTermRetentionPolicy', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        # raw=True bypasses polling and returns the first response directly.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def list_by_database(
            self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Returns a database backup long term retention policy.

        :param resource_group_name: The name of the resource group that
            contains the resource. You can obtain this value from the Azure
            Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
            deserialized response
        :param operation_config: :ref:`Operation configuration
            overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of BackupLongTermRetentionPolicy
        :rtype:
            ~azure.mgmt.sql.models.BackupLongTermRetentionPolicyPaged[~azure.mgmt.sql.models.BackupLongTermRetentionPolicy]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Callback used by the Paged iterator to fetch each page of results.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/backupLongTermRetentionPolicies'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.BackupLongTermRetentionPolicyPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.BackupLongTermRetentionPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_common
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import ofctl_nicira_ext
from ryu.lib import ofctl_utils
# Module-level logger for the OpenFlow 1.3 ofctl helpers.
LOG = logging.getLogger('ryu.lib.ofctl_v1_3')
# Default request timeout, in seconds.
DEFAULT_TIMEOUT = 1.0
# Converter between user-facing values and OpenFlow 1.3 protocol constants.
UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_3)
# Shorthand alias for the shared string-to-int helper.
str_to_int = ofctl_utils.str_to_int
def to_action(dp, dic):
    """Build an OFPAction object from its dict description, or None if the
    'type' key does not name a plain action."""
    proto = dp.ofproto
    parser = dp.ofproto_parser
    return ofctl_utils.to_action(dic, proto, parser, dic.get('type'), UTIL)
def to_actions(dp, acts):
    """Convert a list of action/instruction dicts into OFPInstruction objects.

    Plain actions are accumulated into a single APPLY_ACTIONS instruction;
    instruction-level entries (WRITE_ACTIONS, CLEAR_ACTIONS, GOTO_TABLE,
    WRITE_METADATA, METER) each produce their own instruction.

    :param dp: datapath whose ofproto/parser modules are used.
    :param acts: list of dicts, each with a 'type' key plus type-specific keys.
    :returns: list of OFPInstruction* objects.
    """
    inst = []
    actions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for a in acts:
        action = to_action(dp, a)
        if action is not None:
            actions.append(action)
        else:
            action_type = a.get('type')
            if action_type == 'WRITE_ACTIONS':
                write_actions = []
                write_acts = a.get('actions')
                for act in write_acts:
                    action = to_action(dp, act)
                    if action is not None:
                        write_actions.append(action)
                    else:
                        # BUG FIX: log the inner action's type, not the
                        # enclosing 'WRITE_ACTIONS' wrapper, so the message
                        # names the entry that actually failed to convert.
                        LOG.error('Unknown action type: %s', act.get('type'))
                if write_actions:
                    inst.append(
                        parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
                                                     write_actions))
            elif action_type == 'CLEAR_ACTIONS':
                inst.append(
                    parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
            elif action_type == 'GOTO_TABLE':
                table_id = UTIL.ofp_table_from_user(a.get('table_id'))
                inst.append(parser.OFPInstructionGotoTable(table_id))
            elif action_type == 'WRITE_METADATA':
                metadata = str_to_int(a.get('metadata'))
                # Mask defaults to all-ones when not supplied.
                metadata_mask = (str_to_int(a['metadata_mask'])
                                 if 'metadata_mask' in a
                                 else parser.UINT64_MAX)
                inst.append(
                    parser.OFPInstructionWriteMetadata(
                        metadata, metadata_mask))
            elif action_type == 'METER':
                meter_id = UTIL.ofp_meter_from_user(a.get('meter_id'))
                inst.append(parser.OFPInstructionMeter(meter_id))
            else:
                LOG.error('Unknown action type: %s', action_type)
    if actions:
        inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                                 actions))
    return inst
def action_to_str(act):
    """Render a parsed OFPAction as a human-readable string.

    Dispatches on ``act.cls_action_type``.  Nicira experimenter actions
    are delegated to ofctl_nicira_ext; on failure they fall back to a
    generic EXPERIMENTER rendering with base64-encoded data.
    """
    action_type = act.cls_action_type
    if action_type == ofproto_v1_3.OFPAT_OUTPUT:
        port = UTIL.ofp_port_to_user(act.port)
        buf = 'OUTPUT:' + str(port)
    elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_OUT:
        buf = 'COPY_TTL_OUT'
    elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_IN:
        buf = 'COPY_TTL_IN'
    elif action_type == ofproto_v1_3.OFPAT_SET_MPLS_TTL:
        buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
    elif action_type == ofproto_v1_3.OFPAT_DEC_MPLS_TTL:
        buf = 'DEC_MPLS_TTL'
    elif action_type == ofproto_v1_3.OFPAT_PUSH_VLAN:
        buf = 'PUSH_VLAN:' + str(act.ethertype)
    elif action_type == ofproto_v1_3.OFPAT_POP_VLAN:
        buf = 'POP_VLAN'
    elif action_type == ofproto_v1_3.OFPAT_PUSH_MPLS:
        buf = 'PUSH_MPLS:' + str(act.ethertype)
    elif action_type == ofproto_v1_3.OFPAT_POP_MPLS:
        buf = 'POP_MPLS:' + str(act.ethertype)
    elif action_type == ofproto_v1_3.OFPAT_SET_QUEUE:
        queue_id = UTIL.ofp_queue_to_user(act.queue_id)
        buf = 'SET_QUEUE:' + str(queue_id)
    elif action_type == ofproto_v1_3.OFPAT_GROUP:
        group_id = UTIL.ofp_group_to_user(act.group_id)
        buf = 'GROUP:' + str(group_id)
    elif action_type == ofproto_v1_3.OFPAT_SET_NW_TTL:
        buf = 'SET_NW_TTL:' + str(act.nw_ttl)
    elif action_type == ofproto_v1_3.OFPAT_DEC_NW_TTL:
        buf = 'DEC_NW_TTL'
    elif action_type == ofproto_v1_3.OFPAT_SET_FIELD:
        buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
    elif action_type == ofproto_v1_3.OFPAT_PUSH_PBB:
        buf = 'PUSH_PBB:' + str(act.ethertype)
    elif action_type == ofproto_v1_3.OFPAT_POP_PBB:
        buf = 'POP_PBB'
    elif action_type == ofproto_v1_3.OFPAT_EXPERIMENTER:
        if act.experimenter == ofproto_common.NX_EXPERIMENTER_ID:
            try:
                # action_to_str is passed as a callback so nested NX
                # actions can recurse back into this renderer.
                return ofctl_nicira_ext.action_to_str(act, action_to_str)
            except Exception:
                LOG.debug('Error parsing NX_ACTION(%s)',
                          act.__class__.__name__, exc_info=True)
        data_str = base64.b64encode(act.data)
        buf = 'EXPERIMENTER: {experimenter:%s, data:%s}' % \
            (act.experimenter, data_str.decode('utf-8'))
    else:
        buf = 'UNKNOWN'
    return buf
def actions_to_str(instructions):
    """Render a list of OFPInstruction* objects for user display.

    APPLY_ACTIONS are flattened into the result list; WRITE_ACTIONS are
    grouped under a {'WRITE_ACTIONS': [...]} dict; other instruction
    kinds become single descriptive strings.  Unrecognized instruction
    objects are silently skipped.
    """
    actions = []
    for instruction in instructions:
        if isinstance(instruction,
                      ofproto_v1_3_parser.OFPInstructionActions):
            if instruction.type == ofproto_v1_3.OFPIT_APPLY_ACTIONS:
                for a in instruction.actions:
                    actions.append(action_to_str(a))
            elif instruction.type == ofproto_v1_3.OFPIT_WRITE_ACTIONS:
                write_actions = []
                for a in instruction.actions:
                    write_actions.append(action_to_str(a))
                if write_actions:
                    actions.append({'WRITE_ACTIONS': write_actions})
            elif instruction.type == ofproto_v1_3.OFPIT_CLEAR_ACTIONS:
                actions.append('CLEAR_ACTIONS')
            else:
                actions.append('UNKNOWN')
        elif isinstance(instruction,
                        ofproto_v1_3_parser.OFPInstructionGotoTable):
            table_id = UTIL.ofp_table_to_user(instruction.table_id)
            buf = 'GOTO_TABLE:' + str(table_id)
            actions.append(buf)
        elif isinstance(instruction,
                        ofproto_v1_3_parser.OFPInstructionWriteMetadata):
            # Only show the mask when it is non-zero.
            buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
                                                 instruction.metadata_mask)
                   if instruction.metadata_mask
                   else 'WRITE_METADATA:0x%x' % instruction.metadata)
            actions.append(buf)
        elif isinstance(instruction,
                        ofproto_v1_3_parser.OFPInstructionMeter):
            meter_id = UTIL.ofp_meter_to_user(instruction.meter_id)
            buf = 'METER:' + str(meter_id)
            actions.append(buf)
        else:
            continue
    return actions
def to_match(dp, attrs):
    """Build an OFPMatch from a JSON-style match dict.

    Legacy (OF1.0-style) field names such as dl_src/nw_dst are mapped
    onto their OXM equivalents, and tp_src/tp_dst are resolved into
    tcp_*/udp_* based on the supplied IP protocol.

    NOTE: this function mutates *attrs* in place when rewriting
    nw_src/nw_dst into arp_spa/arp_tpa for ARP matches.
    """
    convert = {'in_port': UTIL.ofp_port_from_user,
               'in_phy_port': str_to_int,
               'metadata': ofctl_utils.to_match_masked_int,
               'dl_dst': ofctl_utils.to_match_eth,
               'dl_src': ofctl_utils.to_match_eth,
               'eth_dst': ofctl_utils.to_match_eth,
               'eth_src': ofctl_utils.to_match_eth,
               'dl_type': str_to_int,
               'eth_type': str_to_int,
               'dl_vlan': to_match_vid,
               'vlan_vid': to_match_vid,
               'vlan_pcp': str_to_int,
               'ip_dscp': str_to_int,
               'ip_ecn': str_to_int,
               'nw_proto': str_to_int,
               'ip_proto': str_to_int,
               'nw_src': ofctl_utils.to_match_ip,
               'nw_dst': ofctl_utils.to_match_ip,
               'ipv4_src': ofctl_utils.to_match_ip,
               'ipv4_dst': ofctl_utils.to_match_ip,
               'tp_src': str_to_int,
               'tp_dst': str_to_int,
               'tcp_src': str_to_int,
               'tcp_dst': str_to_int,
               'udp_src': str_to_int,
               'udp_dst': str_to_int,
               'sctp_src': str_to_int,
               'sctp_dst': str_to_int,
               'icmpv4_type': str_to_int,
               'icmpv4_code': str_to_int,
               'arp_op': str_to_int,
               'arp_spa': ofctl_utils.to_match_ip,
               'arp_tpa': ofctl_utils.to_match_ip,
               'arp_sha': ofctl_utils.to_match_eth,
               'arp_tha': ofctl_utils.to_match_eth,
               'ipv6_src': ofctl_utils.to_match_ip,
               'ipv6_dst': ofctl_utils.to_match_ip,
               'ipv6_flabel': str_to_int,
               'icmpv6_type': str_to_int,
               'icmpv6_code': str_to_int,
               'ipv6_nd_target': ofctl_utils.to_match_ip,
               'ipv6_nd_sll': ofctl_utils.to_match_eth,
               'ipv6_nd_tll': ofctl_utils.to_match_eth,
               'mpls_label': str_to_int,
               'mpls_tc': str_to_int,
               'mpls_bos': str_to_int,
               'pbb_isid': ofctl_utils.to_match_masked_int,
               'tunnel_id': ofctl_utils.to_match_masked_int,
               'ipv6_exthdr': ofctl_utils.to_match_masked_int}
    # Mapping of legacy field names onto their OXM equivalents.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}
    # For ARP, legacy nw_src/nw_dst really mean arp_spa/arp_tpa.
    if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
            attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        if 'nw_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['nw_src']
            del attrs['nw_src']
        if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['nw_dst']
            del attrs['nw_dst']
    kwargs = {}
    for key, value in attrs.items():
        if key in keys:
            # For old field name
            key = keys[key]
        if key in convert:
            value = convert[key](value)
            if key == 'tp_src' or key == 'tp_dst':
                # TCP/UDP port: pick tcp_* or udp_* from the IP proto.
                # A missing/unsupported proto raises KeyError here.
                conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                           'tp_dst': 'tcp_dst'},
                        inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                           'tp_dst': 'udp_dst'}}
                ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
                key = conv[ip_proto][key]
                kwargs[key] = value
            else:
                # others
                kwargs[key] = value
        else:
            LOG.error('Unknown match field: %s', key)
    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_vid(value):
    """Convert a user VLAN id via ofctl_utils, using OFPVID_PRESENT."""
    return ofctl_utils.to_match_vid(value, ofproto_v1_3.OFPVID_PRESENT)
def match_to_str(ofmatch):
    """Convert an OFPMatch back into a JSON-style dict of legacy names.

    OXM names are mapped back to their OF1.0-style aliases where one
    exists.  Because tcp_src/udp_src both map to tp_src (and likewise
    for *_dst), setdefault() keeps only the first occurrence.
    """
    keys = {'eth_src': 'dl_src',
            'eth_dst': 'dl_dst',
            'eth_type': 'dl_type',
            'vlan_vid': 'dl_vlan',
            'ipv4_src': 'nw_src',
            'ipv4_dst': 'nw_dst',
            'ip_proto': 'nw_proto',
            'tcp_src': 'tp_src',
            'tcp_dst': 'tp_dst',
            'udp_src': 'tp_src',
            'udp_dst': 'tp_dst'}
    match = {}
    # Walk the serialized OXM TLVs rather than the parsed object.
    ofmatch = ofmatch.to_jsondict()['OFPMatch']
    ofmatch = ofmatch['oxm_fields']
    for match_field in ofmatch:
        key = match_field['OXMTlv']['field']
        if key in keys:
            key = keys[key]
        mask = match_field['OXMTlv']['mask']
        value = match_field['OXMTlv']['value']
        if key == 'dl_vlan':
            value = match_vid_to_str(value, mask)
        elif key == 'in_port':
            value = UTIL.ofp_port_to_user(value)
        else:
            if mask is not None:
                value = str(value) + '/' + str(mask)
        match.setdefault(key, value)
    return match
def match_vid_to_str(value, mask):
    """Render a matched VLAN id/mask via ofctl_utils (OFPVID_PRESENT)."""
    return ofctl_utils.match_vid_to_str(
        value, mask, ofproto_v1_3.OFPVID_PRESENT)
def wrap_dpid_dict(dp, value, to_user=True):
    """Wrap *value* in a single-entry dict keyed by the datapath id.

    The key is the dpid as a string when to_user is True, otherwise
    the raw integer id.
    """
    key = str(dp.id) if to_user else dp.id
    return {key: value}
def get_desc_stats(dp, waiters, to_user=True):
    """Query the switch description (OFPDescStats).

    If multiple replies arrive, the last one wins (the dict is
    overwritten per reply, matching the original behavior).
    """
    req = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    desc = {}
    for reply in msgs:
        body = reply.body
        desc = body.to_jsondict()[body.__class__.__name__]
    return wrap_dpid_dict(dp, desc, to_user)
def get_queue_stats(dp, waiters, port=None, queue_id=None, to_user=True):
    """Collect per-queue counters (OFPQueueStats).

    port=None / queue_id=None query all ports / all queues.
    Returns {dpid: [stat_dict, ...]}.
    """
    ofp = dp.ofproto
    port = ofp.OFPP_ANY if port is None else str_to_int(port)
    queue_id = ofp.OFPQ_ALL if queue_id is None else str_to_int(queue_id)
    req = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port,
                                                 queue_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    queues = []
    for reply in msgs:
        for stat in reply.body:
            queues.append({'duration_nsec': stat.duration_nsec,
                           'duration_sec': stat.duration_sec,
                           'port_no': stat.port_no,
                           'queue_id': stat.queue_id,
                           'tx_bytes': stat.tx_bytes,
                           'tx_errors': stat.tx_errors,
                           'tx_packets': stat.tx_packets})
    return wrap_dpid_dict(dp, queues, to_user)
def get_queue_config(dp, waiters, port=None, to_user=True):
    """Query queue configuration (OFPQueueGetConfig) for a port.

    port=None queries OFPP_ANY.  Returns {dpid: [config_dict, ...]},
    one entry per reply message, each listing its queues/properties.
    """
    ofp = dp.ofproto
    if port is None:
        port = ofp.OFPP_ANY
    else:
        port = UTIL.ofp_port_from_user(str_to_int(port))
    stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE',
                 dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE',
                 dp.ofproto.OFPQT_EXPERIMENTER: 'EXPERIMENTER'}
    configs = []
    for config in msgs:
        queue_list = []
        for queue in config.queues:
            prop_list = []
            for prop in queue.properties:
                p = {'property': prop_type.get(prop.property, 'UNKNOWN')}
                # MIN_RATE/MAX_RATE carry a rate; EXPERIMENTER carries
                # an experimenter id plus opaque data.
                if prop.property == dp.ofproto.OFPQT_MIN_RATE or \
                        prop.property == dp.ofproto.OFPQT_MAX_RATE:
                    p['rate'] = prop.rate
                elif prop.property == dp.ofproto.OFPQT_EXPERIMENTER:
                    p['experimenter'] = prop.experimenter
                    p['data'] = prop.data
                prop_list.append(p)
            q = {'properties': prop_list}
            if to_user:
                q['port'] = UTIL.ofp_port_to_user(queue.port)
                q['queue_id'] = UTIL.ofp_queue_to_user(queue.queue_id)
            else:
                q['port'] = queue.port
                q['queue_id'] = queue.queue_id
            queue_list.append(q)
        c = {'queues': queue_list}
        if to_user:
            c['port'] = UTIL.ofp_port_to_user(config.port)
        else:
            c['port'] = config.port
        configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_flow_stats(dp, waiters, flow=None, to_user=True):
    """Collect flow entries (OFPFlowStats), optionally filtered.

    *flow* is a JSON-style filter dict (table_id, out_port, cookie,
    match, ...).  Returns {dpid: [flow_dict, ...]}.
    """
    flow = flow if flow else {}
    table_id = UTIL.ofp_table_from_user(
        flow.get('table_id', dp.ofproto.OFPTT_ALL))
    flags = str_to_int(flow.get('flags', 0))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))
    # Note: OpenFlow does not allow to filter flow entries by priority,
    # but for efficiency, ofctl provides the way to do it.
    priority = str_to_int(flow.get('priority', -1))
    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
        match)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    flows = []
    for msg in msgs:
        for stats in msg.body:
            # Chained comparison: skip when a non-negative priority
            # filter was given and this entry does not match it.
            if 0 <= priority != stats.priority:
                continue
            s = {'priority': stats.priority,
                 'cookie': stats.cookie,
                 'idle_timeout': stats.idle_timeout,
                 'hard_timeout': stats.hard_timeout,
                 'byte_count': stats.byte_count,
                 'duration_sec': stats.duration_sec,
                 'duration_nsec': stats.duration_nsec,
                 'packet_count': stats.packet_count,
                 'length': stats.length,
                 'flags': stats.flags}
            if to_user:
                s['actions'] = actions_to_str(stats.instructions)
                s['match'] = match_to_str(stats.match)
                s['table_id'] = UTIL.ofp_table_to_user(stats.table_id)
            else:
                # Raw mode exposes the instructions under both keys
                # for backward compatibility.
                s['actions'] = stats.instructions
                s['instructions'] = stats.instructions
                s['match'] = stats.match
                s['table_id'] = stats.table_id
            flows.append(s)
    return wrap_dpid_dict(dp, flows, to_user)
def get_aggregate_flow_stats(dp, waiters, flow=None, to_user=True):
    """Collect aggregate flow counters (OFPAggregateStats).

    *flow* is the same JSON-style filter dict accepted by
    get_flow_stats.  Returns {dpid: [aggregate_dict, ...]}.
    """
    flow = flow or {}
    ofp = dp.ofproto
    req = dp.ofproto_parser.OFPAggregateStatsRequest(
        dp,
        str_to_int(flow.get('flags', 0)),
        UTIL.ofp_table_from_user(flow.get('table_id', ofp.OFPTT_ALL)),
        UTIL.ofp_port_from_user(flow.get('out_port', ofp.OFPP_ANY)),
        UTIL.ofp_group_from_user(flow.get('out_group', ofp.OFPG_ANY)),
        str_to_int(flow.get('cookie', 0)),
        str_to_int(flow.get('cookie_mask', 0)),
        to_match(dp, flow.get('match', {})))
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    results = [{'packet_count': reply.body.packet_count,
                'byte_count': reply.body.byte_count,
                'flow_count': reply.body.flow_count}
               for reply in msgs]
    return wrap_dpid_dict(dp, results, to_user)
def get_table_stats(dp, waiters, to_user=True):
    """Collect per-table counters (OFPTableStats).

    Returns {dpid: [table_dict, ...]}.
    """
    req = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    tables = []
    for reply in msgs:
        for stat in reply.body:
            entry = {'active_count': stat.active_count,
                     'lookup_count': stat.lookup_count,
                     'matched_count': stat.matched_count,
                     'table_id': (UTIL.ofp_table_to_user(stat.table_id)
                                  if to_user else stat.table_id)}
            tables.append(entry)
    return wrap_dpid_dict(dp, tables, to_user)
def get_table_features(dp, waiters, to_user=True):
    """Collect table feature descriptions (OFPTableFeaturesStats).

    Decodes each table-feature property into a dict according to its
    property class (instructions / next-tables / actions / OXM ids /
    experimenter).  Returns {dpid: [table_dict, ...]}.
    """
    stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
    msgs = []
    ofproto = dp.ofproto
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    prop_type = {ofproto.OFPTFPT_INSTRUCTIONS: 'INSTRUCTIONS',
                 ofproto.OFPTFPT_INSTRUCTIONS_MISS: 'INSTRUCTIONS_MISS',
                 ofproto.OFPTFPT_NEXT_TABLES: 'NEXT_TABLES',
                 ofproto.OFPTFPT_NEXT_TABLES_MISS: 'NEXT_TABLES_MISS',
                 ofproto.OFPTFPT_WRITE_ACTIONS: 'WRITE_ACTIONS',
                 ofproto.OFPTFPT_WRITE_ACTIONS_MISS: 'WRITE_ACTIONS_MISS',
                 ofproto.OFPTFPT_APPLY_ACTIONS: 'APPLY_ACTIONS',
                 ofproto.OFPTFPT_APPLY_ACTIONS_MISS: 'APPLY_ACTIONS_MISS',
                 ofproto.OFPTFPT_MATCH: 'MATCH',
                 ofproto.OFPTFPT_WILDCARDS: 'WILDCARDS',
                 ofproto.OFPTFPT_WRITE_SETFIELD: 'WRITE_SETFIELD',
                 ofproto.OFPTFPT_WRITE_SETFIELD_MISS: 'WRITE_SETFIELD_MISS',
                 ofproto.OFPTFPT_APPLY_SETFIELD: 'APPLY_SETFIELD',
                 ofproto.OFPTFPT_APPLY_SETFIELD_MISS: 'APPLY_SETFIELD_MISS',
                 ofproto.OFPTFPT_EXPERIMENTER: 'EXPERIMENTER',
                 ofproto.OFPTFPT_EXPERIMENTER_MISS: 'EXPERIMENTER_MISS'}
    if not to_user:
        # Raw mode: keep the numeric property codes as-is.
        prop_type = dict((k, k) for k in prop_type.keys())
    p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
                           ofproto.OFPTFPT_INSTRUCTIONS_MISS]
    p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
                          ofproto.OFPTFPT_NEXT_TABLES_MISS]
    p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
                      ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
                      ofproto.OFPTFPT_APPLY_ACTIONS,
                      ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
    p_type_oxms = [ofproto.OFPTFPT_MATCH,
                   ofproto.OFPTFPT_WILDCARDS,
                   ofproto.OFPTFPT_WRITE_SETFIELD,
                   ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
                   ofproto.OFPTFPT_APPLY_SETFIELD,
                   ofproto.OFPTFPT_APPLY_SETFIELD_MISS]
    p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
                           ofproto.OFPTFPT_EXPERIMENTER_MISS]
    tables = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            properties = []
            for prop in stat.properties:
                p = {'type': prop_type.get(prop.type, 'UNKNOWN')}
                if prop.type in p_type_instructions:
                    instruction_ids = []
                    for i in prop.instruction_ids:
                        inst = {'len': i.len,
                                'type': i.type}
                        instruction_ids.append(inst)
                    p['instruction_ids'] = instruction_ids
                elif prop.type in p_type_next_tables:
                    table_ids = []
                    for i in prop.table_ids:
                        table_ids.append(i)
                    p['table_ids'] = table_ids
                elif prop.type in p_type_actions:
                    action_ids = []
                    for i in prop.action_ids:
                        act = {'len': i.len,
                               'type': i.type}
                        action_ids.append(act)
                    p['action_ids'] = action_ids
                elif prop.type in p_type_oxms:
                    oxm_ids = []
                    for i in prop.oxm_ids:
                        oxm = {'hasmask': i.hasmask,
                               'length': i.length,
                               'type': i.type}
                        oxm_ids.append(oxm)
                    p['oxm_ids'] = oxm_ids
                elif prop.type in p_type_experimenter:
                    # Experimenter payloads are not decoded.
                    pass
                properties.append(p)
            s = {
                'name': stat.name.decode('utf-8'),
                'metadata_match': stat.metadata_match,
                'metadata_write': stat.metadata_write,
                'config': stat.config,
                'max_entries': stat.max_entries,
                'properties': properties,
            }
            if to_user:
                s['table_id'] = UTIL.ofp_table_to_user(stat.table_id)
            else:
                s['table_id'] = stat.table_id
            tables.append(s)
    return wrap_dpid_dict(dp, tables, to_user)
def get_port_stats(dp, waiters, port=None, to_user=True):
    """Collect per-port counters (OFPPortStats).

    port=None queries all ports.  Returns {dpid: [port_dict, ...]}.
    """
    port = dp.ofproto.OFPP_ANY if port is None else str_to_int(port)
    req = dp.ofproto_parser.OFPPortStatsRequest(
        dp, 0, port)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    ports = []
    for reply in msgs:
        for stat in reply.body:
            entry = {'rx_packets': stat.rx_packets,
                     'tx_packets': stat.tx_packets,
                     'rx_bytes': stat.rx_bytes,
                     'tx_bytes': stat.tx_bytes,
                     'rx_dropped': stat.rx_dropped,
                     'tx_dropped': stat.tx_dropped,
                     'rx_errors': stat.rx_errors,
                     'tx_errors': stat.tx_errors,
                     'rx_frame_err': stat.rx_frame_err,
                     'rx_over_err': stat.rx_over_err,
                     'rx_crc_err': stat.rx_crc_err,
                     'collisions': stat.collisions,
                     'duration_sec': stat.duration_sec,
                     'duration_nsec': stat.duration_nsec,
                     'port_no': (UTIL.ofp_port_to_user(stat.port_no)
                                 if to_user else stat.port_no)}
            ports.append(entry)
    return wrap_dpid_dict(dp, ports, to_user)
def get_meter_stats(dp, waiters, meter_id=None, to_user=True):
    """Collect per-meter counters (OFPMeterStats).

    meter_id=None queries all meters.  Returns {dpid: [meter_dict, ...]}.
    """
    meter_id = (dp.ofproto.OFPM_ALL if meter_id is None
                else str_to_int(meter_id))
    req = dp.ofproto_parser.OFPMeterStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    meters = []
    for reply in msgs:
        for stat in reply.body:
            bands = [{'packet_band_count': band.packet_band_count,
                      'byte_band_count': band.byte_band_count}
                     for band in stat.band_stats]
            entry = {'len': stat.len,
                     'flow_count': stat.flow_count,
                     'packet_in_count': stat.packet_in_count,
                     'byte_in_count': stat.byte_in_count,
                     'duration_sec': stat.duration_sec,
                     'duration_nsec': stat.duration_nsec,
                     'band_stats': bands,
                     'meter_id': (UTIL.ofp_meter_to_user(stat.meter_id)
                                  if to_user else stat.meter_id)}
            meters.append(entry)
    return wrap_dpid_dict(dp, meters, to_user)
def get_meter_features(dp, waiters, to_user=True):
    """Query meter features (OFPMeterFeaturesStats).

    Returns {dpid: [feature_dict, ...]} with decoded band types and
    capability flags.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPMBT_DROP: 'DROP',
                    ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
    capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
                    ofp.OFPMF_PKTPS: 'PKTPS',
                    ofp.OFPMF_BURST: 'BURST',
                    ofp.OFPMF_STATS: 'STATS'}
    stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        for feature in msg.body:
            band_types = []
            for k, v in type_convert.items():
                # band_types is a bitmap indexed by band-type value,
                # hence the (1 << k) test here ...
                if (1 << k) & feature.band_types:
                    if to_user:
                        band_types.append(v)
                    else:
                        band_types.append(k)
            capabilities = []
            for k, v in sorted(capa_convert.items()):
                # ... whereas OFPMF_* constants are already bit flags,
                # so they are masked directly.
                if k & feature.capabilities:
                    if to_user:
                        capabilities.append(v)
                    else:
                        capabilities.append(k)
            f = {'max_meter': feature.max_meter,
                 'band_types': band_types,
                 'capabilities': capabilities,
                 'max_bands': feature.max_bands,
                 'max_color': feature.max_color}
            features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_meter_config(dp, waiters, meter_id=None, to_user=True):
    """Query meter configuration (OFPMeterConfigStats).

    meter_id=None queries all meters.  Returns {dpid: [config, ...]}.
    """
    flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
             dp.ofproto.OFPMF_PKTPS: 'PKTPS',
             dp.ofproto.OFPMF_BURST: 'BURST',
             dp.ofproto.OFPMF_STATS: 'STATS'}
    band_type = {dp.ofproto.OFPMBT_DROP: 'DROP',
                 dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK',
                 dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'}
    if meter_id is None:
        meter_id = dp.ofproto.OFPM_ALL
    else:
        meter_id = str_to_int(meter_id)
    stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
        dp, 0, meter_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    configs = []
    for msg in msgs:
        for config in msg.body:
            bands = []
            for band in config.bands:
                b = {'rate': band.rate,
                     'burst_size': band.burst_size}
                if to_user:
                    b['type'] = band_type.get(band.type, '')
                else:
                    b['type'] = band.type
                # Band-type-specific extras.
                if band.type == dp.ofproto.OFPMBT_DSCP_REMARK:
                    b['prec_level'] = band.prec_level
                elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER:
                    b['experimenter'] = band.experimenter
                bands.append(b)
            c_flags = []
            for k, v in sorted(flags.items()):
                if k & config.flags:
                    if to_user:
                        c_flags.append(v)
                    else:
                        c_flags.append(k)
            c = {'flags': c_flags,
                 'bands': bands}
            if to_user:
                c['meter_id'] = UTIL.ofp_meter_to_user(config.meter_id)
            else:
                c['meter_id'] = config.meter_id
            configs.append(c)
    return wrap_dpid_dict(dp, configs, to_user)
def get_group_stats(dp, waiters, group_id=None, to_user=True):
    """Collect per-group counters (OFPGroupStats).

    group_id=None queries all groups.  Returns {dpid: [group_dict, ...]}.
    """
    group_id = (dp.ofproto.OFPG_ALL if group_id is None
                else str_to_int(group_id))
    req = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, 0, group_id)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    groups = []
    for reply in msgs:
        for stat in reply.body:
            bucket_stats = [{'packet_count': b.packet_count,
                             'byte_count': b.byte_count}
                            for b in stat.bucket_stats]
            entry = {'length': stat.length,
                     'ref_count': stat.ref_count,
                     'packet_count': stat.packet_count,
                     'byte_count': stat.byte_count,
                     'duration_sec': stat.duration_sec,
                     'duration_nsec': stat.duration_nsec,
                     'bucket_stats': bucket_stats,
                     'group_id': (UTIL.ofp_group_to_user(stat.group_id)
                                  if to_user else stat.group_id)}
            groups.append(entry)
    return wrap_dpid_dict(dp, groups, to_user)
def get_group_features(dp, waiters, to_user=True):
    """Query group features (OFPGroupFeaturesStats).

    Decodes supported group types, capabilities, per-type max group
    counts, and per-type supported action bitmaps.
    """
    ofp = dp.ofproto
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD',
                   ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
                   ofp.OFPAT_POP_PBB: 'POP_PBB'}
    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    features = []
    for msg in msgs:
        feature = msg.body
        types = []
        for k, v in type_convert.items():
            # feature.types is a bitmap indexed by group-type value.
            if (1 << k) & feature.types:
                if to_user:
                    types.append(v)
                else:
                    types.append(k)
        capabilities = []
        for k, v in cap_convert.items():
            # OFPGFC_* constants are already bit flags.
            if k & feature.capabilities:
                if to_user:
                    capabilities.append(v)
                else:
                    capabilities.append(k)
        if to_user:
            max_groups = []
            for k, v in type_convert.items():
                max_groups.append({v: feature.max_groups[k]})
        else:
            max_groups = feature.max_groups
        actions = []
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                # Per-group-type action support is a bitmap indexed by
                # OFPAT_* value.
                if (1 << k2) & feature.actions[k1]:
                    if to_user:
                        acts.append(v2)
                    else:
                        acts.append(k2)
            if to_user:
                actions.append({v1: acts})
            else:
                actions.append({k1: acts})
        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    return wrap_dpid_dict(dp, features, to_user)
def get_group_desc(dp, waiters, to_user=True):
    """Collect group descriptions (OFPGroupDescStats).

    Returns {dpid: [desc_dict, ...]} with buckets and (in user mode)
    stringified actions and group type names.
    """
    type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
                    dp.ofproto.OFPGT_SELECT: 'SELECT',
                    dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
                    dp.ofproto.OFPGT_FF: 'FF'}
    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
    descs = []
    for msg in msgs:
        for stats in msg.body:
            buckets = []
            for bucket in stats.buckets:
                actions = []
                for action in bucket.actions:
                    if to_user:
                        actions.append(action_to_str(action))
                    else:
                        actions.append(action)
                b = {'weight': bucket.weight,
                     'watch_port': bucket.watch_port,
                     'watch_group': bucket.watch_group,
                     'actions': actions}
                buckets.append(b)
            d = {'buckets': buckets}
            if to_user:
                d['group_id'] = UTIL.ofp_group_to_user(stats.group_id)
                d['type'] = type_convert.get(stats.type)
            else:
                d['group_id'] = stats.group_id
                d['type'] = stats.type
            descs.append(d)
    return wrap_dpid_dict(dp, descs, to_user)
def get_port_desc(dp, waiters, to_user=True):
    """Collect port descriptions (OFPPortDescStats).

    Returns {dpid: [port_desc_dict, ...]}.
    """
    req = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0)
    msgs = []
    ofctl_utils.send_stats_request(dp, req, waiters, msgs, LOG)
    descs = []
    for reply in msgs:
        for stat in reply.body:
            desc = {'hw_addr': stat.hw_addr,
                    'name': stat.name.decode('utf-8', errors='replace'),
                    'config': stat.config,
                    'state': stat.state,
                    'curr': stat.curr,
                    'advertised': stat.advertised,
                    'supported': stat.supported,
                    'peer': stat.peer,
                    'curr_speed': stat.curr_speed,
                    'max_speed': stat.max_speed,
                    'port_no': (UTIL.ofp_port_to_user(stat.port_no)
                                if to_user else stat.port_no)}
            descs.append(desc)
    return wrap_dpid_dict(dp, descs, to_user)
def get_role(dp, waiters, to_user=True):
    """Query the controller role (delegates to ofctl_utils.get_role)."""
    return ofctl_utils.get_role(dp, waiters, to_user)
def mod_flow_entry(dp, flow, cmd):
    """Send an OFPFlowMod built from a JSON-style *flow* dict.

    *cmd* is one of the OFPFC_* flow-mod commands.  Missing fields
    default to 0 / ANY / NO_BUFFER as appropriate.
    """
    cookie = str_to_int(flow.get('cookie', 0))
    cookie_mask = str_to_int(flow.get('cookie_mask', 0))
    table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0))
    idle_timeout = str_to_int(flow.get('idle_timeout', 0))
    hard_timeout = str_to_int(flow.get('hard_timeout', 0))
    priority = str_to_int(flow.get('priority', 0))
    buffer_id = UTIL.ofp_buffer_from_user(
        flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = UTIL.ofp_port_from_user(
        flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = UTIL.ofp_group_from_user(
        flow.get('out_group', dp.ofproto.OFPG_ANY))
    flags = str_to_int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    # 'actions' may contain both plain actions and instruction entries;
    # to_actions converts the whole list into OFP instructions.
    inst = to_actions(dp, flow.get('actions', []))
    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, match, inst)
    ofctl_utils.send_msg(dp, flow_mod, LOG)
def mod_meter_entry(dp, meter, cmd):
    """Send an OFPMeterMod built from a JSON-style *meter* dict.

    'flags' may be a single name or a list of names (KBPS, PKTPS,
    BURST, STATS); unknown names are logged and skipped.  Unknown band
    types are logged and skipped as well.
    """
    flags_convert = {'KBPS': dp.ofproto.OFPMF_KBPS,
                     'PKTPS': dp.ofproto.OFPMF_PKTPS,
                     'BURST': dp.ofproto.OFPMF_BURST,
                     'STATS': dp.ofproto.OFPMF_STATS}
    flags = 0
    if 'flags' in meter:
        meter_flags = meter['flags']
        # Accept a bare string as a one-element list.
        if not isinstance(meter_flags, list):
            meter_flags = [meter_flags]
        for flag in meter_flags:
            if flag not in flags_convert:
                LOG.error('Unknown meter flag: %s', flag)
                continue
            flags |= flags_convert.get(flag)
    meter_id = UTIL.ofp_meter_from_user(meter.get('meter_id', 0))
    bands = []
    for band in meter.get('bands', []):
        band_type = band.get('type')
        rate = str_to_int(band.get('rate', 0))
        burst_size = str_to_int(band.get('burst_size', 0))
        if band_type == 'DROP':
            bands.append(
                dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
        elif band_type == 'DSCP_REMARK':
            prec_level = str_to_int(band.get('prec_level', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandDscpRemark(
                    rate, burst_size, prec_level))
        elif band_type == 'EXPERIMENTER':
            experimenter = str_to_int(band.get('experimenter', 0))
            bands.append(
                dp.ofproto_parser.OFPMeterBandExperimenter(
                    rate, burst_size, experimenter))
        else:
            LOG.error('Unknown band type: %s', band_type)
    meter_mod = dp.ofproto_parser.OFPMeterMod(
        dp, cmd, flags, meter_id, bands)
    ofctl_utils.send_msg(dp, meter_mod, LOG)
def mod_group_entry(dp, group, cmd):
    """Send an OFPGroupMod built from a JSON-style *group* dict.

    'type' defaults to ALL.  Bucket action dicts that to_action cannot
    convert are silently dropped from the bucket.
    """
    type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
                    'SELECT': dp.ofproto.OFPGT_SELECT,
                    'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
                    'FF': dp.ofproto.OFPGT_FF}
    type_ = type_convert.get(group.get('type', 'ALL'))
    if type_ is None:
        # NOTE(review): an unknown type is only logged; type_ stays
        # None and is still passed to OFPGroupMod below -- confirm
        # whether that should abort instead.
        LOG.error('Unknown group type: %s', group.get('type'))
    group_id = UTIL.ofp_group_from_user(group.get('group_id', 0))
    buckets = []
    for bucket in group.get('buckets', []):
        weight = str_to_int(bucket.get('weight', 0))
        watch_port = str_to_int(
            bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = str_to_int(
            bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))
    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, type_, group_id, buckets)
    ofctl_utils.send_msg(dp, group_mod, LOG)
def mod_port_behavior(dp, port_config):
    """Send an OFPPortMod built from a JSON-style *port_config* dict."""
    port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0))
    # hw_addr is passed through as a plain string; no validation here.
    hw_addr = str(port_config.get('hw_addr'))
    config = str_to_int(port_config.get('config', 0))
    mask = str_to_int(port_config.get('mask', 0))
    # NOTE(review): no default for 'advertise' -- str_to_int(None)
    # likely raises if the key is missing; confirm callers always
    # supply it.
    advertise = str_to_int(port_config.get('advertise'))
    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)
    ofctl_utils.send_msg(dp, port_mod, LOG)
def set_role(dp, role):
    """Send an OFPRoleRequest; defaults to ROLE_EQUAL, generation_id 0."""
    r = UTIL.ofp_role_from_user(role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL))
    role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, 0)
    ofctl_utils.send_msg(dp, role_request, LOG)
# NOTE(jkoelker) Alias common functions
send_experimenter = ofctl_utils.send_experimenter
| |
"""The tests for the Rfxtrx sensor platform."""
import unittest
from homeassistant.bootstrap import _setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from homeassistant.const import TEMP_CELSIUS
from tests.common import get_test_home_assistant
class TestSensorRfxtrx(unittest.TestCase):
"""Test the Rfxtrx sensor platform."""
    def setUp(self):
        """Set up a test Home Assistant instance with rfxtrx loaded."""
        self.hass = get_test_home_assistant(0)
        self.hass.config.components = ['rfxtrx']
    def tearDown(self):
        """Stop everything that was started."""
        # Reset module-level rfxtrx state so tests stay independent.
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
        rfxtrx_core.RFX_DEVICES = {}
        self.hass.stop()
    def test_default_config(self):
        """Test that an empty device config creates no sensors."""
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'devices':
                           {}}}))
        self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
    def test_old_config_sensor(self):
        """Test 1 sensor configured with the legacy 'packetid' key."""
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'devices':
                           {'sensor_0502': {
                               'name': 'Test',
                               'packetid': '0a52080705020095220269',
                               'data_type': 'Temperature'}}}}))

        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
        self.assertEqual('Test', entity.name)
        self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
        self.assertEqual(None, entity.state)
    def test_one_sensor(self):
        """Test 1 sensor keyed directly by its packet id."""
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'devices':
                           {'0a52080705020095220269': {
                               'name': 'Test',
                               'data_type': 'Temperature'}}}}))

        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
        self.assertEqual('Test', entity.name)
        self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
        self.assertEqual(None, entity.state)
    def test_one_sensor_no_datatype(self):
        """Test 1 sensor with data_type omitted (defaults to Temperature)."""
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'devices':
                           {'0a52080705020095220269': {
                               'name': 'Test'}}}}))

        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
        self.assertEqual('Test', entity.name)
        self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
        self.assertEqual(None, entity.state)

        # The entity should also be registered in the state machine,
        # reporting 'unknown' until a packet arrives.
        entity_id = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']\
            .entity_id
        entity = self.hass.states.get(entity_id)
        self.assertEqual('Test', entity.name)
        self.assertEqual('unknown', entity.state)
    def test_several_sensors(self):
        """Test 2 devices yielding 3 sensor entities in total."""
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'devices':
                           {'0a52080705020095220269': {
                               'name': 'Test',
                               'data_type': 'Temperature'},
                            '0a520802060100ff0e0269': {
                                'name': 'Bath',
                                'data_type': ['Temperature', 'Humidity']
                            }}}}))

        # Two device entries, one of which carries two data types.
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
        device_num = 0
        for id in rfxtrx_core.RFX_DEVICES:
            if id == 'sensor_0601':
                device_num = device_num + 1
                self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
                _entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
                _entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
                self.assertEqual('%', _entity_hum.unit_of_measurement)
                self.assertEqual('Bath', _entity_hum.__str__())
                self.assertEqual(None, _entity_hum.state)
                self.assertEqual(TEMP_CELSIUS,
                                 _entity_temp.unit_of_measurement)
                self.assertEqual('Bath', _entity_temp.__str__())
            elif id == 'sensor_0502':
                device_num = device_num + 1
                entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
                self.assertEqual(None, entity.state)
                self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
                self.assertEqual('Test', entity.__str__())

        self.assertEqual(2, device_num)
    def test_discover_sensor(self):
        """Test with discovery of sensor.

        With automatic_add enabled, an unseen sensor event creates a new
        entity, a repeated event must not duplicate it, and an event for a
        switch must not create a sensor entity at all.
        """
        self.assertTrue(_setup_component(self.hass, 'sensor', {
            'sensor': {'platform': 'rfxtrx',
                       'automatic_add': True,
                       'devices': {}}}))

        # First event: sensor 0701 should be auto-added with these readings.
        event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
        event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)

        entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
        self.assertEqual({'Humidity status': 'normal',
                          'Temperature': 18.4,
                          'Rssi numeric': 7, 'Humidity': 27,
                          'Battery numeric': 9,
                          'Humidity status numeric': 2},
                         entity.device_state_attributes)
        self.assertEqual('0a520801070100b81b0279',
                         entity.__str__())

        # Same event again: no duplicate device may be created.
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
        self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))

        # A second, distinct sensor (0502) should be auto-added too.
        event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
        event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)

        entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
        self.assertEqual({'Humidity status': 'normal',
                          'Temperature': 14.9,
                          'Rssi numeric': 7, 'Humidity': 36,
                          'Battery numeric': 9,
                          'Humidity status numeric': 2},
                         entity.device_state_attributes)
        self.assertEqual('0a52080405020095240279',
                         entity.__str__())

        # New event for the already-known 0701 device: the existing entity
        # is updated in place, the device count stays at 2.
        event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
        event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)

        entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
        self.assertEqual({'Humidity status': 'normal',
                          'Temperature': 17.9,
                          'Rssi numeric': 7, 'Humidity': 27,
                          'Battery numeric': 9,
                          'Humidity status numeric': 2},
                         entity.device_state_attributes)
        self.assertEqual('0a520801070100b81b0279',
                         entity.__str__())

        # trying to add a switch
        event = rfxtrx_core.get_rfx_object('0b1100cd0213c7f210010f70')
        rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)

        self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_sensor_noautoadd(self):
"""Test with discover of sensor when auto add is False."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_update_of_sensors(self):
"""Test with 3 sensors."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'},
'0a520802060100ff0e0269': {
'name': 'Bath',
'data_type': ['Temperature', 'Humidity']
}}}}))
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(None, _entity_temp.state)
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(None, entity.state)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
event = rfxtrx_core.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
event = rfxtrx_core.get_rfx_object('0a52080705020085220269')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual(15, _entity_hum.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_hum.device_state_attributes)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual(51.1, _entity_temp.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_temp.device_state_attributes)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(13.3, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 13.3,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
| |
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
from .. import families as fam
from .. import output as op
from .. import tests as tst
from .. import tsm as tsm
from .. import data_check as dc
from .garch_recursions import garch_recursion
class GARCH(tsm.TSM):
    """ Inherits time series methods from TSM class.

    **** GENERALIZED AUTOREGRESSIVE CONDITIONAL HETEROSCEDASTICITY (GARCH) MODELS ****

    Parameters
    ----------
    data : pd.DataFrame or np.array
        Field to specify the time series data that will be used.

    p : int
        Field to specify how many GARCH terms the model will have.

    q : int
        Field to specify how many ARCH terms the model will have.

    target : str (pd.DataFrame) or int (np.array)
        Specifies which column name or array index to use. By default, first
        column/array will be selected as the dependent variable.
    """

    def __init__(self, data, p, q, target=None):
        # Initialize TSM object
        super(GARCH, self).__init__('GARCH')

        # Latent variables: vol constant + q ARCH + p GARCH + returns constant
        self.p = p
        self.q = q
        self.z_no = self.p + self.q + 2
        self.max_lag = max(self.p, self.q)
        self.model_name = "GARCH(" + str(self.p) + "," + str(self.q) + ")"
        self._z_hide = 0  # Whether to cutoff variance latent variables from results
        self.supported_methods = ["MLE", "PML", "Laplace", "M-H", "BBVI"]
        self.default_method = "MLE"
        self.multivariate_model = False

        # Format the data
        self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data, target)
        self.data_length = self.data.shape[0]

        self._create_latent_variables()

    def _create_latent_variables(self):
        """ Creates model latent variables

        Returns
        ----------
        None (changes model attributes)
        """
        self.latent_variables.add_z('Vol Constant', fam.Normal(0, 3, transform='exp'), fam.Normal(0, 3))
        self.latent_variables.z_list[0].start = -7.00

        # ARCH terms (logit-transformed so they stay in (0, 1))
        for q_term in range(self.q):
            self.latent_variables.add_z('q(' + str(q_term+1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
            if q_term == 0:
                self.latent_variables.z_list[-1].start = -1.50
            else:
                self.latent_variables.z_list[-1].start = -4.00

        # GARCH terms (logit-transformed so they stay in (0, 1))
        for p_term in range(self.p):
            self.latent_variables.add_z('p(' + str(p_term+1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
            if p_term == 0:
                self.latent_variables.z_list[-1].start = 3.00
            else:
                self.latent_variables.z_list[-1].start = -4.00

        self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))

    def _model(self, beta):
        """ Creates the structure of the model

        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        sigma2 : np.array
            Contains the values for the conditional volatility series
        Y : np.array
            Contains the length-adjusted time series (accounting for lags)
        eps : np.array
            Contains the squared residuals (ARCH terms) for the time series
        """
        # Transform latent variables
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])

        # parm[-1] is the returns constant (mean); squared demeaned values
        xeps = np.power(self.data-parm[-1], 2)
        Y = np.array(self.data[self.max_lag:])
        eps = np.power(Y-parm[-1], 2)
        X = np.ones(Y.shape[0])

        # ARCH terms: stack lagged squared residuals as regressors
        if self.q != 0:
            for i in range(0, self.q):
                X = np.vstack((X, xeps[(self.max_lag-i-1):-i-1]))
            sigma2 = np.matmul(np.transpose(X), parm[0:-self.p-1])
        else:
            sigma2 = np.transpose(X*parm[0])

        # Add the recursive GARCH terms (implemented in Cython/Numba helper)
        sigma2 = garch_recursion(parm, sigma2, self.q, self.p, Y.shape[0], self.max_lag)

        return np.array(sigma2), Y, eps

    def _mb_model(self, beta, mini_batch):
        """ Creates the structure of the model (model matrices etc) for mini batch model.

        Here the structure is the same as for _model() but we sample a random
        contiguous slice of data points (of length mini_batch).

        Parameters
        ----------
        beta : np.ndarray
            Contains untransformed starting values for the latent variables

        mini_batch : int
            Mini batch size for the data sampling

        Returns
        ----------
        sigma2 : np.ndarray
            Contains the values for the conditional volatility series
        Y : np.ndarray
            Contains the length-adjusted time series (accounting for lags)
        eps : np.ndarray
            Contains the squared residuals (ARCH terms) for the time series
        """
        # Transform latent variables
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])

        # Sample a random contiguous window of the data
        rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1)
        sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
        sampled_data = self.data[sample]

        xeps = np.power(sampled_data-parm[-1], 2)
        Y = np.array(sampled_data[self.max_lag:])
        eps = np.power(Y-parm[-1], 2)
        X = np.ones(Y.shape[0])

        # ARCH terms
        if self.q != 0:
            for i in range(0, self.q):
                X = np.vstack((X, xeps[(self.max_lag-i-1):-i-1]))
            sigma2 = np.matmul(np.transpose(X), parm[0:-self.p-1])
        else:
            sigma2 = np.transpose(X*parm[0])

        sigma2 = garch_recursion(parm, sigma2, self.q, self.p, Y.shape[0], self.max_lag)

        return np.array(sigma2), Y, eps

    def _mean_prediction(self, sigma2, Y, scores, h, t_params):
        """ Creates a h-step ahead mean prediction

        Parameters
        ----------
        sigma2 : np.array
            The past predicted values
        Y : np.array
            The past data
        scores : np.array
            The past scores
        h : int
            How many steps ahead for the prediction
        t_params : np.array
            A vector of (transformed) latent variables

        Returns
        ----------
        np.array of past conditional variances extended with the h-step
        ahead mean predictions (length len(sigma2) + h)
        """
        # Create arrays to iterate over
        sigma2_exp = sigma2.copy()
        scores_exp = scores.copy()

        # Loop over h time periods
        for t in range(0, h):
            new_value = t_params[0]

            # ARCH
            if self.q != 0:
                for j in range(1, self.q+1):
                    new_value += t_params[j]*scores_exp[-j]

            # GARCH
            if self.p != 0:
                for k in range(1, self.p+1):
                    new_value += t_params[k+self.q]*sigma2_exp[-k]

            sigma2_exp = np.append(sigma2_exp, [new_value])  # For indexing consistency
            scores_exp = np.append(scores_exp, [0])  # expectation of score is zero

        return sigma2_exp

    def _sim_prediction(self, sigma2, Y, scores, h, t_params, simulations):
        """ Simulates a h-step ahead mean prediction

        Parameters
        ----------
        sigma2 : np.array
            The past predicted values
        Y : np.array
            The past data
        scores : np.array
            The past scores
        h : int
            How many steps ahead for the prediction
        t_params : np.array
            A vector of (transformed) latent variables
        simulations : int
            How many simulations to perform

        Returns
        ----------
        Matrix of simulations (h rows, simulations columns)
        """
        sim_vector = np.zeros([simulations, h])

        for n in range(0, simulations):
            # Create arrays to iterate over
            sigma2_exp = sigma2.copy()
            scores_exp = scores.copy()

            # Loop over h time periods
            for t in range(0, h):
                new_value = t_params[0]

                if self.q != 0:
                    for j in range(1, self.q+1):
                        new_value += t_params[j]*scores_exp[-j]

                if self.p != 0:
                    for k in range(1, self.p+1):
                        new_value += t_params[k+self.q]*sigma2_exp[-k]

                sigma2_exp = np.append(sigma2_exp, [new_value])  # For indexing consistency
                # Bootstrap a past squared residual as the future shock
                scores_exp = np.append(scores_exp, scores[np.random.randint(scores.shape[0])])

            sim_vector[n] = sigma2_exp[-h:]

        return np.transpose(sim_vector)

    def _sim_prediction_bayes(self, h, simulations):
        """ Simulates a h-step ahead mean prediction, drawing latent
        variables from the posterior for each simulation

        Parameters
        ----------
        h : int
            How many steps ahead for the prediction
        simulations : int
            How many simulations to perform

        Returns
        ----------
        Matrix of simulations (h rows, simulations columns)
        """
        sim_vector = np.zeros([simulations, h])

        for n in range(0, simulations):
            # New posterior draw of latent variables per simulation
            t_z = self.draw_latent_variables(nsims=1).T[0]
            sigma2, Y, scores = self._model(t_z)
            t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])])

            # Create arrays to iterate over
            sigma2_exp = sigma2.copy()
            scores_exp = scores.copy()

            # Loop over h time periods
            for t in range(0, h):
                new_value = t_z[0]

                if self.q != 0:
                    for j in range(1, self.q+1):
                        new_value += t_z[j]*scores_exp[-j]

                if self.p != 0:
                    for k in range(1, self.p+1):
                        new_value += t_z[k+self.q]*sigma2_exp[-k]

                sigma2_exp = np.append(sigma2_exp, [new_value])  # For indexing consistency
                # Bootstrap a past squared residual as the future shock
                scores_exp = np.append(scores_exp, scores[np.random.randint(scores.shape[0])])

            sim_vector[n] = sigma2_exp[-h:]

        return np.transpose(sim_vector)

    def _sim_predicted_mean(self, sigma2, Y, scores, h, t_params, simulations):
        """ Simulates a h-step ahead mean prediction (with randomly drawn disturbances)

        Parameters
        ----------
        sigma2 : np.array
            The past predicted values
        Y : np.array
            The past data
        scores : np.array
            The past scores
        h : int
            How many steps ahead for the prediction
        t_params : np.array
            A vector of (transformed) latent variables
        simulations : int
            How many simulations to perform

        Returns
        ----------
        np.array of past conditional variances appended with the
        simulation-averaged h-step ahead forecasts
        """
        sim_vector = np.zeros([simulations, h])

        for n in range(0, simulations):
            # Create arrays to iterate over
            sigma2_exp = sigma2.copy()
            scores_exp = scores.copy()

            # Loop over h time periods
            for t in range(0, h):
                new_value = t_params[0]

                if self.q != 0:
                    for j in range(1, self.q+1):
                        new_value += t_params[j]*scores_exp[-j]

                if self.p != 0:
                    for k in range(1, self.p+1):
                        new_value += t_params[k+self.q]*sigma2_exp[-k]

                sigma2_exp = np.append(sigma2_exp, [new_value])  # For indexing consistency
                # Bootstrap a past squared residual as the future shock
                scores_exp = np.append(scores_exp, scores[np.random.randint(scores.shape[0])])

            sim_vector[n] = sigma2_exp[-h:]

        return np.append(sigma2, np.array([np.mean(i) for i in np.transpose(sim_vector)]))

    def _summarize_simulations(self, sigma2, sim_vector, date_index, h, past_values):
        """ Summarizes a simulation vector and a mean vector of predictions

        Parameters
        ----------
        sigma2 : np.array
            Past volatility values for the model
        sim_vector : np.array
            N simulation predictions for h-step ahead forecasts
        date_index : pd.DateIndex or np.array
            Dates for the simulations
        h : int
            How many steps ahead are forecast
        past_values : int
            How many past observations to include in the forecast plot

        Returns
        ----------
        error_bars, forecasted_values, plot_values, plot_index
        """
        mean_values = np.append(sigma2, np.array([np.mean(i) for i in sim_vector]))
        error_bars = []
        # Percentile bands every 5% for the fan chart
        for pre in range(5, 100, 5):
            error_bars.append(np.insert([np.percentile(i, pre) for i in sim_vector], 0, mean_values[-h-1]))
        forecasted_values = np.insert([np.mean(i) for i in sim_vector], 0, mean_values[-h-1])
        plot_values = mean_values[-h-past_values:]
        plot_index = date_index[-h-past_values:]
        return error_bars, forecasted_values, plot_values, plot_index

    def neg_loglik(self, beta):
        """ Creates the negative log-likelihood of the model

        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        The negative log-likelihood of the model
        """
        sigma2, Y, __ = self._model(beta)
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
        return -np.sum(ss.norm.logpdf(Y, loc=parm[-1]*np.ones(sigma2.shape[0]), scale=np.sqrt(sigma2)))

    def mb_neg_loglik(self, beta, mini_batch):
        """ Calculates the negative log-likelihood of the Normal model for a minibatch

        Parameters
        ----------
        beta : np.ndarray
            Contains untransformed starting values for latent variables
        mini_batch : int
            Size of each mini batch of data

        Returns
        ----------
        The negative log-likelihood of the model
        """
        sigma2, Y, __ = self._mb_model(beta, mini_batch)
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
        return -np.sum(ss.norm.logpdf(Y, loc=parm[-1]*np.ones(sigma2.shape[0]), scale=np.sqrt(sigma2)))

    def plot_fit(self, **kwargs):
        """ Plots the fit of the model

        Returns
        ----------
        None (plots data and the fit)
        """
        import matplotlib.pyplot as plt
        import seaborn as sns  # imported for its plot styling side effect

        figsize = kwargs.get('figsize', (10, 7))

        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            plt.figure(figsize=figsize)
            date_index = self.index[max(self.p, self.q):]
            t_params = self.transform_z()
            sigma2, Y, ___ = self._model(self.latent_variables.get_z_values())
            plt.plot(date_index, np.abs(Y-t_params[-1]), label=self.data_name + ' Absolute Demeaned Values')
            plt.plot(date_index, np.power(sigma2, 0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility', c='black')
            plt.title(self.data_name + " Volatility Plot")
            plt.legend(loc=2)
            plt.show()

    def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
        """ Makes forecast with the estimated model

        Parameters
        ----------
        h : int (default : 5)
            How many steps ahead would you like to forecast?
        past_values : int (default : 20)
            How many past observations to show on the forecast graph?
        intervals : Boolean
            Would you like to show prediction intervals for the forecast?

        Returns
        ----------
        - Plot of the forecast
        """
        import matplotlib.pyplot as plt
        import seaborn as sns  # imported for its plot styling side effect

        figsize = kwargs.get('figsize', (10, 7))

        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            # Retrieve data, dates and (transformed) latent variables
            sigma2, Y, scores = self._model(self.latent_variables.get_z_values())
            date_index = self.shift_dates(h)

            if self.latent_variables.estimation_method in ['M-H']:
                sim_vector = self._sim_prediction_bayes(h, 15000)
                error_bars = []
                for pre in range(5, 100, 5):
                    error_bars.append(np.insert([np.percentile(i, pre) for i in sim_vector], 0, sigma2[-1]))
                forecasted_values = np.insert([np.mean(i) for i in sim_vector], 0, sigma2[-1])
                plot_values = np.append(sigma2[-1-past_values:-2], forecasted_values)
                plot_index = date_index[-h-past_values:]
            else:
                t_z = self.transform_z()
                sim_values = self._sim_prediction(sigma2, Y, scores, h, t_z, 15000)
                error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(sigma2, sim_values, date_index, h, past_values)

            plt.figure(figsize=figsize)
            if intervals == True:
                # Symmetric fan chart: pair the pth and (100-p)th percentiles
                alpha = [0.15*i/float(100) for i in range(50, 12, -2)]
                for count, pre in enumerate(error_bars):
                    plt.fill_between(date_index[-h-1:], error_bars[count], error_bars[-count-1], alpha=alpha[count])
            plt.plot(plot_index, plot_values)
            plt.title("Forecast for " + self.data_name + " Conditional Volatility")
            plt.xlabel("Time")
            plt.ylabel(self.data_name + " Conditional Volatility")
            plt.show()

    def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs):
        """ Makes dynamic in-sample predictions with the estimated model

        Parameters
        ----------
        h : int (default : 5)
            How many steps would you like to forecast?
        fit_once : boolean
            (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
        fit_method : string
            Which method to fit the model with
        intervals: boolean
            Whether to return prediction intervals

        Returns
        ----------
        - pd.DataFrame with predicted values
        """
        predictions = []

        # Roll the estimation window forward one step at a time
        for t in range(0, h):
            x = GARCH(p=self.p, q=self.q, data=self.data[0:-h+t])
            if fit_once is False:
                x.fit(method=fit_method, printer=False)
            if t == 0:
                if fit_once is True:
                    x.fit(method=fit_method, printer=False)
                    saved_lvs = x.latent_variables
                predictions = x.predict(1, intervals=intervals)
            else:
                if fit_once is True:
                    x.latent_variables = saved_lvs
                predictions = pd.concat([predictions, x.predict(1, intervals=intervals)])

        if intervals is True:
            predictions.rename(columns={0: self.data_name, 1: "1% Prediction Interval",
                                        2: "5% Prediction Interval", 3: "95% Prediction Interval",
                                        4: "99% Prediction Interval"}, inplace=True)
        else:
            predictions.rename(columns={0: self.data_name}, inplace=True)

        predictions.index = self.index[-h:]

        return predictions

    def plot_predict_is(self, h=5, fit_once=True, fit_method='MLE', **kwargs):
        """ Plots forecasts with the estimated model against data
        (Simulated prediction with data)

        Parameters
        ----------
        h : int (default : 5)
            How many steps to forecast
        fit_once : boolean
            (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
        fit_method : string
            Which method to fit the model with

        Returns
        ----------
        - Plot of the forecast against data
        """
        import matplotlib.pyplot as plt
        import seaborn as sns  # imported for its plot styling side effect

        figsize = kwargs.get('figsize', (10, 7))

        plt.figure(figsize=figsize)
        date_index = self.index[-h:]
        predictions = self.predict_is(h, fit_method=fit_method, fit_once=fit_once)
        data = self.data[-h:]

        t_params = self.transform_z()
        # Compare absolute demeaned data against predicted volatility
        plt.plot(date_index, np.abs(data-t_params[-1]), label='Data')
        plt.plot(date_index, np.power(predictions, 0.5), label='Predictions', c='black')
        plt.title(self.data_name)
        plt.legend(loc=2)
        plt.show()

    def predict(self, h=5, intervals=False):
        """ Makes forecast with the estimated model

        Parameters
        ----------
        h : int (default : 5)
            How many steps ahead would you like to forecast?
        intervals : boolean (default: False)
            Whether to return prediction intervals

        Returns
        ----------
        - pd.DataFrame with predicted values
        """
        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            # Retrieve data, dates and (transformed) latent variables
            sigma2, Y, scores = self._model(self.latent_variables.get_z_values())
            date_index = self.shift_dates(h)

            if self.latent_variables.estimation_method in ['M-H']:
                sim_vector = self._sim_prediction_bayes(h, 15000)
                forecasted_values = np.array([np.mean(i) for i in sim_vector])
                prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector])
                prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector])
                prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector])
                prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector])
            else:
                t_z = self.transform_z()
                mean_values = self._sim_predicted_mean(sigma2, Y, scores, h, t_z, 15000)
                forecasted_values = mean_values[-h:]

            if intervals is False:
                result = pd.DataFrame(forecasted_values)
                result.rename(columns={0: self.data_name}, inplace=True)
            else:
                # For non-Bayesian methods the intervals come from fresh sims
                if self.latent_variables.estimation_method not in ['M-H']:
                    sim_values = self._sim_prediction(sigma2, Y, scores, h, t_z, 15000)
                    prediction_01 = np.array([np.percentile(i, 1) for i in sim_values])
                    prediction_05 = np.array([np.percentile(i, 5) for i in sim_values])
                    prediction_95 = np.array([np.percentile(i, 95) for i in sim_values])
                    prediction_99 = np.array([np.percentile(i, 99) for i in sim_values])

                result = pd.DataFrame([forecasted_values, prediction_01, prediction_05,
                                       prediction_95, prediction_99]).T
                result.rename(columns={0: self.data_name, 1: "1% Prediction Interval",
                                       2: "5% Prediction Interval", 3: "95% Prediction Interval",
                                       4: "99% Prediction Interval"},
                              inplace=True)

            result.index = date_index[-h:]

            return result

    def sample(self, nsims=1000):
        """ Samples from the posterior predictive distribution

        Parameters
        ----------
        nsims : int (default : 1000)
            How many draws from the posterior predictive distribution

        Returns
        ----------
        - np.ndarray of draws from the data
        """
        if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
            raise Exception("No latent variables estimated!")
        else:
            lv_draws = self.draw_latent_variables(nsims=nsims)
            sigmas = [self._model(lv_draws[:, i])[0] for i in range(nsims)]
            data_draws = np.array([ss.norm.rvs(loc=self.latent_variables.z_list[-1].prior.transform(lv_draws[-1, i]),
                                               scale=np.sqrt(sigmas[i])) for i in range(nsims)])
            return data_draws

    def plot_sample(self, nsims=10, plot_data=True, **kwargs):
        """
        Plots draws from the posterior predictive density against the data

        Parameters
        ----------
        nsims : int (default : 10)
            How many draws from the posterior predictive distribution
        plot_data : boolean
            Whether to plot the data or not
        """
        if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
            raise Exception("No latent variables estimated!")
        else:
            import matplotlib.pyplot as plt
            import seaborn as sns  # imported for its plot styling side effect

            figsize = kwargs.get('figsize', (10, 7))
            plt.figure(figsize=figsize)
            date_index = self.index[max(self.p, self.q):]
            sigma2, Y, ___ = self._model(self.latent_variables.get_z_values())
            draws = self.sample(nsims).T
            plt.plot(date_index, draws, label='Posterior Draws', alpha=1.0)
            if plot_data is True:
                plt.plot(date_index, Y, label='Data', c='black', alpha=0.5, linestyle='', marker='s')
            plt.title(self.data_name)
            plt.show()

    def ppc(self, nsims=1000, T=np.mean):
        """ Computes posterior predictive p-value

        Parameters
        ----------
        nsims : int (default : 1000)
            How many draws for the PPC
        T : function
            A discrepancy measure - e.g. np.mean, np.std, np.max

        Returns
        ----------
        - float (posterior predictive p-value)
        """
        if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
            raise Exception("No latent variables estimated!")
        else:
            # self.sample() performs the posterior predictive draws itself;
            # drawing latent variables here separately was dead code.
            T_sims = T(self.sample(nsims=nsims), axis=1)
            T_actual = T(self.data)
            # float() guards against integer division under Python 2
            return len(T_sims[T_sims > T_actual]) / float(nsims)

    def plot_ppc(self, nsims=1000, T=np.mean, **kwargs):
        """ Plots histogram of the discrepancy from draws of the posterior

        Parameters
        ----------
        nsims : int (default : 1000)
            How many draws for the PPC
        T : function
            A discrepancy measure - e.g. np.mean, np.std, np.max
        """
        if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
            raise Exception("No latent variables estimated!")
        else:
            import matplotlib.pyplot as plt
            import seaborn as sns

            figsize = kwargs.get('figsize', (10, 7))

            # self.sample() performs the posterior predictive draws itself;
            # drawing latent variables here separately was dead code.
            T_sim = T(self.sample(nsims=nsims), axis=1)
            T_actual = T(self.data)

            if T == np.mean:
                description = " of the mean"
            elif T == np.max:
                description = " of the maximum"
            elif T == np.min:
                description = " of the minimum"
            elif T == np.median:
                description = " of the median"
            else:
                description = ""

            plt.figure(figsize=figsize)
            ax = plt.subplot()
            ax.axvline(T_actual)
            sns.distplot(T_sim, kde=False, ax=ax)
            ax.set(title='Posterior predictive' + description, xlabel='T(x)', ylabel='Frequency')
            plt.show()
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Model using memory component.
The model embeds images using a standard CNN architecture.
These embeddings are used as keys to the memory component,
which returns nearest neighbors.
"""
import tensorflow as tf
import memory
FLAGS = tf.flags.FLAGS
class BasicClassifier(object):
  """Trivial classifier head: memory output is used directly as prediction."""

  def __init__(self, output_dim):
    self.output_dim = output_dim

  def core_builder(self, memory_val, x, y):
    """Return (zero loss, memory output as prediction); x and y are unused."""
    del x, y
    return 0.0, memory_val
class LeNet(object):
  """Standard CNN architecture.

  Two conv/conv/max-pool stages followed by a fully connected projection
  to `hidden_dim`. Pooling twice with stride 2 shrinks each spatial side
  by a factor of 4, which the fc1 weight shape depends on.
  """

  def __init__(self, image_size, num_channels, hidden_dim):
    # image_size: side length of the (square) input images
    # num_channels: input channels (e.g. 1 for grayscale)
    # hidden_dim: size of the output embedding
    self.image_size = image_size
    self.num_channels = num_channels
    self.hidden_dim = hidden_dim
    self.matrix_init = tf.truncated_normal_initializer(stddev=0.1)
    self.vector_init = tf.constant_initializer(0.0)

  def core_builder(self, x):
    """Embeds x using standard CNN architecture.

    Args:
      x: Batch of images as a 2-d Tensor [batch_size, -1].

    Returns:
      A 2-d Tensor [batch_size, hidden_dim] of embedded images.
    """
    # Doubled channel counts relative to classic LeNet.
    ch1 = 32 * 2  # number of channels in 1st layer
    ch2 = 64 * 2  # number of channels in 2nd layer
    conv1_weights = tf.get_variable('conv1_w',
                                    [3, 3, self.num_channels, ch1],
                                    initializer=self.matrix_init)
    conv1_biases = tf.get_variable('conv1_b', [ch1],
                                   initializer=self.vector_init)
    conv1a_weights = tf.get_variable('conv1a_w',
                                     [3, 3, ch1, ch1],
                                     initializer=self.matrix_init)
    conv1a_biases = tf.get_variable('conv1a_b', [ch1],
                                    initializer=self.vector_init)

    conv2_weights = tf.get_variable('conv2_w', [3, 3, ch1, ch2],
                                    initializer=self.matrix_init)
    conv2_biases = tf.get_variable('conv2_b', [ch2],
                                   initializer=self.vector_init)
    conv2a_weights = tf.get_variable('conv2a_w', [3, 3, ch2, ch2],
                                     initializer=self.matrix_init)
    conv2a_biases = tf.get_variable('conv2a_b', [ch2],
                                    initializer=self.vector_init)

    # fully connected
    # NOTE(review): `a // 4 * a // 4 * ch2` evaluates left-to-right as
    # ((a // 4) * a) // 4 * ch2; equal to (a//4)**2 * ch2 only when
    # image_size is divisible by 4 — presumably always true here, verify.
    fc1_weights = tf.get_variable(
        'fc1_w', [self.image_size // 4 * self.image_size // 4 * ch2,
                  self.hidden_dim], initializer=self.matrix_init)
    fc1_biases = tf.get_variable('fc1_b', [self.hidden_dim],
                                 initializer=self.vector_init)

    # define model
    x = tf.reshape(x,
                   [-1, self.image_size, self.image_size, self.num_channels])
    batch_size = tf.shape(x)[0]

    # Stage 1: conv -> relu -> conv -> relu -> 2x2 max-pool (side / 2)
    conv1 = tf.nn.conv2d(x, conv1_weights,
                         strides=[1, 1, 1, 1], padding='SAME')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    conv1 = tf.nn.conv2d(relu1, conv1a_weights,
                         strides=[1, 1, 1, 1], padding='SAME')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1a_biases))
    pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')

    # Stage 2: same structure, halving the spatial side again (side / 4)
    conv2 = tf.nn.conv2d(pool1, conv2_weights,
                         strides=[1, 1, 1, 1], padding='SAME')
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    conv2 = tf.nn.conv2d(relu2, conv2a_weights,
                         strides=[1, 1, 1, 1], padding='SAME')
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2a_biases))
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')

    # Flatten and project to the embedding dimension (no final activation).
    reshape = tf.reshape(pool2, [batch_size, -1])
    hidden = tf.matmul(reshape, fc1_weights) + fc1_biases
    return hidden
class Model(object):
    """Model for coordinating between CNN embedder and Memory module."""

    def __init__(self, input_dim, output_dim, rep_dim, memory_size, vocab_size,
                 learning_rate=0.0001, use_lsh=False):
        # input_dim: flattened image size; get_embedder assumes a square,
        #   single-channel image of side input_dim ** 0.5 — TODO confirm
        # output_dim: number of classes the classifier predicts over
        # rep_dim: width of the embeddings / memory keys
        # memory_size: number of slots in the external memory
        # vocab_size: number of distinct labels the memory can store
        # use_lsh: use the LSH-approximate memory instead of exact lookup
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.rep_dim = rep_dim
        self.memory_size = memory_size
        self.vocab_size = vocab_size
        self.learning_rate = learning_rate
        self.use_lsh = use_lsh

        self.embedder = self.get_embedder()
        self.memory = self.get_memory()
        self.classifier = self.get_classifier()

        self.global_step = tf.train.get_or_create_global_step()

    def get_embedder(self):
        # square single-channel image assumed (side = sqrt(input_dim))
        return LeNet(int(self.input_dim ** 0.5), 1, self.rep_dim)

    def get_memory(self):
        # exact memory by default; LSH variant trades accuracy for speed
        cls = memory.LSHMemory if self.use_lsh else memory.Memory
        return cls(self.rep_dim, self.memory_size, self.vocab_size)

    def get_classifier(self):
        return BasicClassifier(self.output_dim)

    def core_builder(self, x, y, keep_prob, use_recent_idx=True):
        """Build the shared graph: embed x, apply dropout (train only),
        query memory, classify. Returns (total loss, predictions)."""
        embeddings = self.embedder.core_builder(x)
        if keep_prob < 1.0:
            embeddings = tf.nn.dropout(embeddings, keep_prob)
        memory_val, _, teacher_loss = self.memory.query(
            embeddings, y, use_recent_idx=use_recent_idx)
        loss, y_pred = self.classifier.core_builder(memory_val, x, y)

        # teacher_loss trains the memory addressing alongside the classifier
        return loss + teacher_loss, y_pred

    def train(self, x, y):
        """Build the training sub-graph: loss plus the gradient-apply op."""
        loss, _ = self.core_builder(x, y, keep_prob=0.3)
        gradient_ops = self.training_ops(loss)
        return loss, gradient_ops

    def eval(self, x, y):
        """Build the eval sub-graph: no dropout, no recent-index shortcut."""
        _, y_preds = self.core_builder(x, y, keep_prob=1.0,
                                       use_recent_idx=False)
        return y_preds

    def get_xy_placeholders(self):
        # x: flat float images (batch, input_dim); y: int labels (batch,)
        return (tf.placeholder(tf.float32, [None, self.input_dim]),
                tf.placeholder(tf.int32, [None]))

    def setup(self):
        """Sets up all components of the computation graph."""

        self.x, self.y = self.get_xy_placeholders()

        # This context creates variables
        with tf.variable_scope('core', reuse=None):
            self.loss, self.gradient_ops = self.train(self.x, self.y)
        # And this one re-uses them (thus the `reuse=True`)
        with tf.variable_scope('core', reuse=True):
            self.y_preds = self.eval(self.x, self.y)

    def training_ops(self, loss):
        """Clipped-gradient Adam update op, incrementing global_step."""
        opt = self.get_optimizer()
        params = tf.trainable_variables()
        gradients = tf.gradients(loss, params)
        # global-norm clipping at 5.0 to keep updates bounded
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
        return opt.apply_gradients(zip(clipped_gradients, params),
                                   global_step=self.global_step)

    def get_optimizer(self):
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                      epsilon=1e-4)

    def one_step(self, sess, x, y):
        """Run one optimisation step; returns [loss value, grad-op result]."""
        outputs = [self.loss, self.gradient_ops]
        return sess.run(outputs, feed_dict={self.x: x, self.y: y})

    def episode_step(self, sess, x, y, clear_memory=False):
        """Performs training steps on episodic input.

        Args:
          sess: A Tensorflow Session.
          x: A list of batches of images defining the episode.
          y: A list of batches of labels corresponding to x.
          clear_memory: Whether to clear the memory before the episode.

        Returns:
          List of losses the same length as the episode.
        """

        outputs = [self.loss, self.gradient_ops]

        if clear_memory:
            self.clear_memory(sess)

        losses = []
        for xx, yy in zip(x, y):
            out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy})
            loss = out[0]
            losses.append(loss)

        return losses

    def predict(self, sess, x, y=None):
        """Predict the labels on a single batch of examples.

        Args:
          sess: A Tensorflow Session.
          x: A batch of images.
          y: The labels for the images in x.
            This allows for updating the memory.

        Returns:
          Predicted y.
        """

        # Storing current memory state to restore it after prediction
        # NOTE(review): tf.identity adds new graph nodes on every call; whether
        # this truly snapshots the memory before the sess.run mutates it
        # depends on memory.get()/set() semantics — verify.
        mem_keys, mem_vals, mem_age, _ = self.memory.get()
        cur_memory = (
            tf.identity(mem_keys),
            tf.identity(mem_vals),
            tf.identity(mem_age),
            None,
        )

        outputs = [self.y_preds]
        if y is None:
            ret = sess.run(outputs, feed_dict={self.x: x})
        else:
            ret = sess.run(outputs, feed_dict={self.x: x, self.y: y})

        # Restoring memory state
        self.memory.set(*cur_memory)

        return ret

    def episode_predict(self, sess, x, y, clear_memory=False):
        """Predict the labels on an episode of examples.

        Args:
          sess: A Tensorflow Session.
          x: A list of batches of images.
          y: A list of labels for the images in x.
            This allows for updating the memory.
          clear_memory: Whether to clear the memory before the episode.

        Returns:
          List of predicted y.
        """

        # Storing current memory state to restore it after prediction
        # NOTE(review): same snapshot caveat as predict() above.
        mem_keys, mem_vals, mem_age, _ = self.memory.get()
        cur_memory = (
            tf.identity(mem_keys),
            tf.identity(mem_vals),
            tf.identity(mem_age),
            None,
        )

        if clear_memory:
            self.clear_memory(sess)

        outputs = [self.y_preds]
        y_preds = []
        for xx, yy in zip(x, y):
            out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy})
            y_pred = out[0]
            y_preds.append(y_pred)

        # Restoring memory state
        self.memory.set(*cur_memory)

        return y_preds

    def clear_memory(self, sess):
        """Reset every memory slot."""
        sess.run([self.memory.clear()])
| |
import os
import psycopg2
import pickle
from bs4 import BeautifulSoup
# Module-level DB handle (NYC database) and a shared accumulator dict that the
# per-table parser functions below write their extracted fields into.
conn = psycopg2.connect(dbname='nyc')
cur = conn.cursor()

jobData = {}
def testing():
    """Load one sample job page from the pickled enlargement list and stash
    its <center> tables in the module-global `tables` for the parsers below."""
    global tables
    records = pickle.load(open('/home/cory/cityscan/nyc/db_import_scripts/enlargement_data.pickle', 'rb'))
    page = open(records[0][0] + '/jobs/' + records[0][1]).read()
    soup = BeautifulSoup(page)
    tables = soup.find('center').find_all('table')
def preprocess(table):
    """Return the table's <td> cells with spacer cells (clear.gif images) dropped.

    Fixed: the original removed items from the list while iterating it, which
    skips the element after each removal and can leave spacers behind.  We now
    build a filtered list instead.
    """
    kept = []
    for cell in table.find_all('td'):
        spacer = False
        for child in cell.children:
            try:
                if child.attrs['src'].lower() == './images/clear.gif':
                    spacer = True
                    break
            except (AttributeError, KeyError):
                # plain text nodes have no .attrs; tags may lack 'src'
                pass
        if not spacer:
            kept.append(cell)
    return kept
def blank2null(data):
    """Replace empty-string values with None, in place; returns the dict.

    Fixed: dict.iterkeys() is Python-2 only; plain iteration works on both
    Python 2 and 3 (only values change, so mutating during the loop is safe).
    """
    # Just remove items instead? (see delblank for that variant)
    for k in data:
        if data[k] == '':
            data[k] = None
    return data
def delblank(data):
    """Return a copy of `data` with empty-string values dropped.

    Fixed: dict.iteritems() is Python-2 only; .items() works on both versions.
    """
    return {key: value for key, value in data.items() if value != ''}
def samerow(tds):
    """Record 'Label: value' pairs that live inside a single cell into jobData.

    Cells without a colon, or with an empty value part, are skipped.
    """
    for cell in tds:
        parts = cell.text.split(':')
        if len(parts) < 2:
            continue
        value = " ".join(parts[1].split())
        if value:
            jobData[" ".join(parts[0].split())] = value
def seprows(tds):
    """Record label/value pairs where the label cell (ending in ':') is
    followed by a separate value cell.  A label followed by another label
    records None.  IndexError on a trailing label propagates to callers
    (some catch it deliberately)."""
    for idx, cell in enumerate(tds):
        if ':' not in cell.text:
            continue
        key = " ".join(cell.text.replace(':', '').split())
        nxt = tds[idx + 1]
        if ':' in nxt.text:
            jobData[key] = None
        else:
            jobData[key] = " ".join(nxt.text.split())
def getimgtds(tds):
    """Map cell index -> 'src' attribute of a child image tag.

    Text-only children (no .attrs) and tags without 'src' are ignored;
    when a cell holds several images, the last one's src wins.
    """
    found = {}
    for idx, cell in enumerate(tds):
        for child in cell.children:
            try:
                found[idx] = child.attrs['src']
            except KeyError:
                pass
            except AttributeError:
                pass
    return found
def getimgrows(tds):
    """Indices of cells whose image src contains 'box' (checkbox rows).

    Fixed: dict.iteritems() is Python-2 only; .items() works on 2 and 3.
    """
    imgtds = getimgtds(tds)
    return [idx for idx, src in imgtds.items() if 'box' in src]
def stripempty(tds, positions=False):
    """Drop blank 'content' cells from tds.

    Fixed: the original removed items while iterating, skipping the element
    after each removal (adjacent blanks survived).

    NOTE(review): with positions=True the original collected separator indices
    into a local list but still returned tds untouched; that (odd) contract is
    preserved here.
    """
    def _is_blank(cell):
        try:
            return 'content' in cell.attrs['class'] and cell.text.strip() == ''
        except (AttributeError, KeyError):
            return False

    if positions:
        return tds
    return [cell for cell in tds if not _is_blank(cell)]
def onlychildren(tds):
    """Keep only cells that have at least one child node.

    Fixed: the original removed items while iterating, which skipped the
    element after each removal — that is why callers had to invoke it three
    times in a row.  A single-pass filter is idempotent, so repeated calls
    remain harmless.
    """
    return [cell for cell in tds if list(cell.children)]
def imgchildren(tds):
    """Given a single cell, return the text following a checked yes-box image,
    or None.

    NOTE(review): despite the plural name this takes one td, not a list.
    The `else: return None` fires on the *first* child that has a 'src' but is
    not a check/yes box, so later children are never examined — confirm that
    early exit is intended.  Plain-text children raise AttributeError and are
    skipped by the bare except; falling off the loop also yields None.
    """
    chdn = list(tds.children)
    for c, d in enumerate(chdn):
        try:
            if d.attrs['src'] in ['images/box_check.gif', 'images/yes_box.gif']:
                # the label is assumed to be the next (text) sibling
                return " ".join(chdn[c+1].split())
            else:
                return None
        except:
            pass
def overview(table):
    """Record 'Label: value' pairs from the Premises/overview table,
    skipping the BIN row (handled elsewhere)."""
    # if 'Premises' in ...
    for cell in preprocess(table):
        pieces = cell.text.split(':')
        if pieces[0].strip() == 'BIN':
            continue
        try:
            jobData[pieces[0].strip()] = pieces[1].strip()
        except IndexError:
            # cell had no colon / no value part
            continue
def devchallenge(table):
    """Set jobData['devChallenge'] from the Development Challenge table.

    Still needs verifying against an affirmative example; per the original
    notes this may be time-sensitive (45-day window).

    Fixed: the original tested list membership (`phrase in [cell texts]`),
    which only matches a cell whose *entire* text equals the phrase; the
    phrase is the start of a longer sentence, so substring search is what was
    intended (the author marked this "NEEDS FIXIN").
    """
    texts = [cell.text.strip() for cell in table.find_all('td')]
    jobData['devChallenge'] = not any('This job is not' in t for t in texts)
def dates(table):
    """Parse the 'Last Action' / dates table.

    Pass 1 records same-cell 'Label: value' pairs.  Pass 2 finds labels whose
    value cell is separate (value part empty and label not yet recorded) and
    remembers their positions; pass 3 records the text of the following cell.

    Fixed: dict.iterkeys() is Python-2 only; `in jobData` / `in datekeys`
    are equivalent and work on both Python 2 and 3.
    """
    tds = preprocess(table)
    for cell in tds:
        text = cell.text
        if ':' in text and text.split(':')[1].strip() != '':
            jobData[" ".join(text.split(':')[0].split())] = " ".join(text.split(':')[1].split())
    datekeys = {}
    for idx, cell in enumerate(tds):
        if ':' in cell.text and " ".join(cell.text.split(':')[0].split()) not in jobData:
            datekeys[" ".join(cell.text.replace(':', '').split())] = idx
    for cell in tds:
        label = " ".join(cell.text.replace(':', '').split())
        if label in datekeys:
            jobData[label] = " ".join(tds[datekeys[label] + 1].text.split())
def locinfo(table):
    """'Location Information' table: labels and their values sit in separate
    cells, so delegate to the separate-row parser."""
    # to trust the td classes or not...
    # assumes no *value* ever contains a colon — unverified
    seprows(preprocess(table))
def applicant(table):
    """Parse the 'Applicant of Record' table: text label/value pairs via
    seprows, then checked-box rows keyed off a nearby label cell."""
    # applicant of record in .... .lower()
    # will result in key of "Name" - may need to populate local dict
    tds = preprocess(table)
    imgtds = getimgtds(tds)
    imgrows = getimgrows(tds)
    # Will grab label for imgrow as last item
    textrows = []
    for i, data in enumerate(tds):
        if i not in [k for k in imgtds.iterkeys()]:
            textrows.append(data)
    imglabels = []
    try:
        seprows(textrows)
        # DOES THIS ACTUALLY WORK???
    except IndexError:
        # NOTE(review): relies on `i`/`data` leaking out of the loop above
        # (last cell only) — fragile, and imglabels is never read afterwards.
        imglabels.append((i, data))
    checks = []
    # have to re-clear this every time
    # wrap in function, duh
    for i in imgrows:
        checks.append([t for t in tds[i].children])
    for c in checks:
        for d, data in enumerate(c):
            try:
                if data.attrs['src'] == 'images/box_check.gif':
                    # NOTE(review): `i` here is whatever the previous loop left
                    # behind (the *last* imgrow), so every checked box is keyed
                    # off the same label cell — confirm this is intended.
                    jobData[" ".join(tds[i-1].text.replace(':', '').split())] = " ".join(c[d+1].split())
            except:
                pass
def prevapplicant(table):
    """Record each column-heading ('colhdg') cell's text as a key, with the
    following cell's text as its value (Directive 14 table)."""
    tds = preprocess(table)
    for idx, cell in enumerate(tds):
        try:
            if 'colhdg' in cell.attrs['class']:
                jobData[" ".join(cell.text.split())] = " ".join(tds[idx + 1].text.split())
        except KeyError:
            # cell carries no class attribute
            pass
def filingrep(table):
    """Parse the 'Filing Representative' table via the separate-row parser."""
    # Key collision with applicant
    tds = preprocess(table)
    try:
        seprows(tds)
    except IndexError:
        # NOTE(review): `data` is not defined in this scope, so this handler
        # raises NameError instead of recording a null value — the fallback
        # has never actually worked; verify intent before fixing.
        jobData[" ".join(data.text.replace(':', '').split())] = None
def jobtype(table):
    """Parse the 'Job Types' table: the checked box determines jobTypes, and a
    second pass records checked boxes that share a cell with their label."""
    # TODO: Needs fixin
    # if boxes checked under Alteration Type 1
    tds = preprocess(table)
    # get the job type: a checked box's *following cell* holds the type text;
    # if several boxes are checked, the last one wins
    for i, data in enumerate(tds):
        for c in data.children:
            try:
                if c.attrs['src'] == 'images/box_check.gif':
                    jobData['jobTypes'] = " ".join(tds[i+1].text.split())
            except:
                pass
    # D14 assistance
    # probably a way to fold this into the loop above, but kept separate;
    # fragile — especially deriving the key from the first text line
    for i, data in enumerate(tds):
        chillins = [d for d in data.children]
        if len(chillins) > 0:
            for counter, tag in enumerate(chillins):
                try:
                    if tag.attrs['src'] == 'images/box_check.gif' and data.text.split('\n')[0].strip() != '':
                        # key: first line of the cell text; value: node after the box
                        jobData[data.text.split('\n')[0].strip()] = " ".join(chillins[counter+1].split())
                except KeyError:
                    # tag without a 'src' attribute
                    pass
                except AttributeError:
                    # plain text node, no .attrs
                    pass
def worktype(table):
    """Collect the text following every checked box into jobData['workTypes']."""
    tds = preprocess(table)
    checked = []
    for row in getimgrows(tds):
        for child in tds[row].children:
            try:
                if child.attrs['src'] == 'images/box_check.gif':
                    checked.append(" ".join(tds[row + 1].text.split()))
            except AttributeError:
                # plain text node, no .attrs
                pass
    jobData['workTypes'] = checked
def submitted(table):
    """Record 'Label: value' pairs from the plans-submitted table, ignoring
    rows whose value is the placeholder 'Not Provided'."""
    for cell in preprocess(table):
        parts = cell.text.split(':')
        if len(parts) < 2:
            continue
        value = " ".join(parts[1].split())
        if value != 'Not Provided':
            jobData[" ".join(parts[0].split())] = value
def enlargement(table):
    """Record square-footage figures and, when an enlargement is proposed,
    the enlargement type(s) under jobData['enlargement']."""
    tds = preprocess(table)
    # get construction floor area
    # NOTE: if enlargement is Y then there will be an additional
    # enlargement sqft field
    #
    # sqft values keep their commas; turning them into integers is left
    # to application code (numbers appear bracketed nicely between nbsps)
    for i in tds:
        if i.text.find(':') > -1:
            jobData[" ".join(i.text.split(':')[0].split())] = " ".join(i.text.split(':')[1].split())
    tds = stripempty(tds)
    # Get enlargement yes/no and type(s)
    # If type is singular, will return string, else list of both
    # So, only if an enlargment is proposed will the jobData dict have
    # an enlargement key and the type(s) as values
    # NOTE: Is this good or bad data model?
    # Enlargement should be nested dict,
    # TODO: Don't have enlargement categories scattered throughout —
    # the fewer top-level keys, the better
    enltypes = []
    for i, data in enumerate(tds):
        if data.text.lower().strip() == 'enlargement proposed?':
            # cell after the question holds the yes/no checkbox images
            chdn = [c for c in tds[i+1].children]
            for co, da in enumerate(chdn):
                try:
                    if da.attrs['src'] == 'images/box_check.gif' and chdn[co+1].lower().strip() == 'yes':
                        # the next cell lists the enlargement-type checkboxes
                        hv = [h for h in tds[i+2].children]
                        for oc, ad in enumerate(hv):
                            try:
                                if ad.attrs['src'] == 'images/box_check.gif':
                                    enltypes.append(hv[oc+1].lower().strip())
                            except:
                                pass
                except:
                    pass
    if len(enltypes) > 1:
        jobData['enlargement'] = enltypes
    elif len(enltypes) == 1:
        jobData['enlargement'] = enltypes[0]
def additional(table):
    """Set a True flag in jobData for each checked 'Additional Considerations'
    box, keyed by the text of the preceding cell."""
    tds = preprocess(table)
    # apparently has to be applied more than once to settle
    tds = onlychildren(onlychildren(onlychildren(tds)))
    for row in getimgrows(tds):
        for node in tds[row].children:
            try:
                if node.attrs['src'] in ['images/box_check.gif', 'images/yes_box.gif']:
                    jobData[tds[row - 1].text.strip()] = True
            except:
                pass
def calendar(table):
    """'BSA Calendar' table: labels and values are in separate cells."""
    seprows(preprocess(table))
def energycon(table):
    """NYCECC compliance: the relevant value is assumed to always be the
    table's last cell."""
    cells = preprocess(table)
    jobData['nyceccCompliance'] = cells[-1].text.strip()
def jobdesc(table):
    """Record the job description (the cell after the 'Job Description'
    header), then sweep any same-cell label/value pairs."""
    tds = stripempty(preprocess(table))
    for idx, cell in enumerate(tds):
        if 'Job Description' in cell.text:
            jobData['jobDesc'] = " ".join(tds[idx + 1].text.split())
    samerow(tds)
def getkeys(tds, rownum=True):
    """ Rownum has to be False in most cases it seems - children under td will have same rownum"""
    # Collect label cells: a <b> tag or anything carrying class 'label'.
    # rownum=True  -> {cell index: label text}
    # rownum=False -> {label text: cell index}
    keys = {}
    for i, d in enumerate(tds):
        try:
            if d.name == 'b' or 'label' in d.attrs['class']:
                if rownum:
                    keys[i] = " ".join(d.text.split())
                else:
                    keys[" ".join(d.text.split())] = i
        except:
            pass
        if list(d.children):
            # also look one level down for labels nested inside the cell
            for b in list(d.children):
                try:
                    # NOTE(review): this checks d.attrs (the parent) rather
                    # than b.attrs — looks like a copy/paste slip, but the
                    # bare except hides any consequence; confirm before fixing.
                    if b.name == 'b' or 'label' in d.attrs['class']:
                        if rownum:
                            keys[i] = " ".join(b.text.split())
                        else:
                            keys[" ".join(b.text.split())] = i
                except:
                    pass
    return keys
def zoning(table):
    """Parse the 'Zoning Characteristics' table into jobData.

    Grabs the simple 'Label: value' rows first (Districts, Map No., street
    legal width, tax lots), then walks the cells once more for the structured
    pieces: street status, the existing/proposed matrix, lot details, yards,
    perimeter wall height and parking.

    Fixed: the yard branches wrote to jobData['yard'] while the dict was
    created under 'yards', so every yard detail died as a KeyError silently
    swallowed by the bare except; all branches now use 'yards'.  Also dropped
    an unused getkeys() result and the shadowing of the outer loop variable.
    """
    tds = preprocess(table)
    tds = stripempty(tds)
    # Low-hanging fruit: single-colon 'Label: value' cells, excluding rows
    # that get structured handling below.  (Can we assume no *value* ever
    # contains a colon?  Unverified.)
    for i in tds:
        if i.text.find(':') > -1 and " ".join(i.text.split(':')[1].split()) and " ".join(i.text.split()) not in ['Proposed: Use', 'Lot Type: Corner Interior Through', 'Street status']:
            if i.text.count(':') < 2:
                jobData[" ".join(i.text.split(':')[0].split())] = " ".join(i.text.split(':')[1].split())
    for i, d in enumerate(tds):
        try:
            if " ".join(d.text.split()) == 'Street status:':
                # checked box in the next cell; its following sibling is the label
                chdn = list(tds[i+1].children)
                for c, b in enumerate(chdn):
                    try:
                        if b.attrs['src'] == 'images/box_check.gif':
                            jobData[u'streetStatus'] = " ".join(chdn[c+1].split())
                    except:
                        pass
            elif len(d.find_all('td')):
                # nested sub-table: a 4x3 existing/proposed matrix
                matrix = d
                rows = matrix.find_all('tr')
                proposed = rows[2].find_all('td')[-3:]
                existing = rows[3].find_all('td')[-3:]
                jobData[u'zoningArea'] = {u'existing': " ".join(existing[0].text.split()), u'proposed': " ".join(proposed[0].text.split())}
                jobData[u'district'] = {u'existing': " ".join(existing[1].text.split()), u'proposed': " ".join(proposed[1].text.split())}
                jobData[u'FAR'] = {u'existing': " ".join(existing[2].text.split()), u'proposed': " ".join(proposed[2].text.split())}
            elif " ".join(d.text.split()).find('Lot Type') > -1:
                lotdeets = list(d.children)
                lottypes = []
                for a, b in enumerate(lotdeets):
                    try:
                        if b.attrs['src'] == 'images/box_check.gif':
                            lottypes.append(" ".join(lotdeets[a+1].split()))
                    except:
                        pass
                jobData[u'lot'] = {u'type': lottypes, u'coverage': None, u'area': None, u'width': None}
            elif " ".join(d.text.split()).find('Lot Coverage (%)') > -1:
                if " ".join(d.text.split(':')[1].split()):
                    jobData[u'lot'][u'coverage'] = " ".join(d.text.split(':')[1].split())
            elif d.text.find('Lot Area') > -1:
                # assuming Lot Area and Lot Width (with their values) always share a cell
                jobData[u'lot'][u'area'] = d.text[d.text.find(':')+1:d.text.find('Lot Width')]
                jobData[u'lot'][u'width'] = " ".join(d.text.split(':')[-1].split())
            elif d.text.find('Proposed Yard Details') > -1:
                jobData['yards'] = {'noYards': None, 'front': None, 'rear': None, 'rearEquivalent': None, 'side1': None, 'side2': None}
                chdn = list(tds[i+1].children)
                for c, node in enumerate(chdn):
                    try:
                        if node.attrs['src'] in ['images/box_check.gif', 'images/yes_box.gif']:
                            jobData['yards']['noYards'] = True
                        else:
                            jobData['yards']['noYards'] = False
                    except:
                        pass
            elif d.text.find('Front Yard') > -1:
                yards = d.text.split(':')
                jobData['yards']['front'] = " ".join(yards[1][:yards[1].find('Rear Yard')].split())
                jobData['yards']['rear'] = " ".join(yards[2][:yards[2].find('Rear Yard')].split())
                jobData['yards']['rearEquivalent'] = " ".join(yards[-1].split())
            elif d.text.find('Side Yard') > -1:
                sideyards = d.text.split(':')
                jobData['yards']['side1'] = " ".join(sideyards[1][:sideyards[1].find('Side Yard')].split())
                jobData['yards']['side2'] = " ".join(sideyards[2][:sideyards[2].find('Side Yard')].split())
            elif d.text.find('Perimeter Wall Height') > -1:
                # key typo ('perimter') kept deliberately: downstream consumers
                # may already read it — fix in a coordinated schema change
                jobData['perimterWallHeight'] = " ".join(d.text.split(':')[1].split())
            elif d.text.find('Enclosed Parking') > -1:
                jobData['parking'] = {'enclosed': imgchildren(tds[i+1]), 'spaces': " ".join(list(d.children)[-1].split())}
        except:
            pass
def codedesignation(contents):
    """Parse occupancy/construction code designations.

    NOTE(review): this function appears unfinished and cannot run as written:
    `tds` is referenced before assignment (NameError on the first loop
    iteration), the loop over `contents` never uses `contents`, the nested
    occupancy dicts are later overwritten with plain strings, and the bare
    `jobData['occupancy']['existing']` expression near the bottom is a no-op.
    Verify intent before relying on it.
    """
    for a, b in enumerate(contents):
        tds = stripempty(tds)
    jobData['occupancy'] = { 'existing': {'classification': None, '2008code': None},
                             'proposed': {'classification': None, '2008code': None} }
    jobData['construction'] = { 'existing': {'classification': None, '2008code': None},
                                'proposed': {'classification': None, '2008code': None} }
    # NOTE: This would work, except the "Proposed:" key will overwrite existing
    for i, d in enumerate(tds):
        if d.text.find('Occupancy Classification') > -1:
            # overwrites the nested dicts created above with flat strings
            jobData['occupancy']['existing'] = " ".join(tds[i+1].text.split())
            jobData['occupancy']['proposed'] = " ".join(tds[i+4].text.split())
            chdn = tds[i+2].contents
            for a, b in enumerate(chdn):
                try:
                    if b.attrs['src'] == 'images/box_check.gif':
                        label = chdn[a+1:]
                        for l in label:
                            try:
                                if l.name == 'b' and " ".join(l.text.split()).lower() == 'no':
                                    # NOTE(review): bare expression — does nothing
                                    jobData['occupancy']['existing']
                            except:
                                pass
                except:
                    pass
def nextrow(children, datadict=jobData):
    """Return True when the checked box in `children` is followed by 'yes',
    False for any other label, None if no checked box is found.

    Fixed: the original indexed the undefined global `chdn` instead of the
    `children` parameter, raising NameError whenever a box was checked.

    NOTE(review): `datadict` is currently unused; kept for interface
    compatibility.
    """
    for i, data in enumerate(children):
        try:
            if data.attrs['src'] == 'images/box_check.gif':
                return " ".join(children[i + 1].lower().split()) == 'yes'
        except:
            pass
def colhdgs():
    """Debug helper: print every column-heading cell across the parsed tables.

    Fixed: the Python-2-only `print x` statement is now the call form, which
    behaves identically for a single argument on both Python 2 and 3.
    """
    for table in tables:
        for cell in table.find_all('td'):
            try:
                if 'colhdg' in cell.attrs['class']:
                    print(cell.text.strip())
            except:
                pass
def hitme():
    """Route each parsed table to its section handler, keyed on header text.

    Fixed: the 'Job Types' branch called jobtypes(), which does not exist
    (NameError); the handler defined above is jobtype().
    """
    for i in tables:
        text = i.text
        if 'Premises' in text:
            overview(i)
        elif 'Last Action' in text:
            dates(i)
        elif 'Location Information' in text:
            locinfo(i)
        elif 'Applicant of Record' in text:
            applicant(i)
        elif 'Directive 14' in text:
            prevapplicant(i)
        elif 'Filing Representative' in text:
            filingrep(i)
        elif 'Job Types' in text:
            jobtype(i)
        elif 'Work Types' in text:
            worktype(i)
        elif 'Plans/Construction Documents Submitted' in text:
            submitted(i)
        elif 'Additional Information' in text:
            enlargement(i)
        elif 'Additional Considerations' in text:
            additional(i)
        elif 'BSA Calendar' in text:
            calendar(i)
        elif 'NYCECC Compliance' in text:
            energycon(i)
        elif 'Job Description' in text:
            jobdesc(i)
        elif 'Zoning Characteristics' in text:
            zoning(i)
| |
# Read the puzzle input once at module load (Advent of Code 2019, day 20).
with open("input.txt") as f:
    data = f.read()

import string
def dir_to_pos(dir):
    """Translate a direction code (1=up, 2=down, 3=right, 4=left) into a
    (dy, dx) grid step."""
    deltas = {1: (-1, 0), 2: (1, 0), 3: (0, 1), 4: (0, -1)}
    return deltas[dir]
def pos_sum(x, y):
    """Component-wise sum of two (row, col) pairs."""
    return (x[0] + y[0], x[1] + y[1])
def solve(data):
    """Shortest path from AA to ZZ in a portal maze (AoC 2019 day 20 part 1).

    Portals are pairs of adjacent capital letters labelling the '.' tile they
    touch; stepping onto a portal's letter jumps to its twin for free.

    Fixes/improvements over the original:
    - queue uses a head pointer instead of `l = l[1:]` (which copied the whole
      list on every step);
    - grid cells are read with .get() during portal scanning, so ragged input
      lines no longer raise KeyError;
    - a portal's twin is only enqueued if not already visited (the original
      could overwrite a shorter recorded distance).
    Returns None if ZZ is unreachable.
    """
    area = {}
    for y, line in enumerate(data.splitlines()):
        for x, c in enumerate(line):
            area[(y, x)] = c
    max_y = max(y for (y, x) in area)
    max_x = max(x for (y, x) in area)

    # Scan for two-letter labels (read top-down or left-right); each labels
    # the walkable '.' tile adjacent to the pair.
    portals = {}
    for y in range(max_y + 1):
        for x in range(max_x + 1):
            if area.get((y, x), '?') not in string.ascii_uppercase:
                continue
            if area.get((y + 1, x), '?') in string.ascii_uppercase:
                name = area[(y, x)] + area[(y + 1, x)]
                if area.get((y + 2, x), '?') == '.':
                    portals[(y + 2, x)] = name
                else:
                    portals[(y - 1, x)] = name
            elif area.get((y, x + 1), '?') in string.ascii_uppercase:
                name = area[(y, x)] + area[(y, x + 1)]
                if area.get((y, x + 2), '?') == '.':
                    portals[(y, x + 2)] = name
                else:
                    portals[(y, x - 1)] = name

    start = end = None
    for coord, name in portals.items():
        if name == 'AA':
            start = coord
        elif name == 'ZZ':
            end = coord
    # AA and ZZ mark entry/exit; they are not jumpable portals
    del portals[start]
    del portals[end]

    # plain BFS; head pointer avoids re-slicing the queue each step
    visited = {start: 0}
    queue = [start]
    head = 0
    while head < len(queue):
        pos = queue[head]
        head += 1
        for dy, dx in ((-1, 0), (1, 0), (0, 1), (0, -1)):
            new_pos = (pos[0] + dy, pos[1] + dx)
            if new_pos == end:
                return visited[pos] + 1
            if new_pos in visited:
                continue
            tile = area.get(new_pos, '?')
            if tile == '.':
                visited[new_pos] = visited[pos] + 1
                queue.append(new_pos)
            elif tile in string.ascii_uppercase and pos in portals:
                # walked into a portal label: jump to the twin tile
                name = portals[pos]
                pair = next(pp for pp, p in portals.items() if p == name and pp != pos)
                if pair not in visited:
                    visited[pair] = visited[pos] + 1
                    queue.append(pair)
    return None
def solve2(data):
    """Shortest path from AA to ZZ in the recursive maze (day 20 part 2).

    Same maze as solve(), but outer-edge portals ascend a level and inner
    portals descend; ZZ only counts at level 0 and levels below 0 don't exist.
    BFS states are (level, position).

    Same fixes as solve(): pointer-based queue, .get() during portal scanning
    for ragged input, and visited-checked enqueueing.  Returns None if ZZ is
    unreachable.
    """
    area = {}
    for y, line in enumerate(data.splitlines()):
        for x, c in enumerate(line):
            area[(y, x)] = c
    max_y = max(y for (y, x) in area)
    max_x = max(x for (y, x) in area)

    portals = {}
    for y in range(max_y + 1):
        for x in range(max_x + 1):
            if area.get((y, x), '?') not in string.ascii_uppercase:
                continue
            if area.get((y + 1, x), '?') in string.ascii_uppercase:
                name = area[(y, x)] + area[(y + 1, x)]
                if area.get((y + 2, x), '?') == '.':
                    portals[(y + 2, x)] = name
                else:
                    portals[(y - 1, x)] = name
            elif area.get((y, x + 1), '?') in string.ascii_uppercase:
                name = area[(y, x)] + area[(y, x + 1)]
                if area.get((y, x + 2), '?') == '.':
                    portals[(y, x + 2)] = name
                else:
                    portals[(y, x - 1)] = name

    start = end = None
    for coord, name in portals.items():
        if name == 'AA':
            start = coord
        elif name == 'ZZ':
            end = coord
    # These are not actual portals!
    del portals[start]
    del portals[end]

    visited = {(0, start): 0}
    queue = [(0, start)]
    head = 0
    while head < len(queue):
        state = queue[head]
        head += 1
        level, pos = state
        for dy, dx in ((-1, 0), (1, 0), (0, 1), (0, -1)):
            new_pos = (pos[0] + dy, pos[1] + dx)
            if new_pos == end and level == 0:
                return visited[state] + 1
            if pos in portals and area.get(new_pos, '?') in string.ascii_uppercase:
                # within 5 tiles of the outer border -> outer portal (level up)
                outer = (new_pos[0] < 5 or max_y - new_pos[0] < 5
                         or new_pos[1] < 5 or max_x - new_pos[1] < 5)
                new_level = level - 1 if outer else level + 1
                if new_level < 0:  # can't ascend above the outermost level
                    continue
                name = portals[pos]
                pair = next(pp for pp, p in portals.items() if p == name and pp != pos)
                new_state = (new_level, pair)
            elif area.get(new_pos, '?') == '.':
                new_state = (level, new_pos)
            else:
                continue
            if new_state not in visited:
                visited[new_state] = visited[state] + 1
                queue.append(new_state)
    return None
print(solve(""" A
A
#################.#############
#.#...#...................#.#.#
#.#.#.###.###.###.#########.#.#
#.#.#.......#...#.....#.#.#...#
#.#########.###.#####.#.#.###.#
#.............#.#.....#.......#
###.###########.###.#####.#.#.#
#.....# A C #.#.#.#
####### S P #####.#
#.#...# #......VT
#.#.#.# #.#####
#...#.# YN....#.#
#.###.# #####.#
DI....#.# #.....#
#####.# #.###.#
ZZ......# QG....#..AS
###.### #######
JO..#.#.# #.....#
#.#.#.# ###.#.#
#...#..DI BU....#..LF
#####.# #.#####
YN......# VT..#....QG
#.###.# #.###.#
#.#...# #.....#
###.### J L J #.#.###
#.....# O F P #.#...#
#.###.#####.#.#####.#####.###.#
#...#.#.#...#.....#.....#.#...#
#.#####.###.###.#.#.#########.#
#...#.#.....#...#.#.#.#.....#.#
#.###.#####.###.###.#.#.#######
#.#.........#...#.............#
#########.###.###.#############
B J C
U P P """))
print(solve(""" A
A
#######.#########
#######.........#
#######.#######.#
#######.#######.#
#######.#######.#
##### B ###.#
BC...## C ###.#
##.## ###.#
##...DE F ###.#
##### G ###.#
#########.#####.#
DE..#######...###.#
#.#########.###.#
FG..#########.....#
###########.#####
Z
Z """))
print(solve(data))
print(solve2(""" Z L X W C
Z P Q B K
###########.#.#.#.#######.###############
#...#.......#.#.......#.#.......#.#.#...#
###.#.#.#.#.#.#.#.###.#.#.#######.#.#.###
#.#...#.#.#...#.#.#...#...#...#.#.......#
#.###.#######.###.###.#.###.###.#.#######
#...#.......#.#...#...#.............#...#
#.#########.#######.#.#######.#######.###
#...#.# F R I Z #.#.#.#
#.###.# D E C H #.#.#.#
#.#...# #...#.#
#.###.# #.###.#
#.#....OA WB..#.#..ZH
#.###.# #.#.#.#
CJ......# #.....#
####### #######
#.#....CK #......IC
#.###.# #.###.#
#.....# #...#.#
###.### #.#.#.#
XF....#.# RF..#.#.#
#####.# #######
#......CJ NM..#...#
###.#.# #.###.#
RE....#.# #......RF
###.### X X L #.#.#.#
#.....# F Q P #.#.#.#
###.###########.###.#######.#########.###
#.....#...#.....#.......#...#.....#.#...#
#####.#.###.#######.#######.###.###.#.#.#
#.......#.......#.#.#.#.#...#...#...#.#.#
#####.###.#####.#.#.#.#.###.###.#.###.###
#.......#.....#.#...#...............#...#
#############.#.#.###.###################
A O F N
A A D M """))
print(solve2(data))
| |
#this file contains models that I have tried out for different tasks, which are reusable
#plus it has the training framework for those models given data - each model has its own data requirements
import numpy as np
import common_libs.utilities as ut
import random
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch
import math
class ModelAbs(nn.Module):
    """
    Abstract model without the forward method.

    lstm for processing tokens in sequence and linear layer for output generation
    lstm is a uni-directional single layer lstm

    num_classes = 1 - for regression
    num_classes = n - for classifying into n classes
    """

    def __init__(self, hidden_size, embedding_size, num_classes):
        super(ModelAbs, self).__init__()
        self.hidden_size = hidden_size
        # subclasses set a human-readable model name
        self.name = 'should be overridden'

        # width of each token embedding vector
        self.embedding_size = embedding_size
        self.num_classes = num_classes

        # token-level LSTM: (input size, hidden size); single layer, uni-dir
        self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)

        # hidden state for the rnn
        self.hidden_token = self.init_hidden()

        # output layer: (in_features, out_features)
        self.linear = nn.Linear(self.hidden_size, self.num_classes)

    def init_hidden(self):
        """Fresh zeroed (h_0, c_0) for a single-layer, batch-size-1 LSTM."""
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_size)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_size)))

    def set_learnable_embedding(self, mode, dictsize, seed=None):
        """Configure token embeddings.

        mode 'none'   - nn.Embedding trained from random init
        mode 'seed'   - nn.Embedding initialised from `seed` (e.g. word2vec)
        mode 'learnt' - use `seed` directly as a fixed lookup table
        """
        self.mode = mode

        if mode != 'learnt':
            embedding = nn.Embedding(dictsize, self.embedding_size)

        # Fixed: Python-2-only `print ...` statements replaced with the call
        # form, which behaves identically for one argument on both 2 and 3.
        if mode == 'none':
            print('learn embeddings form scratch...')
            initrange = 0.5 / self.embedding_size
            embedding.weight.data.uniform_(-initrange, initrange)
            self.final_embeddings = embedding
        elif mode == 'seed':
            print('seed by word2vec vectors....')
            embedding.weight.data = torch.FloatTensor(seed)
            self.final_embeddings = embedding
        else:
            print('using learnt word2vec embeddings...')
            self.final_embeddings = seed

    def remove_refs(self, item):
        """Drop any references that would inhibit garbage collection (hook
        for subclasses; no-op here)."""
        return
class ModelSequentialRNN(ModelAbs):
    """
    Prediction at every hidden state of the unrolled rnn.

    Input - sequence of tokens processed in sequence by the lstm
    Output - predictions at the every hidden state

    uses lstm and linear setup of ModelAbs
    each hidden state is given as a seperate batch to the linear layer
    """

    def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
        super(ModelSequentialRNN, self).__init__(hidden_size, embedding_size, num_classes)
        if intermediate:
            self.name = 'sequential RNN intermediate'
        else:
            self.name = 'sequential RNN'
        # intermediate=True -> one prediction per timestep;
        # False -> a single prediction from the final hidden state
        self.intermediate = intermediate

    def forward(self, item):
        # fresh zeroed hidden state per sample; no state carries across calls
        self.hidden_token = self.init_hidden()

        #convert to tensor
        if self.mode == 'learnt':
            # final_embeddings is a plain lookup table (e.g. word2vec matrix)
            acc_embeds = []
            for token in item.x:
                acc_embeds.append(self.final_embeddings[token])
            embeds = torch.FloatTensor(acc_embeds)
        else:
            # final_embeddings is an nn.Embedding module
            embeds = self.final_embeddings(torch.LongTensor(item.x))

        #prepare for lstm - seq len, batch size, embedding size
        seq_len = embeds.shape[0]
        embeds_for_lstm = embeds.unsqueeze(1)

        #lstm outputs
        #output, (h_n,c_n)
        #output - (seq_len, batch = 1, hidden_size * directions) - h_t for each t final layer only
        #h_n - (layers * directions, batch = 1, hidden_size) - h_t for t = seq_len
        #c_n - (layers * directions, batch = 1, hidden_size) - c_t for t = seq_len

        #lstm inputs
        #input, (h_0, c_0)
        #input - (seq_len, batch, input_size)
        lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)

        if self.intermediate:
            #input to linear - seq_len, hidden_size (seq_len is the batch size for the linear layer)
            #output - seq_len, num_classes
            values = self.linear(lstm_out[:,0,:].squeeze()).squeeze()
        else:
            #input to linear - hidden_size
            #output - num_classes
            values = self.linear(self.hidden_token[0].squeeze()).squeeze()

        return values
class ModelHierarchicalRNN(ModelAbs):
    """
    Prediction at every hidden state of the unrolled rnn for instructions.

    Input - sequence of tokens processed in sequence by the lstm but seperated into instructions
    Output - predictions at the every hidden state

    lstm predicting instruction embedding for sequence of tokens
    lstm_ins processes sequence of instruction embeddings
    linear layer process hidden states to produce output
    """

    def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
        super(ModelHierarchicalRNN, self).__init__(hidden_size, embedding_size, num_classes)

        self.hidden_ins = self.init_hidden()
        # instruction-level LSTM consumes the token LSTM's final hidden
        # states, so both its input and output widths are hidden_size
        self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)

        if intermediate:
            self.name = 'hierarchical RNN intermediate'
        else:
            self.name = 'hierarchical RNN'
        # intermediate=True -> one prediction per instruction;
        # False -> a single prediction from the final instruction state
        self.intermediate = intermediate

    def copy(self, model):
        """Share another instance's trained submodules (weight tying)."""
        self.linear = model.linear
        self.lstm_token = model.lstm_token
        self.lstm_ins = model.lstm_ins

    def forward(self, item):
        self.hidden_token = self.init_hidden()
        self.hidden_ins = self.init_hidden()

        # Fixed: this buffer stores token-LSTM hidden states, which have
        # hidden_size features — it was allocated with embedding_size and
        # crashed whenever the two dimensions differed (the sibling class
        # ModelHierarchicalRNNRelational already allocates hidden_size).
        ins_embeds = autograd.Variable(torch.zeros(len(item.x), self.hidden_size))
        for i, ins in enumerate(item.x):
            if self.mode == 'learnt':
                # final_embeddings is a plain lookup table
                acc_embeds = []
                for token in ins:
                    acc_embeds.append(self.final_embeddings[token])
                token_embeds = torch.FloatTensor(acc_embeds)
            else:
                # final_embeddings is an nn.Embedding module
                token_embeds = self.final_embeddings(torch.LongTensor(ins))

            token_embeds_lstm = token_embeds.unsqueeze(1)
            out_token, hidden_token = self.lstm_token(token_embeds_lstm, self.hidden_token)
            # instruction embedding = token LSTM's final hidden state
            ins_embeds[i] = hidden_token[0].squeeze()

        ins_embeds_lstm = ins_embeds.unsqueeze(1)
        out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)

        if self.intermediate:
            values = self.linear(out_ins[:, 0, :]).squeeze()
        else:
            values = self.linear(hidden_ins[0].squeeze()).squeeze()

        return values
class ModelHierarchicalRNNRelational(ModelAbs):
    """Hierarchical RNN with a relational head: a small MLP g() is applied to
    every ordered pair of instruction states and the results are summed before
    the final linear layer."""

    def __init__(self, embedding_size, num_classes, hidden_size=None):
        # Fixed: ModelAbs.__init__ takes (hidden_size, embedding_size,
        # num_classes); the original forwarded only two arguments and raised
        # TypeError on construction.  hidden_size is a new, backward-compatible
        # keyword that defaults to embedding_size when not given.
        if hidden_size is None:
            hidden_size = embedding_size
        super(ModelHierarchicalRNNRelational, self).__init__(hidden_size, embedding_size, num_classes)

        self.hidden_ins = self.init_hidden()
        self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)

        # relational g-network over concatenated pairs of instruction states
        self.linearg1 = nn.Linear(2 * self.hidden_size, self.hidden_size)
        self.linearg2 = nn.Linear(self.hidden_size, self.hidden_size)

    def forward(self, item):
        self.hidden_token = self.init_hidden()
        self.hidden_ins = self.init_hidden()

        ins_embeds = autograd.Variable(torch.zeros(len(item.x), self.hidden_size))
        for i, ins in enumerate(item.x):
            if self.mode == 'learnt':
                # final_embeddings is a plain lookup table
                acc_embeds = []
                for token in ins:
                    acc_embeds.append(self.final_embeddings[token])
                token_embeds = torch.FloatTensor(acc_embeds)
            else:
                # final_embeddings is an nn.Embedding module
                token_embeds = self.final_embeddings(torch.LongTensor(ins))

            token_embeds_lstm = token_embeds.unsqueeze(1)
            out_token, hidden_token = self.lstm_token(token_embeds_lstm, self.hidden_token)
            ins_embeds[i] = hidden_token[0].squeeze()

        ins_embeds_lstm = ins_embeds.unsqueeze(1)
        out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)

        # relational aggregation: sum g(h_i, h_j) over all pairs i <= j
        seq_len = len(item.x)
        g_variable = autograd.Variable(torch.zeros(self.hidden_size))
        for i in range(seq_len):
            for j in range(i, seq_len):
                concat = torch.cat((out_ins[i].squeeze(), out_ins[j].squeeze()), 0)
                g1 = nn.functional.relu(self.linearg1(concat))
                g2 = nn.functional.relu(self.linearg2(g1))
                g_variable += g2

        output = self.linear(g_variable)

        return output
class ModelSequentialRNNComplex(nn.Module):
    """
    Prediction using the final hidden state of the unrolled rnn.

    Input - sequence of tokens processed in sequence by the lstm
    Output - the final value to be predicted

    we do not derive from ModelAbs, but instead use a bidirectional, multi layer
    lstm and a deep MLP with non-linear activation functions to predict the final output
    """

    def __init__(self, embedding_size):
        # Fixed: super() previously referenced ModelFinalHidden — a stale
        # class name — which raised NameError on construction.
        super(ModelSequentialRNNComplex, self).__init__()
        self.name = 'sequential RNN'
        self.hidden_size = 256
        self.embedding_size = embedding_size
        self.layers = 2
        self.directions = 1
        self.is_bidirectional = (self.directions == 2)
        self.lstm_token = torch.nn.LSTM(input_size=self.embedding_size,
                                        hidden_size=self.hidden_size,
                                        num_layers=self.layers,
                                        bidirectional=self.is_bidirectional)
        # MLP head over the concatenated final hidden states of all
        # layers * directions
        self.linear1 = nn.Linear(self.layers * self.directions * self.hidden_size, self.hidden_size)
        self.linear2 = nn.Linear(self.hidden_size, 1)
        self.hidden_token = self.init_hidden()

    def init_hidden(self):
        """Zeroed (h_0, c_0) shaped (layers * directions, batch=1, hidden)."""
        return (autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)),
                autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)))

    def forward(self, item):
        self.hidden_token = self.init_hidden()

        # NOTE(review): self.mode / self.final_embeddings are only created by
        # ModelAbs.set_learnable_embedding, which this class does not inherit;
        # they must be assigned externally before forward() is called — verify.
        if self.mode == 'learnt':
            acc_embeds = []
            for token in item.x:
                acc_embeds.append(self.final_embeddings[token])
            embeds = torch.FloatTensor(acc_embeds)
        else:
            embeds = self.final_embeddings(torch.LongTensor(item.x))

        #prepare for lstm - seq len, batch size, embedding size
        seq_len = embeds.shape[0]
        embeds_for_lstm = embeds.unsqueeze(1)

        lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)

        # flatten the final hidden states of all layers/directions, then MLP
        f1 = nn.functional.relu(self.linear1(self.hidden_token[0].squeeze().view(-1)))
        f2 = self.linear2(f1)
        return f2
| |
"""
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.db.models.fields.related import ManyToManyField
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.utils import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
    # Human-readable title shown in the admin's right sidebar; subclasses
    # are required to set this.
    title = None
    template = 'admin/filter.html'

    def __init__(self, request, params, model, model_admin):
        # Query-string parameters actually consumed by this filter are
        # collected here as subclasses process the request.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)

    def has_output(self):
        """Return True if some choices would be output for this filter."""
        raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')

    def choices(self, cl):
        """Return choices ready to be output in the template."""
        raise NotImplementedError('subclasses of ListFilter must provide a choices() method')

    def queryset(self, request, queryset):
        """Return the filtered queryset."""
        raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')

    def expected_parameters(self):
        """Return the query-string parameter names used by this filter."""
        raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
    # Query-string parameter name for this filter; subclasses must set it.
    parameter_name = None

    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(
            request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__)
        if self.parameter_name in params:
            # Claim the parameter so the changelist does not treat it as
            # an unknown lookup.
            self.used_parameters[self.parameter_name] = params.pop(self.parameter_name)
        self.lookup_choices = list(self.lookups(request, model_admin) or ())

    def has_output(self):
        return bool(self.lookup_choices)

    def value(self):
        """
        Return the string value provided in the request's query string for
        this filter, or None if the value wasn't provided.
        """
        return self.used_parameters.get(self.parameter_name)

    def lookups(self, request, model_admin):
        """
        Must be overridden to return a list of tuples (value, verbose value)
        """
        raise NotImplementedError(
            'The SimpleListFilter.lookups() method must be overridden to '
            'return a list of tuples (value, verbose value)')

    def expected_parameters(self):
        return [self.parameter_name]

    def choices(self, cl):
        # The "All" entry first, then one entry per declared lookup.
        yield {
            'selected': self.value() is None,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': cl.get_query_string({self.parameter_name: lookup}, []),
                'display': title,
            }
class FieldListFilter(ListFilter):
    # Registry of (test, filter_class) pairs consulted by create().
    _field_list_filters = []
    _take_priority_index = 0

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, 'verbose_name', field_path)
        super(FieldListFilter, self).__init__(
            request, params, model, model_admin)
        for p in self.expected_parameters():
            if p in params:
                self.used_parameters[p] = prepare_lookup_value(p, params.pop(p))

    def has_output(self):
        return True

    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError as e:
            raise IncorrectLookupParameters(e)

    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if not take_priority:
            cls._field_list_filters.append((test, list_filter_class))
            return
        # Custom filters registered with take_priority=True are inserted
        # ahead of the defaults so they win the first-match scan below.
        cls._field_list_filters.insert(
            cls._take_priority_index, (test, list_filter_class))
        cls._take_priority_index += 1

    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # First registered test that accepts the field wins.
        for test, list_filter_class in cls._field_list_filters:
            if test(field):
                return list_filter_class(field, request, params,
                                         model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    # Sidebar filter for relational fields: one choice per related object,
    # plus an "empty" choice when the relation may be unset.
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Forward relations expose `rel`; for reverse relations fall back
        # to the related model's primary key name.
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.lookup_choices = self.field_choices(field, request, model_admin)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
    def has_output(self):
        # Nullable relations contribute an extra "empty" choice; only
        # render the filter when there is more than one choice in total.
        if (isinstance(self.field, models.related.RelatedObject) and
                self.field.field.null or hasattr(self.field, 'rel') and
                self.field.null):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def field_choices(self, field, request, model_admin):
        # Hook point; RelatedOnlyFieldListFilter overrides this to narrow
        # the choices to objects actually referenced.
        return field.get_choices(include_blank=False)
    def choices(self, cl):
        # NOTE(review): imported locally, presumably to avoid a circular
        # import at module load time -- confirm.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == smart_text(pk_val),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        # Many-to-many relations can be "empty" even when not nullable.
        if (isinstance(self.field, models.related.RelatedObject) and
                (self.field.field.null or isinstance(self.field.field, ManyToManyField)) or
                hasattr(self.field, 'rel') and (self.field.null or isinstance(self.field, ManyToManyField))):
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
# Register for relational fields: forward relations (have `rel`) and
# reverse relations (RelatedObject).
FieldListFilter.register(lambda f: (
    bool(f.rel) if hasattr(f, 'rel') else
    isinstance(f, models.related.RelatedObject)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """Two/three-way filter for BooleanField and NullBooleanField."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(BooleanFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]

    def choices(self, cl):
        options = ((None, _('All')), ('1', _('Yes')), ('0', _('No')))
        for lookup, title in options:
            yield {
                'selected': self.lookup_val == lookup and not self.lookup_val2,
                'query_string': cl.get_query_string(
                    {self.lookup_kwarg: lookup}, [self.lookup_kwarg2]),
                'display': title,
            }
        # NullBooleanField gets a third state for NULL values.
        if isinstance(self.field, models.NullBooleanField):
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': cl.get_query_string(
                    {self.lookup_kwarg2: 'True'}, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }
# Register for boolean fields, including nullable booleans.
FieldListFilter.register(lambda f: isinstance(f,
    (models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Filter over the declared ``choices`` of a field."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg]

    def choices(self, cl):
        # The "All" entry first, then one entry per declared choice.
        yield {
            'selected': self.lookup_val is None,
            'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
            'display': _('All')
        }
        for lookup, title in self.field.flatchoices:
            yield {
                'selected': smart_text(lookup) == self.lookup_val,
                'query_string': cl.get_query_string({self.lookup_kwarg: lookup}),
                'display': title,
            }
# Register for any field that declares explicit choices.
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    # Offers fixed date-range links (today / past 7 days / this month /
    # this year) implemented as half-open [since, until) range lookups.
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # Keep every date-related parameter so a link's full range is
        # compared and replaced as one unit.
        self.date_params = {k: v for k, v in params.items()
                            if k.startswith(self.field_generic)}
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # First day of the next month, handling the December rollover.
        if today.month == 12:
            next_month = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month = today.replace(month=today.month + 1, day=1)
        next_year = today.replace(year=today.year + 1, month=1, day=1)
        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # (title, params) pairs; each range is since <= value < until.
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(next_month),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(next_year),
            }),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]
    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                # A link is selected when its exact parameter set is active.
                'selected': self.date_params == param_dict,
                'query_string': cl.get_query_string(
                    param_dict, [self.field_generic]),
                'display': title,
            }
# Register for date fields (DateTimeField subclasses DateField).
FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    # Fallback filter: one choice per distinct value of the field found in
    # the (parent) model's rows.
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
                                                 None)
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
        # queryset = queryset.complex_filter(
        #     {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)
        # Lazy queryset of distinct values; evaluated when choices render.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def choices(self, cl):
        # NOTE(review): imported locally, presumably to avoid a circular
        # import at module load time -- confirm.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': (self.lookup_val is None
                         and self.lookup_val_isnull is None),
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # NULLs are rendered once, at the end, as an "empty" choice.
                include_none = True
                continue
            val = smart_text(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
# Catch-all: matches every field, so it must be registered after every
# more specific filter (registration order drives create()'s scan).
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
    """Like RelatedFieldListFilter, but only offers related objects that
    are actually referenced by rows visible in the admin queryset."""

    def field_choices(self, field, request, model_admin):
        referenced_pks = set(
            model_admin.get_queryset(request).values_list(field.name, flat=True))
        return field.get_choices(include_blank=False,
                                 limit_choices_to={'pk__in': referenced_pks})
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from pycket import impersonators as imp
from pycket import values
from pycket import values_parameter, values_struct
from pycket.arity import Arity
from pycket.error import SchemeException
from pycket.prims.expose import unsafe, default, expose, expose_val
# Both inspector parameters are backed by the same parameter object.
# NOTE(review): Racket distinguishes current-inspector from
# current-code-inspector; confirm the aliasing is intentional here.
expose_val("current-inspector", values_struct.current_inspector_param)
expose_val("current-code-inspector", values_struct.current_inspector_param)
@expose("make-inspector", [default(values_struct.W_StructInspector, None)], simple=False)
def do_make_instpector(w_inspector, env, cont):
    """Create a new inspector whose superior is *w_inspector*, defaulting
    to the current inspector when the argument is omitted.

    (Function name keeps its historical "instpector" typo; it is only
    referenced through the @expose registration.)
    """
    from pycket.interpreter import return_value
    if w_inspector is None:
        w_inspector = values_struct.current_inspector_param.get(cont)
    child = values_struct.W_StructInspector.make(w_inspector)
    return return_value(child, env, cont)
@expose("make-sibling-inspector", [default(values_struct.W_StructInspector, None)], simple=False)
def do_make_sibling_instpector(w_inspector, env, cont):
    """Create an inspector that is a sibling of *w_inspector* (defaults to
    the current inspector)."""
    from pycket.interpreter import return_value
    if w_inspector is None:
        w_inspector = values_struct.current_inspector_param.get(cont)
    sibling = values_struct.W_StructInspector.make(w_inspector, issibling=True)
    return return_value(sibling, env, cont)
@expose("inspector-superior?", [values_struct.W_StructInspector, values_struct.W_StructInspector])
def inspector_superior_huh(w_inspector, maybe_subinspector):
    """#t iff *w_inspector* is a strict superior of *maybe_subinspector*
    (an inspector is not its own superior)."""
    if w_inspector is maybe_subinspector:
        return values.w_false
    ancestor = maybe_subinspector.w_super
    while ancestor is not None:
        if ancestor is w_inspector:
            return values.w_true
        ancestor = ancestor.w_super
    return values.w_false
@expose("struct?", [values.W_Object], simple=False)
def do_is_struct(v, env, cont):
    """#t iff *v* is a struct whose type the current inspector controls."""
    from pycket.interpreter import return_value
    current_inspector = values_struct.current_inspector_param.get(cont)
    result = values.w_false
    if (isinstance(v, values_struct.W_RootStruct) and
            current_inspector.has_control(v.struct_type())):
        result = values.w_true
    return return_value(result, env, cont)
@expose("struct-info", [values.W_Object], simple=False)
def do_struct_info(v, env, cont):
    """Return the struct-info of *v* when the current inspector can see
    it; otherwise (values #f #t), i.e. "opaque, something was skipped"."""
    from pycket.interpreter import return_multi_vals
    current_inspector = values_struct.current_inspector_param.get(cont)
    if (isinstance(v, values_struct.W_RootStruct) and
            current_inspector.has_control(v.struct_type())):
        return v.get_struct_info(env, cont)
    opaque = values.Values.make([values.w_false, values.w_true])
    return return_multi_vals(opaque, env, cont)
struct_info = do_struct_info.w_prim
@expose("struct-type-info", [values_struct.W_StructType], simple=False)
def do_struct_type_info(struct_type, env, cont):
    # Thin wrapper: packages struct_type.struct_type_info's list as
    # multiple return values.
    from pycket.interpreter import return_value
    return return_value(values.Values.make(struct_type.struct_type_info(cont)), env, cont)
@expose("struct-type-make-constructor", [values_struct.W_StructType], simple=False)
def do_struct_type_make_constructor(struct_type, env, cont):
    """Return the type's constructor, provided the current inspector
    controls the struct type."""
    from pycket.interpreter import return_value
    current_inspector = values_struct.current_inspector_param.get(cont)
    if current_inspector.has_control(struct_type):
        return return_value(struct_type.constructor, env, cont)
    # TODO: we should raise exn:fail:contract
    raise SchemeException("fail_contract")
@expose("struct-type-make-predicate", [values_struct.W_StructType], simple=False)
def do_struct_type_make_predicate(struct_type, env, cont):
    """Return the type's predicate, provided the current inspector
    controls the struct type."""
    from pycket.interpreter import return_value
    current_inspector = values_struct.current_inspector_param.get(cont)
    if current_inspector.has_control(struct_type):
        return return_value(struct_type.predicate, env, cont)
    # TODO: we should raise exn:fail:contract
    raise SchemeException("fail_contract")
@expose("make-struct-type",
        [values.W_Symbol, values.W_Object, values.W_Fixnum, values.W_Fixnum,
         default(values.W_Object, values.w_false),
         default(values.W_Object, values.w_null),
         default(values.W_Object, None),
         default(values.W_Object, values.w_false),
         default(values.W_List, values.w_null),
         default(values.W_Object, values.w_false),
         default(values.W_Object, values.w_false)], simple=False)
def do_make_struct_type(w_name, w_super_type, w_init_field_count,
        w_auto_field_count, w_auto_value, w_properties, w_inspector,
        w_proc_spec, w_immutables, w_guard, w_constructor_name,
        env, cont):
    """Validate the arguments and delegate to W_StructType.make.

    Raises SchemeException for a non-symbol constructor name, a bad or
    sealed super type, or malformed immutable-field indices.
    """
    if w_inspector is None:
        w_inspector = values_struct.current_inspector_param.get(cont)
    if (w_constructor_name is not values.w_false and
            not isinstance(w_constructor_name, values.W_Symbol)):
        # BUG FIX: error message previously read "mustbe be".
        raise SchemeException("make-struct-type: constructor name must be symbol? or #f")
    if not (isinstance(w_super_type, values_struct.W_StructType) or
            w_super_type is values.w_false):
        raise SchemeException("make-struct-type: expected a struct-type? or #f for the super type , but got %s : %s" % (w_super_type, w_super_type.tostring()))
    if (isinstance(w_super_type, values_struct.W_StructType) and
            w_super_type.prop_sealed):
        raise SchemeException("make-struct-type: cannot make a subtype of a sealed type")
    init_field_count = w_init_field_count.value
    auto_field_count = w_auto_field_count.value
    immutables = []
    # NOTE(review): the check rejects negatives (i.value < 0) but the
    # message says "positive integers" -- index 0 is accepted; confirm
    # intended wording before changing the message.
    for i in values.from_list_iter(w_immutables):
        if not isinstance(i, values.W_Fixnum) or i.value < 0:
            raise SchemeException("make-struct-type: expected list of positive integers for immutable fields")
        immutables.append(i.value)
    return values_struct.W_StructType.make(w_name=w_name,
        w_super_type=w_super_type, init_field_count=init_field_count,
        auto_field_count=auto_field_count, w_auto_value=w_auto_value,
        w_properties=w_properties, w_inspector=w_inspector,
        w_proc_spec=w_proc_spec, immutables=immutables,
        w_guard=w_guard, w_constructor_name=w_constructor_name,
        env=env, cont=cont)
@expose("struct-accessor-procedure?", [values.W_Object])
def do_is_struct_accessor_procedure(v):
    """#t for whole-struct accessors and single-field accessors."""
    is_accessor = isinstance(v, (values_struct.W_StructAccessor,
                                 values_struct.W_StructFieldAccessor))
    return values.W_Bool.make(is_accessor)
@expose("make-struct-field-accessor", [values_struct.W_StructAccessor,
    values.W_Fixnum, default(values.W_Object, values.w_false)])
def do_make_struct_field_accessor(accessor, field, field_name):
    # Specialise a whole-struct accessor to a single field; field_name is
    # either #f (anonymous) or a symbol used in error reporting.
    if field_name is values.w_false:
        return values_struct.W_StructFieldAccessor(accessor, field.value, None)
    if not isinstance(field_name, values.W_Symbol):
        raise SchemeException("make-struct-field-accessor: expected symbol or #f as argument 2")
    return values_struct.W_StructFieldAccessor(accessor, field.value, field_name)
@expose("struct-mutator-procedure?", [values.W_Object])
def do_is_struct_mutator_procedure(v):
    """#t for whole-struct mutators and single-field mutators."""
    is_mutator = isinstance(v, (values_struct.W_StructMutator,
                                values_struct.W_StructFieldMutator))
    return values.W_Bool.make(is_mutator)
@expose("make-struct-field-mutator", [values_struct.W_StructMutator,
    values.W_Fixnum, default(values.W_Object, values.w_false)])
def do_make_struct_field_mutator(mutator, field, field_name):
    # Specialise a whole-struct mutator to a single field; field_name is
    # either #f (anonymous) or a symbol used in error reporting.
    if field_name is values.w_false:
        return values_struct.W_StructFieldMutator(mutator, field.value, None)
    if not isinstance(field_name, values.W_Symbol):
        raise SchemeException("make-struct-field-mutator: expected symbol or #f as argument 2")
    return values_struct.W_StructFieldMutator(mutator, field.value, field_name)
@expose("struct->vector", [values_struct.W_RootStruct])
def expose_struct2vector(struct):
    # Thin wrapper delegating to the struct->vector conversion helper.
    return values_struct.struct2vector(struct)
@expose("prefab-struct-key", [values.W_Object])
def do_prefab_struct_key(v):
    """Return the short prefab key of a prefab struct instance, else #f."""
    if isinstance(v, values_struct.W_Struct) and v.struct_type().isprefab:
        key = values_struct.W_PrefabKey.from_struct_type(v.struct_type())
        return key.short_key()
    return values.w_false
@expose("make-prefab-struct", arity=Arity.geq(1))
def do_make_prefab_struct(args):
    """(make-prefab-struct key field ...): first argument is the prefab
    key, the rest are the field values."""
    return values_struct.W_Struct.make_prefab(args[0], args[1:])
@expose("prefab-key->struct-type", [values.W_Object, values.W_Fixnum])
def expose_prefab_key2struct_type(w_key, field_count):
    # Build (or reuse) the prefab struct type described by the raw key.
    return values_struct.W_StructType.make_prefab(
        values_struct.W_PrefabKey.from_raw_key(w_key, field_count.value))
@expose("prefab-key?", [values.W_Object])
def do_prefab_key(v):
    # Predicate: validity checking is delegated to W_PrefabKey.
    return values_struct.W_PrefabKey.is_prefab_key(v)
# Sentinel symbol: passing it as the guard marks the property impersonatable.
w_can_impersonate = values.W_Symbol.make("can-impersonate")
@expose("make-struct-type-property", [values.W_Symbol,
    default(values.W_Object, values.w_false),
    default(values.W_List, values.w_null),
    default(values.W_Object, values.w_false)])
def mk_stp(sym, guard, supers, _can_imp):
    """Create a struct type property.

    Returns (values property predicate accessor). The property is
    impersonatable when the guard is the 'can-impersonate symbol or the
    fourth argument is any true value.
    """
    can_imp = _can_imp is not values.w_false
    if guard is w_can_impersonate:
        guard = values.w_false
        can_imp = True
    prop = values_struct.W_StructProperty(sym, guard, supers, can_imp)
    return values.Values.make([prop,
                               values_struct.W_StructPropertyPredicate(prop),
                               values_struct.W_StructPropertyAccessor(prop)])
# Unsafe struct ops
@expose("unsafe-struct-ref", [values.W_Object, unsafe(values.W_Fixnum)])
def unsafe_struct_ref(v, k):
    # Read field k of a possibly-impersonated struct without contract
    # checks; the asserts are debug-only guards.
    v = imp.get_base_object(v)
    assert isinstance(v, values_struct.W_Struct)
    # BUG FIX: the upper bound was `<=`, allowing an out-of-range index
    # equal to the field count; valid indices are 0 <= k < count, matching
    # the `<` bound used by unsafe-struct-set! and unsafe-struct*-ref.
    assert 0 <= k.value < v.struct_type().total_field_count
    return v._ref(k.value)
@expose("unsafe-struct-set!", [values.W_Object, unsafe(values.W_Fixnum),
    values.W_Object])
def unsafe_struct_set(v, k, val):
    # Write field k of a possibly-impersonated struct without contract
    # checks; the asserts are debug-only guards.
    v = imp.get_base_object(v)
    assert isinstance(v, values_struct.W_Struct)
    assert 0 <= k.value < v.struct_type().total_field_count
    return v._set(k.value, val)
@expose("unsafe-struct*-ref", [values_struct.W_Struct, unsafe(values.W_Fixnum)])
def unsafe_struct_star_ref(v, k):
    # Like unsafe-struct-ref, but the argument is known to be a bare
    # (non-impersonated) struct, so no unwrapping is needed.
    assert 0 <= k.value < v.struct_type().total_field_count
    return v._ref(k.value)
@expose("unsafe-struct*-set!", [values_struct.W_Struct, unsafe(values.W_Fixnum),
    values.W_Object])
def unsafe_struct_star_set(v, k, val):
    # Write field k of a bare (non-impersonated) struct, no checks.
    # BUG FIX: the upper bound was `<=`; valid indices are
    # 0 <= k < total_field_count, matching unsafe-struct*-ref.
    assert 0 <= k.value < v.struct_type().total_field_count
    return v._set(k.value, val)
@expose("unsafe-struct*-cas!", [values_struct.W_Struct, unsafe(values.W_Fixnum),
    values.W_Object, values.W_Object])
def unsafe_struct_star_cas(v, k, old_val, new_val):
    # Compare-and-set on field k: store new_val and return #t only when
    # the current value is identical (eq?) to old_val.
    # BUG FIX: the upper bound was `<=`; valid indices are
    # 0 <= k < total_field_count, matching unsafe-struct*-ref.
    assert 0 <= k.value < v.struct_type().total_field_count
    if v._ref(k.value) is old_val:
        v._set(k.value, new_val)
        return values.w_true
    return values.w_false
| |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Filerep test suite, to test Replication of the GPDB cluster.
Run workload with all DB operations on all the relation types,
from transition states of insync, to change tracking, to resync,
and until insync again, workload generated by each transition
stage will be tested by all following transition stages. Catalog
is checked within each phase, checkmirrorseg performed within insync
phases.
This test suite leverages the scenario framework to run the workload (SQLs)
in parallel. However, in order to avoid deadlocks and concurrent
alteration of the same relation, vacuum-full SQLs and some other SQLs are
put into a vacuum folder of each workload to run sequentially.
"""
import os
import socket
import tinctest
import unittest2 as unittest
from tinctest.lib import local_path
from mpp.gpdb.tests.storage.lib import Database
from mpp.models import MPPTestCase
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.filerep_end_to_end import FilerepTestCase
class FilerepE2EScenarioTestCase(ScenarioTestCase, MPPTestCase):
"""
@gucs gp_create_table_random_default_distribution=off
"""
    def __init__(self, methodName):
        # Shared helper that drives cluster-level filerep operations;
        # 'preprocess' is just the method name used for construction.
        self.filerep = FilerepTestCase('preprocess')
        # Directory holding the workload data served via gpfdist.
        self.path = local_path("data")
        super(FilerepE2EScenarioTestCase,self).__init__(methodName)
    def setUp(self):
        '''
        Per-test setup: create the 'gptest' database, run filerep
        preprocessing and start gpfdist.

        NOTE: the gpfdist port is hard-coded (8088) rather than
        dynamically generated, so startup may fail when another process
        is already running on that specific port.
        '''
        super(FilerepE2EScenarioTestCase, self).setUp()
        db = Database()
        db.setupDatabase('gptest')
        self.filerep.preprocess()
        self.filerep.setupGpfdist('8088', self.path)
        self.filerep.method_setup()
    def tearDown(self):
        # Stop the gpfdist service started in setUp before generic teardown.
        self.filerep.cleanupGpfdist('8088', self.path)
        super(FilerepE2EScenarioTestCase, self).tearDown()
    @classmethod
    def setUpClass(cls):
        # Intentionally empty: all setup happens per-test in setUp().
        pass
    @classmethod
    def tearDownClass(cls):
        # Intentionally empty: all cleanup happens per-test in tearDown().
        pass
    def test_incr_mirror(self):
        # Incremental recovery after mirror failover.
        self.do_test('incr','mirror')
    def test_full_mirror(self):
        # Full recovery after mirror failover.
        self.do_test('full','mirror')
    def test_incr_primary(self):
        # Incremental recovery after primary failover.
        self.do_test('incr','primary')
    def test_full_primary(self):
        # Full recovery after primary failover.
        self.do_test('full', 'primary')
    def do_test(self,rec_mode,fail_type):
        '''
        Drive one full failover/recovery cycle:
        sync1 -> ck_sync1 -> change tracking -> resync -> sync2 -> cleanup.
        @rec_mode: recovery mode, can be full or incremental
        @fail_type, failover type, can be mirror or primary
        '''
        self.do_sync1_tests(fail_type)
        self.do_ck_sync1_tests()
        self.do_ct_tests(fail_type)
        self.do_resync_tests(rec_mode, fail_type)
        self.do_sync2_tests(rec_mode, fail_type)
        self.do_clean_tests()
    def do_sync1_tests(self, fail_type):
        '''
        Run workload while insync transition state, creates workload to be tested by following
        transition phases.
        @fail_type: failover type, can be mirror or primary
        '''
        # Start from a clean slate.
        list_cl = []
        list_cl.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.clean_data")
        self.test_case_scenario.append(list_cl)
        # Schema/setup workload for the sync1 phase.
        list_set_sync1 = []
        list_set_sync1.append("mpp.gpdb.tests.storage.filerep_end_to_end.set_sync1.test_set_sync1.SetSync1TestCase")
        self.test_case_scenario.append(list_set_sync1)
        # Main sync1 workload (runs in parallel).
        list_sync1 = []
        list_sync1.append("mpp.gpdb.tests.storage.filerep_end_to_end.sync1.test_sync1.Sync1TestCase")
        self.test_case_scenario.append(list_sync1)
        # Sequential tail: vacuum workload, mirror integrity check,
        # gpstate validation and catalog check.
        list_serial = []
        list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.sync1.vacuum.test_vacuum.VacuumTestCase")
        list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.check_mirror_seg")
        list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gpstate", [fail_type, 'sync1']))
        list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat", {'outputFile':'sync1_checkcat.out'}))
        self.test_case_scenario.append(list_serial, serial=True)
    def do_ck_sync1_tests(self):
        '''
        Run workload in insync but after pushing a checkpoint, in case heap data pages maybe in flight getting flushed to mirror
        and not on primary.
        '''
        # Clean up, then force a checkpoint (sequentially).
        list_cl_and_ck = []
        list_cl_and_ck.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.clean_data")
        list_cl_and_ck.append("mpp.gpdb.tests.storage.filerep_end_to_end.runcheckpoint.runCheckPointSQL.runCheckPointTestCase")
        self.test_case_scenario.append(list_cl_and_ck, serial=True)
        # Schema/setup workload for the ck_sync1 phase.
        list_set_checkpoint_sync1 = []
        list_set_checkpoint_sync1.append("mpp.gpdb.tests.storage.filerep_end_to_end.set_ck_sync1.test_set_ck_sync1.SetCkSync1TestCase")
        self.test_case_scenario.append(list_set_checkpoint_sync1)
        # Main ck_sync1 workload (runs in parallel).
        list_checkpoint_sync1 = []
        list_checkpoint_sync1.append("mpp.gpdb.tests.storage.filerep_end_to_end.ck_sync1.test_ck_sync1.CkSync1TestCase")
        self.test_case_scenario.append(list_checkpoint_sync1)
        # Sequential tail: vacuum workload then catalog check.
        list_serial = []
        list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.ck_sync1.vacuum.test_vacuum.VacuumTestCase")
        list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat", {'outputFile':'ck_sync1_checkcat.out'}))
        self.test_case_scenario.append(list_serial, serial=True)
def do_ct_tests(self, fail_type):
'''
Run workload in change tracking
@fail_type: failover type, can be mirror or primary
'''
list = []
list.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.clean_data")
list.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.method_run_failover",[fail_type]))
if (fail_type=='mirror'):
list.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.trigger_transition")
list.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.wait_till_change_tracking_transition")
self.test_case_scenario.append(list,serial=True)
list_set_ct = []
list_set_ct.append("mpp.gpdb.tests.storage.filerep_end_to_end.set_ct.test_set_ct.SetCtTestCase")
self.test_case_scenario.append(list_set_ct)
list_ct = []
list_ct.append("mpp.gpdb.tests.storage.filerep_end_to_end.ct.test_ct.CtTestCase")
self.test_case_scenario.append(list_ct)
list_serial = []
list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.ct.vacuum.test_vacuum.VacuumTestCase")
list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat",{'outputFile':'ct_checkcat.out'}))
list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gpstate",[fail_type, 'ct']))
self.test_case_scenario.append(list_serial, serial=True)
def do_resync_tests(self, rec_mode, fail_type):
    """Suspend the transition to resync, trigger gprecoverseg, run workload.

    @param rec_mode: recovery type, can be 'full' or 'incr' (incremental)
    @param fail_type: failover type, can be 'mirror' or 'primary'
    """
    # Serial set-up: clean old data, suspend the resync transition, then
    # kick off recovery and validate segment states with gpstate.
    # (Renamed from `list` to avoid shadowing the builtin; dropped a stray
    # trailing semicolon.)
    resync_setup = []
    resync_setup.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.clean_data")
    resync_setup.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_method_suspendresync")
    if rec_mode == 'incr' and fail_type == 'primary':
        # Incremental recovery of a failed primary needs an extra fault
        # injected on the first primary before recovery starts.
        resync_setup.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.inject_fault_on_first_primary")
    resync_setup.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gprecoverseg", [rec_mode]))
    resync_setup.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gpstate", [fail_type, 'resync_' + rec_mode]))
    self.test_case_scenario.append(resync_setup, serial=True)

    # Set-up cases for the resync workload.
    list_set_resync = []
    list_set_resync.append("mpp.gpdb.tests.storage.filerep_end_to_end.set_resync.test_set_resync.SetResyncTestCase")
    self.test_case_scenario.append(list_set_resync)

    # The actual resync workload.
    list_resync = []
    list_resync.append("mpp.gpdb.tests.storage.filerep_end_to_end.resync.test_resync.ResyncTestCase")
    self.test_case_scenario.append(list_resync)

    # Serial post-checks: vacuum and catalog check.
    list_serial = []
    list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.resync.vacuum.test_vacuum.VacuumTestCase")
    list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat", {'outputFile': 'resync_checkcat.out'}))
    self.test_case_scenario.append(list_serial, serial=True)
def do_sync2_tests(self, rec_mode, fail_type):
    """Resume and reset the fault, bring the cluster into insync, run the
    workload, then perform checkmirrorseg and a catalog check.

    @param rec_mode: recovery type, can be 'full' or 'incr' (incremental)
    @param fail_type: failover type, can be 'mirror' or 'primary'
    """
    # Serial set-up: resume the suspended resync, reset fault injection,
    # restart/validate the cluster, and wait for the insync state.
    # (Dropped a stray trailing semicolon from the original.)
    resume_validate = []
    resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.clean_data")
    resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.method_resume_filerep_resync")
    resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.method_reset_fault_injection")
    if rec_mode == 'incr' and fail_type == 'primary':
        # Incremental primary recovery: inject a fault on the first mirror
        # and skip validation during the cluster restart (the False flag).
        resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.inject_fault_on_first_mirror")
        resume_validate.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.stop_start_validate", [False]))
    else:
        resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.stop_start_validate")
    resume_validate.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.wait_till_insync_transition")
    self.test_case_scenario.append(resume_validate, serial=True)

    # Set-up cases for the sync2 workload.
    list_set_sync2 = []
    list_set_sync2.append("mpp.gpdb.tests.storage.filerep_end_to_end.set_sync2.test_set_sync2.SetSync2TestCase")
    self.test_case_scenario.append(list_set_sync2)

    # The actual sync2 workload.
    list_sync2 = []
    list_sync2.append("mpp.gpdb.tests.storage.filerep_end_to_end.sync2.test_sync2.Sync2TestCase")
    self.test_case_scenario.append(list_sync2)

    # Serial post-checks: vacuum, mirror-segment check, and catalog check.
    list_serial = []
    list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.sync2.vacuum.test_vacuum.VacuumTestCase")
    list_serial.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.check_mirror_seg")
    list_serial.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat", {'outputFile': 'sync2_checkcat.out'}))
    self.test_case_scenario.append(list_serial, serial=True)
def do_clean_tests(self):
    """Drop the databases, tablespaces, and filespaces created by the run."""
    cleanup_steps = [
        "mpp.gpdb.tests.storage.filerep_end_to_end.clean.test_clean.CleanTestCase",
    ]
    self.test_case_scenario.append(cleanup_steps)
| |
"""W&B callback for lightgbm.
Really simple callback to get logging for each tree
Example usage:
param_list = [("eta", 0.08), ("max_depth", 6), ("subsample", 0.8), ("colsample_bytree", 0.8), ("alpha", 8), ("num_class", 10)]
config.update(dict(param_list))
lgb = lgb.train(param_list, d_train, callbacks=[wandb_callback()])
"""
from pathlib import Path
from typing import Callable
from typing import TYPE_CHECKING
import lightgbm # type: ignore
from lightgbm import Booster
import wandb
from wandb.sdk.lib import telemetry as wb_telemetry
# Metrics whose best value is the minimum observed during training.
MINIMIZE_METRICS = [
    "l1",
    "l2",
    "rmse",
    "mape",
    "huber",
    "fair",
    "poisson",
    "gamma",
    "binary_logloss",
]

# Metrics whose best value is the maximum observed during training.
MAXIMIZE_METRICS = ["map", "auc", "average_precision"]

if TYPE_CHECKING:
    from typing import Any, Dict, List, NamedTuple, Tuple, Union

    # Note: upstream lightgbm has this defined incorrectly
    _EvalResultTuple = Union[
        Tuple[str, str, float, bool], Tuple[str, str, float, bool, float]
    ]

    # Static stub mirroring lightgbm's callback environment namedtuple.
    class CallbackEnv(NamedTuple):
        model: Any
        params: Dict
        iteration: int
        begin_iteration: int  # fixed typo: was "begin_interation"
        end_iteration: int
        evaluation_result_list: List[_EvalResultTuple]
def _define_metric(data: str, metric_name: str) -> None:
    """Capture model performance at the best step, instead of the last
    step, of training in your `wandb.summary`.

    Args:
        data: name of the validation set (e.g. "validation").
        metric_name: LightGBM metric name (e.g. "rmse").
    """
    # Lower-case once instead of calling str.lower() three times; any
    # loss-like metric and all known minimize metrics summarize as "min".
    name = metric_name.lower()
    if "loss" in name or name in MINIMIZE_METRICS:
        wandb.define_metric(f"{data}_{metric_name}", summary="min")
    elif name in MAXIMIZE_METRICS:
        wandb.define_metric(f"{data}_{metric_name}", summary="max")
def _checkpoint_artifact(
    model: "Booster", iteration: int, aliases: "List[str]"
) -> None:
    """Upload model checkpoint as W&B artifact."""
    # NOTE: wandb.run is assumed to be initialized here; type checkers
    # infer it as Optional, hence the ignores.
    artifact_name = f"model_{wandb.run.id}"  # type: ignore
    checkpoint_path = Path(wandb.run.dir) / f"model_ckpt_{iteration}.txt"  # type: ignore
    model.save_model(checkpoint_path, num_iteration=iteration)
    artifact = wandb.Artifact(name=artifact_name, type="model")
    artifact.add_file(checkpoint_path)
    wandb.log_artifact(artifact, aliases=aliases)
def _log_feature_importance(model: "Booster") -> None:
    """Log a bar plot of the model's feature importances to W&B."""
    importances = model.feature_importance()
    names = model.feature_name()
    rows = [list(pair) for pair in zip(names, importances)]
    table = wandb.Table(data=rows, columns=["Feature", "Importance"])
    chart = wandb.plot.bar(table, "Feature", "Importance", title="Feature Importance")
    # commit=False so this lands in the same step as the caller's metrics.
    wandb.log({"Feature Importance": chart}, commit=False)
def wandb_callback(log_params: bool = True, define_metric: bool = True) -> Callable:
    """Return a LightGBM callback that reports training to Weights & Biases.

    Arguments:
        log_params: (boolean) if True (default) logs params passed to lightgbm.train as W&B config
        define_metric: (boolean) if True (default) capture model performance at the best step, instead of the last step, of training in your `wandb.summary`

    The returned callback will:
    - log params passed to lightgbm.train as W&B config (default).
    - log evaluation metrics collected by LightGBM, such as rmse, accuracy etc to Weights & Biases
    - capture the best metric in `wandb.summary` when `define_metric=True` (default).

    Use `log_summary` as an extension of this callback.
    """
    # Single-element lists act as mutable cells so the nested closures can
    # flip the flags after the first-call initialization.
    should_log_params = [log_params]
    should_define_metric = [define_metric]

    def _init(env: "CallbackEnv") -> None:
        # First-call setup: record telemetry, push params to W&B config,
        # and (optionally) declare min/max summaries for each metric.
        with wb_telemetry.context() as tel:
            tel.feature.lightgbm_wandb_callback = True
        wandb.config.update(env.params)
        should_log_params[0] = False
        if should_define_metric[0]:
            for data_name, metric_name, *_rest in env.evaluation_result_list:
                _define_metric(data_name, metric_name)

    def _callback(env: "CallbackEnv") -> None:
        if should_log_params[0]:
            _init(env)
        # A fresh recorder per call means eval_results only holds the
        # current iteration's values, so index 0 is that iteration.
        eval_results: "Dict[str, Dict[str, List[Any]]]" = {}
        lightgbm.record_evaluation(eval_results)(env)
        for data_name, metrics in eval_results.items():
            for metric_name, values in metrics.items():
                wandb.log({f"{data_name}_{metric_name}": values[0]}, commit=False)
        # Previous log statements use commit=False. This commits them.
        wandb.log({"iteration": env.iteration}, commit=True)

    return _callback
def log_summary(
    model: Booster, feature_importance: bool = True, save_model_checkpoint: bool = False
) -> None:
    """Log useful metrics about a LightGBM model once training is done.

    Arguments:
        model: (Booster) is an instance of lightgbm.basic.Booster.
        feature_importance: (boolean) if True (default), logs the feature importance plot.
        save_model_checkpoint: (boolean) if True saves the best model and upload as W&B artifacts.

    Using this along with `wandb_callback` will:
    - log `best_iteration` and `best_score` as `wandb.summary`.
    - log the feature importance plot.
    - save and upload your best trained model to Weights & Biases Artifacts (when `save_model_checkpoint = True`)
    """
    # Fail fast if there is no active run or the model has the wrong type.
    if wandb.run is None:
        raise wandb.Error("You must call wandb.init() before WandbCallback()")
    if not isinstance(model, Booster):
        raise wandb.Error("Model should be an instance of lightgbm.basic.Booster")

    summary = wandb.run.summary
    summary["best_iteration"] = model.best_iteration
    summary["best_score"] = model.best_score

    if feature_importance:
        _log_feature_importance(model)

    if save_model_checkpoint:
        _checkpoint_artifact(model, model.best_iteration, aliases=["best"])

    with wb_telemetry.context() as tel:
        tel.feature.lightgbm_log_summary = True
| |
# Copyright (c) 2008, 2009, 2010 Friedrich Romstedt
# <www.friedrichromstedt.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Developed since: Jul 2008
"""Defines the Stack class, an abstraction of Axes, providing the framework
to hold a stack of layers."""
import matplotlib.figure
import matplotlib.ticker
class Stack:
    """Abstraction of Axes, providing the framework to hold a stack of layers.

    A Stack is associated necessarily with exactly one matplotlib.axes.Axes
    instance.  The Axes instance can be handed over, or can be created during
    initialisation from a Figure provided by the user.

    There is no association with a Figure, except that during initialisation
    time, the Axes may be created by using the Figure handed over by the
    user."""

    #
    # Initialisation methods ...
    #

    def __init__(self,
            figure,
            left = 0.2, bottom = 0.2, width = 0.6, height = 0.6,
            axes = None,
            polar = None,
            autoscale_both_on = None,
            autoscale_x_on = None,
            autoscale_y_on = None,
            colorbar=None,
            locator_x=None,
            locator_y=None):
        """FIGURE is the matplotlib.figure.Figure instance where to act on.
        LEFT, BOTTOM, WIDTH, HEIGHT give the Axes rectangle in figure
        coordinates when a new Axes is created.

        AXES is optionally an existing axes instance.  If AXES is not given,
        a new axes instance will be created, either a cartesian, or a polar
        if POLAR is True.

        The initial autoscaling is controlled by AUTOSCALE_BOTH_ON,
        AUTOSCALE_X_ON, and AUTOSCALE_Y_ON.  If AUTOSCALE_BOTH_ON is given,
        it overrides AUTOSCALE_X_ON and AUTOSCALE_Y_ON.  If the autoscaling
        for some axis isn't given (either by AUTOSCALE_BOTH_ON or by the
        other arguments), it defaults to True.

        If COLORBAR isn't None, but 'vertical' or 'horizontal', the Axes
        will be initialised by setting the label and ticks position to the
        appropriate position.  This is useful if the Stack is intended to be
        used for a LayerColorbar, since the LayerColorbar cannot draw a
        Colorbar until it has received data, and therefore there would be
        nothing updating the ticks and label positions.

        LOCATOR_X and LOCATOR_Y are optional and are the major locator to be
        used for the respective axes."""

        # Define the default values for AUTOSCALE_X/Y_ON.  May be overridden
        # by AUTOSCALE_BOTH_ON if that is given ...

        if autoscale_x_on is None:
            autoscale_x_on = True
        if autoscale_y_on is None:
            autoscale_y_on = True

        # Initialise attributes ...

        if axes is None:
            # Create a new axes instance.
            axes = figure.add_axes(
                (left, bottom, width, height),
                polar = polar)

        # NOTE(review): Axes.hold() was deprecated in matplotlib 2.0 and
        # removed in 3.0; this code appears to target an older matplotlib —
        # confirm the supported version.
        axes.hold(True)

        # Take over the axes.
        self.axes = axes

        # Initialise the title etc. to some values.
        self.title = self.axes.get_title()
        self.title_kwargs = {}
        self.xlabel = self.axes.get_xlabel()
        self.ylabel = self.axes.get_ylabel()

        # Apply the autoscaling ...
        #
        # This will also store the correct values for .xlim and .ylim.

        self.set_autoscale_on(
            both_on = autoscale_both_on,
            x_on = autoscale_x_on,
            y_on = autoscale_y_on)

        # Store the locators ...

        self.set_locators(locator_x=locator_x, locator_y=locator_y)

        # Prepare for use as a colorbar ...
        #
        # Do this after setting the locators and not before.  Because
        # set_colorbar() sets also the xticks, but set_locators overrides
        # this by setting the xlocator to AutoLocator(), when done in the
        # wrong order.

        self.set_colorbar(colorbar)

        # The layers present.
        self._layers = []
        # The layers rendered to the FigureAxes.
        self._layers_drawn = []
        # Whether a reset of the FigureAxes is needed before rendering.  This
        # may occur because:
        #  1. Layers drawn have changed data.
        #  2. Layers have been removed.
        self._needs_reset = False

    #
    # Layer maintenance ...
    #

    def _flag_needs_reset(self):
        """Set .needs_reset to True if the state of the layers implies
        that a reset is needed.  If the premise isn't true, the flag
        remains unchanged."""

        # A changed, drawn layer implies a reset ...
        # (Identity comparison via id() because layers may not define
        # __eq__/__hash__ suitable for membership tests.)
        for layer in self._layers:
            if layer.has_changed() and \
                    id(layer) in map(id, self._layers_drawn):
                self._needs_reset = True

    def add_layer(self, layer):
        """Add a layer to the Renderer.  It will only be added if it
        has not been added yet."""

        if id(layer) not in map(id, self._layers):
            self._layers.append(layer)

    def remove_layer(self, layer):
        """Remove a layer from the Renderer.  Removing a nonexistent
        layer will be silently ignored."""

        if id(layer) in map(id, self._layers):
            self._layers.remove(layer)

            # Flag that a reset is needed:
            self._needs_reset = True

    #
    # Rendering ...
    #

    def render(self):
        """Render the layers to the Stack.  The Stack may be clear()'ed
        during this."""

        # Reset eventually ...

        self._flag_needs_reset()
        if self._needs_reset:
            # Clear the axes, the list of drawn layers, and the flag.
            self.clear()
            self._layers_drawn = []
            self._needs_reset = False

        # Draw all layers which are not drawn yet ...

        for layer in self._layers:
            if id(layer) in map(id, self._layers_drawn):
                # The layer does not need to be drawn.
                continue
            layer.to_axes(self.axes)
            layer.unset_changed()
            self._layers_drawn.append(layer)

    #
    # Property set methods ...
    #

    def set_title(self, title, **title_kwargs):
        """Set the title to string TITLE with kwargs *title_kwargs*."""

        self.axes.set_title(title, **title_kwargs)
        # Remember both so .clear() can restore them.
        self.title = title
        self.title_kwargs = title_kwargs

    def set_xlabel(self, xlabel):
        """Set the xlabel to string XLABEL."""

        self.axes.set_xlabel(xlabel)
        self.xlabel = xlabel

    def set_ylabel(self, ylabel):
        """Set the ylabel to string YLABEL."""

        self.axes.set_ylabel(ylabel)
        self.ylabel = ylabel

    def set_xlim(self, lim):
        """Sets the limit and the stored value for restoration in .clear().
        If LIM isn't None, autoscaling in x will be turned off, else it will
        be turned on."""

        if lim is not None:
            # This turns autoscaling off, maintaining the current xlim.
            self.set_autoscale_on(x_on = False)
            # This sets and stores the *new* xlim.
            self.axes.set_xlim(lim)
            self.xlim = lim
        else:
            # Turn autoscaling on, which will also store None in .xlim.
            self.set_autoscale_on(x_on = True)

    def set_ylim(self, lim):
        """Sets the limit and the stored value for restoration in .clear().
        If LIM isn't None, autoscaling in y will be turned off, else it will
        be turned on."""

        if lim is not None:
            # This turns autoscaling off, maintaining the current ylim.
            self.set_autoscale_on(y_on = False)
            # This sets and stores the *new* ylim.
            self.axes.set_ylim(lim)
            self.ylim = lim
        else:
            # Turn autoscaling on, which will also store None in .ylim.
            self.set_autoscale_on(y_on = True)

    def set_autoscale_on(self, both_on = None, x_on = None, y_on = None):
        """The autoscaling is controlled by BOTH_ON, X_ON, and Y_ON.  If
        BOTH_ON is given, it overrides X_ON and Y_ON.  If the autoscaling
        for some axis isn't given (either by BOTH_ON or by X_ON or Y_ON),
        its setting will be maintained."""

        if both_on is not None:
            # Override X_ON / Y_ON.
            x_on = both_on
            y_on = both_on

        if x_on is not None:
            # Set the X autoscaling...
            #
            # Do not call .set_xlim() during that, because this may result in
            # infinite recursion, if .set_autoscale_on() was called by
            # a .set_Xlim() method.

            self.axes.set_autoscalex_on(x_on)
            if x_on:
                # This signals .clear() that no limit shall be preserved.
                self.xlim = None
            else:
                # Signal .clear() the correct xlim.  If .autoscale_x_on was
                # True, then .xlim is currently None.
                self.xlim = self.axes.get_xlim()
            self.autoscale_x_on = x_on

        if y_on is not None:
            # Set the Y autoscaling ...

            self.axes.set_autoscaley_on(y_on)
            if y_on:
                # This signals .clear() that no limit shall be preserved.
                self.ylim = None
            else:
                # Signal .clear() the correct ylim.  If .autoscale_y_on was
                # True, then .ylim is currently None.
                self.ylim = self.axes.get_ylim()
            self.autoscale_y_on = y_on

        # Apply the autoscaling if needed ...
        # If X_ON or Y_ON is None, this evaluates as False, i.e., no problem
        # with None values supplied for X/Y_ON.

        if x_on or y_on:
            self.axes.autoscale_view()

    def _update_colorbar_mode(self):
        """Ensures the colorbar mode if present.  Note that returning from
        colorbar mode to normal mode will not work properly except you
        do a .clear()."""

        # Copied from matplotlib.colorbar.ColorbarBase.config_axis().
        if self.colorbar == 'vertical':
            self.axes.xaxis.set_ticks([])
            self.axes.yaxis.set_label_position('right')
            self.axes.yaxis.set_ticks_position('right')

        elif self.colorbar == 'horizontal':
            self.axes.yaxis.set_ticks([])
            self.axes.xaxis.set_label_position('bottom')

    def set_colorbar(self, colorbar):
        """Sets the colorbar mode (None, 'vertical', or 'horizontal')."""

        self.colorbar = colorbar
        self._update_colorbar_mode()

    def set_locators(self, locator_x, locator_y):
        """Sets the locators to be used.  None means 'default locator'."""

        self.locator_x = locator_x
        self.locator_y = locator_y

        # Set locators ...

        if self.locator_x is not None:
            self.axes.xaxis.set_major_locator(self.locator_x)
        else:
            self.axes.xaxis.set_major_locator(matplotlib.ticker.AutoLocator())

        if self.locator_y is not None:
            self.axes.yaxis.set_major_locator(self.locator_y)
        else:
            self.axes.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())

    #
    # Clearing method ...
    #

    def clear(self):
        """Clears the axes, maintaining autoscaling setting, xlim, ylim,
        title, xlabel, and ylabel.  If autoscaling is on for an axis, the
        limit will not be maintained, it will default then to (0.0, 1.0)."""

        # Put the axes back into initial state ...

        self.axes.clear()

        # Restore the settings stored ...

        # Restore labeling.
        self.set_title(self.title, **self.title_kwargs)
        self.set_xlabel(self.xlabel)
        self.set_ylabel(self.ylabel)

        # Restore colorbar mode
        self._update_colorbar_mode()

        # Restore locators
        if self.locator_x is not None:
            self.axes.xaxis.set_major_locator(self.locator_x)
        if self.locator_y is not None:
            self.axes.yaxis.set_major_locator(self.locator_y)

        # Restore lims.  If autoscaling was turned on, the corresponding
        # limit will be None, and the setting is maintained, because
        # autoscaling on is the default state.  If autoscaling was turned
        # off, the corresponding limit will be set, and the autoscaling will
        # be turned off in the .axes by the .set_Xlim() call.
        if self.xlim is not None:
            self.set_xlim(self.xlim)
        if self.ylim is not None:
            self.set_ylim(self.ylim)

    #
    # Property get methods ...
    #

    def get_title(self):
        """Returns the title set for the axes."""

        return self.title

    def get_xlabel(self):
        """Returns the xlabel set for the axes."""

        return self.xlabel

    def get_ylabel(self):
        """Returns the ylabel set for the axes."""

        return self.ylabel

    def get_xlim(self):
        """Return the *actual* xlimit.  This may change during
        autoscaling."""

        return self.axes.get_xlim()

    def get_ylim(self):
        """Return the *actual* ylimit.  This may change during
        autoscaling."""

        return self.axes.get_ylim()

    def get_autoscale_on(self):
        """Return if both x and y autoscaling is active for the axes
        (logical AND)."""

        return self.axes.get_autoscale_on()

    def get_autoscalex_on(self):
        """Return if x autoscaling is active for the axes."""

        return self.axes.get_autoscalex_on()

    def get_autoscaley_on(self):
        """Return if y autoscaling is active for the axes."""

        return self.axes.get_autoscaley_on()
| |
import datetime
from dateutil.parser import parse as dateutil_parse
import os
import warnings
from optparse import make_option
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import LabelCommand
from django.db import reset_queries
from django.utils.encoding import smart_str
from haystack import connections as haystack_connections
from haystack.constants import DEFAULT_ALIAS
from haystack.query import SearchQuerySet
# Fallback values used when the corresponding CLI option is not supplied.
DEFAULT_BATCH_SIZE = None
DEFAULT_AGE = None
# Labels returned by Command.is_app_or_model() to classify a CLI argument.
APP = 'app'
MODEL = 'model'
def worker(bits):
    """Multiprocessing pool entry point.

    ``bits`` is a tuple whose first element selects the task ('do_update'
    or 'do_remove'); the remaining elements are that task's arguments.
    Unknown task names are ignored.
    """
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django.db import connections

    for alias, info in connections.databases.items():
        # We need to also tread lightly with SQLite, because blindly wiping
        # out connections (via ``... = {}``) destroys in-memory DBs.
        if 'sqlite3' not in info['ENGINE']:
            try:
                del connections._connections[alias]
            except KeyError:
                pass

    # Unpack per-task arguments.  The layouts must match what
    # Command.handle_label() pushes onto the ghetto_queue.
    if bits[0] == 'do_update':
        func, model, start, end, total, using, start_date, end_date, verbosity = bits
    elif bits[0] == 'do_remove':
        func, model, pks_seen, start, upper_bound, using, verbosity = bits
    else:
        return

    unified_index = haystack_connections[using].get_unified_index()
    index = unified_index.get_index(model)
    backend = haystack_connections[using].get_backend()

    # Dispatch consistently on `func` (== bits[0]); the original compared
    # bits[0] in one branch and func in the other.
    if func == 'do_update':
        qs = build_queryset(index, model, start_date=start_date, end_date=end_date, verbosity=verbosity)
        do_update(backend, index, qs, start, end, total, verbosity=verbosity)
    elif func == 'do_remove':
        do_remove(backend, index, model, pks_seen, start, upper_bound, verbosity=verbosity)
def build_queryset(index, model, start_date=None, end_date=None, verbosity=1):
    """Return the ordered queryset of `model` instances to index.

    When ``start_date``/``end_date`` are given and the index declares an
    "updated" field, the queryset is restricted to that date window;
    otherwise a note is printed (at verbosity >= 2) and no restriction
    is applied.  (Python 2 module — print statements.)
    """
    extra_lookup_kwargs = {}
    updated_field = index.get_updated_field()

    if start_date:
        if updated_field:
            extra_lookup_kwargs['%s__gte' % updated_field] = start_date
        else:
            if verbosity >= 2:
                print "No updated date field found for '%s' - not restricting by age." % model.__name__

    if end_date:
        if updated_field:
            extra_lookup_kwargs['%s__lte' % updated_field] = end_date
        else:
            if verbosity >= 2:
                print "No updated date field found for '%s' - not restricting by age." % model.__name__

    index_qs = None

    # Prefer the modern `index_queryset()`; fall back to the deprecated
    # `get_queryset()` with a warning.
    if hasattr(index, 'get_queryset'):
        warnings.warn("'SearchIndex.get_queryset' was deprecated in Haystack v2. Please rename the method 'index_queryset'.")
        index_qs = index.get_queryset()
    else:
        index_qs = index.index_queryset()

    if not hasattr(index_qs, 'filter'):
        raise ImproperlyConfigured("The '%r' class must return a 'QuerySet' in the 'index_queryset' method." % index)

    # `.select_related()` seems like a good idea here but can fail on
    # nullable `ForeignKey` as well as what seems like other cases.
    # Order by pk so [start:end] slices partition the set deterministically.
    return index_qs.filter(**extra_lookup_kwargs).order_by(model._meta.pk.name)
def do_update(backend, index, qs, start, end, total, verbosity=1):
    """Index the [start:end] slice of `qs` into `backend` via `index`."""
    # Get a clone of the QuerySet so that the cache doesn't bloat up
    # in memory. Useful when reindexing large amounts of data.
    small_cache_qs = qs.all()
    current_qs = small_cache_qs[start:end]

    if verbosity >= 2:
        # NOTE(review): comparing our pid to our *parent's* pid looks
        # intended to distinguish the main process from pool workers, but
        # os.getpid() == os.getppid() is normally never true — confirm.
        if os.getpid() == os.getppid():
            print "  indexed %s - %d of %d." % (start+1, end, total)
        else:
            print "  indexed %s - %d of %d (by %s)." % (start+1, end, total, os.getpid())

    # FIXME: Get the right backend.
    backend.update(index, current_qs)

    # Clear out the DB connections queries because it bloats up RAM.
    reset_queries()
def do_remove(backend, index, model, pks_seen, start, upper_bound, verbosity=1):
    """Delete index entries in [start:upper_bound] whose pk is not in
    ``pks_seen`` (i.e. objects no longer present in the database)."""
    # Fetch a list of results.
    # Can't do pk range, because id's are strings (thanks comments
    # & UUIDs!).
    stuff_in_the_index = SearchQuerySet().models(model)[start:upper_bound]

    # Iterate over those results.
    for result in stuff_in_the_index:
        # Be careful not to hit the DB.
        if not smart_str(result.pk) in pks_seen:
            # The id is NOT in the small_cache_qs, issue a delete.
            if verbosity >= 2:
                print "  removing %s." % result.pk

            backend.remove(".".join([result.app_label, result.model_name, str(result.pk)]))
class Command(LabelCommand):
    help = "Freshens the index for the given app(s)."
    # optparse-style options (pre-Django-1.8 management command API).
    base_options = (
        make_option('-a', '--age', action='store', dest='age',
            default=DEFAULT_AGE, type='int',
            help='Number of hours back to consider objects new.'
        ),
        make_option('-s', '--start', action='store', dest='start_date',
            default=None, type='string',
            help='The start date for indexing within. Can be any dateutil-parsable string, recommended to be YYYY-MM-DDTHH:MM:SS.'
        ),
        make_option('-e', '--end', action='store', dest='end_date',
            default=None, type='string',
            help='The end date for indexing within. Can be any dateutil-parsable string, recommended to be YYYY-MM-DDTHH:MM:SS.'
        ),
        make_option('-b', '--batch-size', action='store', dest='batchsize',
            default=None, type='int',
            help='Number of items to index at once.'
        ),
        make_option('-r', '--remove', action='store_true', dest='remove',
            default=False, help='Remove objects from the index that are no longer present in the database.'
        ),
        make_option("-u", "--using", action="store", type="string", dest="using", default=DEFAULT_ALIAS,
            help='If provided, chooses a connection to work with.'
        ),
        make_option('-k', '--workers', action='store', dest='workers',
            default=0, type='int',
            help='Allows for the use multiple workers to parallelize indexing. Requires multiprocessing.'
        ),
    )
    option_list = LabelCommand.option_list + base_options

    def handle(self, *items, **options):
        """Stash parsed options on self, resolve the date window, and
        dispatch each label (app or model) to handle_label()."""
        self.verbosity = int(options.get('verbosity', 1))
        self.batchsize = options.get('batchsize', DEFAULT_BATCH_SIZE)
        self.start_date = None
        self.end_date = None
        self.remove = options.get('remove', False)
        self.using = options.get('using')
        self.workers = int(options.get('workers', 0))
        self.backend = haystack_connections[self.using].get_backend()

        age = options.get('age', DEFAULT_AGE)
        start_date = options.get('start_date')
        end_date = options.get('end_date')

        # --age sets a default window; an explicit --start/--end (when
        # parsable) overrides it.  Unparsable dates are silently ignored.
        if age is not None:
            self.start_date = datetime.datetime.now() - datetime.timedelta(hours=int(age))

        if start_date is not None:
            try:
                self.start_date = dateutil_parse(start_date)
            except ValueError:
                pass

        if end_date is not None:
            try:
                self.end_date = dateutil_parse(end_date)
            except ValueError:
                pass

        if not items:
            from django.db.models import get_app
            # Do all, in an INSTALLED_APPS sorted order.
            items = []

            for app in settings.INSTALLED_APPS:
                try:
                    app_label = app.split('.')[-1]
                    # NOTE(review): loaded_app is unused; get_app() is called
                    # only so apps without models raise and get skipped.
                    loaded_app = get_app(app_label)
                    items.append(app_label)
                except:
                    # No models, no problem.
                    pass

        return super(Command, self).handle(*items, **options)

    def is_app_or_model(self, label):
        """Classify `label` as APP ('foo') or MODEL ('foo.Bar')."""
        label_bits = label.split('.')

        if len(label_bits) == 1:
            return APP
        elif len(label_bits) == 2:
            return MODEL
        else:
            raise ImproperlyConfigured("'%s' isn't recognized as an app (<app_label>) or model (<app_label>.<model_name>)." % label)

    def get_models(self, label):
        """Return the list of model classes referred to by `label`."""
        from django.db.models import get_app, get_models, get_model
        app_or_model = self.is_app_or_model(label)

        if app_or_model == APP:
            app_mod = get_app(label)
            return get_models(app_mod)
        else:
            app_label, model_name = label.split('.')
            return [get_model(app_label, model_name)]

    def handle_label(self, label, **options):
        """Index (and optionally prune) every model behind `label`,
        either inline or fanned out over a multiprocessing pool."""
        from haystack.exceptions import NotHandled

        unified_index = haystack_connections[self.using].get_unified_index()

        if self.workers > 0:
            import multiprocessing

        for model in self.get_models(label):
            try:
                index = unified_index.get_index(model)
            except NotHandled:
                if self.verbosity >= 2:
                    print "Skipping '%s' - no index." % model
                continue

            qs = build_queryset(index, model, start_date=self.start_date, end_date=self.end_date, verbosity=self.verbosity)
            total = qs.count()

            if self.verbosity >= 1:
                print "Indexing %d %s." % (total, smart_str(model._meta.verbose_name_plural))

            # Snapshot of pks currently in the DB; used by the remove pass.
            pks_seen = set([smart_str(pk) for pk in qs.values_list('pk', flat=True)])
            batch_size = self.batchsize or self.backend.batch_size

            if self.workers > 0:
                ghetto_queue = []

            # Update pass: inline when workers == 0, otherwise queue tuples
            # for worker() to unpack in the pool.
            for start in range(0, total, batch_size):
                end = min(start + batch_size, total)

                if self.workers == 0:
                    do_update(self.backend, index, qs, start, end, total, self.verbosity)
                else:
                    ghetto_queue.append(('do_update', model, start, end, total, self.using, self.start_date, self.end_date, self.verbosity))

            if self.workers > 0:
                pool = multiprocessing.Pool(self.workers)
                pool.map(worker, ghetto_queue)

            if self.remove:
                if self.start_date or self.end_date or total <= 0:
                    # They're using a reduced set, which may not incorporate
                    # all pks. Rebuild the list with everything.
                    qs = index.index_queryset().values_list('pk', flat=True)
                    pks_seen = set([smart_str(pk) for pk in qs])
                    total = len(pks_seen)

                # Remove pass mirrors the update pass structure.
                if self.workers > 0:
                    ghetto_queue = []

                for start in range(0, total, batch_size):
                    upper_bound = start + batch_size

                    if self.workers == 0:
                        do_remove(self.backend, index, model, pks_seen, start, upper_bound)
                    else:
                        ghetto_queue.append(('do_remove', model, pks_seen, start, upper_bound, self.using, self.verbosity))

                if self.workers > 0:
                    pool = multiprocessing.Pool(self.workers)
                    pool.map(worker, ghetto_queue)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow.compat.v2 as tf
import numpy as np
from official.vision.detection.utils.object_detection import box_list
def _flip_boxes_left_right(boxes):
  """Horizontally mirrors bounding boxes given in normalized coordinates.

  Args:
    boxes: rank 2 float32 tensor of shape [N, 4]; each row is
      [ymin, xmin, ymax, xmax] with coordinates in [0, 1].

  Returns:
    rank 2 float32 tensor with every box mirrored around x = 0.5,
    i.e. new_xmin = 1 - xmax and new_xmax = 1 - xmin.
  """
  y_lo, x_lo, y_hi, x_hi = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  # Mirroring swaps the roles of the two x edges.
  return tf.concat([y_lo, 1.0 - x_hi, y_hi, 1.0 - x_lo], 1)
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
                             scope=None):
  """Mirrors keypoints horizontally around ``flip_point``.

  Each keypoint's x coordinate is reflected around ``flip_point`` and the
  keypoint order is remapped through ``flip_permutation``.  The permutation
  handles keypoints that are not reflection invariant: e.g. for
  ['head', 'right_eye', 'left_eye'] a sensible permutation is [0, 2, 1] so
  that 'left_eye' and 'right_eye' swap identities after the flip.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    flip_point: (float) scalar tensor; the x coordinate to reflect around.
    flip_permutation: rank 1 int32 tensor mapping original keypoint indices
      to flipped keypoint indices.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope or 'FlipHorizontal'):
    # Move the keypoint axis to the front so one gather applies the
    # permutation across all instances at once.
    permuted = tf.gather(tf.transpose(a=keypoints, perm=[1, 0, 2]),
                         flip_permutation)
    ys, xs = tf.split(value=permuted, num_or_size_splits=2, axis=2)
    # Reflect x around flip_point: x' = 2 * flip_point - x.
    reflected = tf.concat([ys, flip_point * 2.0 - xs], 2)
    return tf.transpose(a=reflected, perm=[1, 0, 2])
def keypoint_change_coordinate_frame(keypoints, window, scope=None):
  """Changes coordinate frame of the keypoints to be relative to window's frame.

  Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
  coordinates from keypoints of shape [num_instances, num_keypoints, 2]
  to be relative to this window.

  An example use case is data augmentation: where we are given groundtruth
  keypoints and would like to randomly crop the image to some window. In this
  case we need to change the coordinate frame of each groundtruth keypoint to
  be relative to this new window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window we should change the coordinate frame to.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  if not scope:
    scope = 'ChangeCoordinateFrame'
  with tf.name_scope(scope):
    win_height = window[2] - window[0]
    win_width = window[3] - window[1]
    # Translate so the window's top-left corner becomes the origin, then
    # rescale by the window size.  BUG FIX: this previously called
    # `box_list_ops.scale`, a name never imported in this module (only
    # `box_list` is imported), which raised NameError; `keypoint_scale`
    # defined below is the keypoint variant of that scaling op.
    new_keypoints = keypoint_scale(keypoints - [window[0], window[1]],
                                   1.0 / win_height, 1.0 / win_width)
  return new_keypoints
def keypoint_prune_outside_window(keypoints, window, scope=None):
  """Replaces keypoints that fall outside ``window`` with NaN.

  Unlike clip_to_window, which moves out-of-window keypoints onto the window
  boundary, pruned keypoints are marked invalid by setting them to NaN.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window outside of which the op should prune the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope or 'PruneOutsideWindow'):
    ys, xs = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    y_lo, x_lo, y_hi, x_hi = tf.unstack(window)
    # A keypoint is kept only when both coordinates lie inside the window
    # (boundaries inclusive).
    inside = tf.logical_and(
        tf.logical_and(ys >= y_lo, ys <= y_hi),
        tf.logical_and(xs >= x_lo, xs <= x_hi))
    pruned_ys = tf.where(inside, ys, np.nan * tf.ones_like(ys))
    pruned_xs = tf.where(inside, xs, np.nan * tf.ones_like(xs))
    return tf.concat([pruned_ys, pruned_xs], 2)
def random_horizontal_flip(image,
                           boxes=None,
                           masks=None,
                           keypoints=None,
                           keypoint_flip_permutation=None,
                           seed=None):
  """Randomly flips the image and detections horizontally.

  The probability of flipping the image is 50%.  Boxes, masks and keypoints
  (when provided) are flipped with the same random draw so the annotations
  stay aligned with the image.

  Args:
    image: rank 3 float32 tensor with shape [height, width, channels].
    boxes: (optional) rank 2 float32 tensor with shape [N, 4]
      containing the bounding boxes.
      Boxes are in normalized form meaning their coordinates vary
      between [0, 1].
      Each row is in the form of [ymin, xmin, ymax, xmax].
    masks: (optional) rank 3 float32 tensor with shape
      [num_instances, height, width] containing instance masks. The masks
      are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x
      normalized coordinates.
    keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint
      flip permutation.
    seed: random seed

  Returns:
    image: image which is the same shape as input image.
    If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
    the function also returns the following tensors.
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
      Boxes are in normalized form meaning their coordinates vary
      between [0, 1].
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
      containing instance masks.
    keypoints: rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]

  Raises:
    ValueError: if keypoints are provided but keypoint_flip_permutation is
      not.
  """

  def _flip_image(image):
    # flip image
    image_flipped = tf.image.flip_left_right(image)
    return image_flipped

  if keypoints is not None and keypoint_flip_permutation is None:
    # BUG FIX: the message previously named a non-existent
    # 'keypoints_flip_permutation' argument.
    raise ValueError(
        'keypoints are provided but keypoint_flip_permutation is not provided')
  with tf.name_scope('RandomHorizontalFlip'):
    result = []
    # Single coin flip shared by image, boxes, masks and keypoints so all
    # outputs are flipped (or not) together.
    do_a_flip_random = tf.greater(tf.random.uniform([], seed=seed), 0.5)
    # flip image
    image = tf.cond(
        pred=do_a_flip_random,
        true_fn=lambda: _flip_image(image),
        false_fn=lambda: image)
    result.append(image)
    # flip boxes
    if boxes is not None:
      boxes = tf.cond(
          pred=do_a_flip_random,
          true_fn=lambda: _flip_boxes_left_right(boxes),
          false_fn=lambda: boxes)
      result.append(boxes)
    # flip masks
    if masks is not None:
      masks = tf.cond(
          pred=do_a_flip_random,
          true_fn=lambda: _flip_masks_left_right(masks),
          false_fn=lambda: masks)
      result.append(masks)
    # flip keypoints; 0.5 is the reflection axis for normalized coordinates.
    if keypoints is not None and keypoint_flip_permutation is not None:
      permutation = keypoint_flip_permutation
      keypoints = tf.cond(
          pred=do_a_flip_random,
          true_fn=lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
          false_fn=lambda: keypoints)
      result.append(keypoints)
    return tuple(result)
def _compute_new_static_size(image, min_dimension, max_dimension):
  """Compute new static shape for resize_to_range method.

  Args:
    image: a 3-D tensor whose static shape [height, width, channels] is
      fully defined.
    min_dimension: (int) desired size of the smaller image dimension.
    max_dimension: (int or falsy) maximum allowed size of the larger image
      dimension; when falsy only min_dimension is honored.

  Returns:
    A rank-1 int32 constant tensor [new_height, new_width, channels].
  """
  image_shape = image.get_shape().as_list()
  orig_height = image_shape[0]
  orig_width = image_shape[1]
  num_channels = image_shape[2]
  orig_min_dim = min(orig_height, orig_width)
  # Calculates the larger of the possible sizes
  large_scale_factor = min_dimension / float(orig_min_dim)
  # Scaling orig_(height|width) by large_scale_factor will make the smaller
  # dimension equal to min_dimension, save for floating point rounding errors.
  # For reasonably-sized images, taking the nearest integer will reliably
  # eliminate this error.
  large_height = int(round(orig_height * large_scale_factor))
  large_width = int(round(orig_width * large_scale_factor))
  large_size = [large_height, large_width]
  # BUG FIX: new_size must be initialized before the branch below.  It was
  # previously assigned only inside `if max_dimension:`, so a falsy
  # max_dimension raised NameError at the return (the dynamic-shape twin of
  # this helper has the corresponding `else: new_size = large_size`).
  new_size = large_size
  if max_dimension:
    # Calculates the smaller of the possible sizes, use that if the larger
    # is too big.
    orig_max_dim = max(orig_height, orig_width)
    small_scale_factor = max_dimension / float(orig_max_dim)
    # Scaling orig_(height|width) by small_scale_factor will make the larger
    # dimension equal to max_dimension, save for floating point rounding
    # errors. For reasonably-sized images, taking the nearest integer will
    # reliably eliminate this error.
    small_height = int(round(orig_height * small_scale_factor))
    small_width = int(round(orig_width * small_scale_factor))
    if max(large_size) > max_dimension:
      new_size = [small_height, small_width]
  return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
  """Compute new dynamic shape for resize_to_range method.

  Graph-mode counterpart of _compute_new_static_size: the image shape is
  only known at run time, so all arithmetic is expressed as TF ops and the
  large-vs-small choice is a tf.cond.

  Args:
    image: a 3-D tensor whose shape is only known dynamically.
    min_dimension: (int) desired size of the smaller image dimension.
    max_dimension: (int or falsy Python value) maximum allowed size of the
      larger image dimension; when falsy only min_dimension is honored.

  Returns:
    A rank-1 int32 tensor [new_height, new_width, channels].
  """
  image_shape = tf.shape(input=image)
  orig_height = tf.cast(image_shape[0], dtype=tf.float32)
  orig_width = tf.cast(image_shape[1], dtype=tf.float32)
  num_channels = image_shape[2]
  orig_min_dim = tf.minimum(orig_height, orig_width)
  # Calculates the larger of the possible sizes
  min_dimension = tf.constant(min_dimension, dtype=tf.float32)
  large_scale_factor = min_dimension / orig_min_dim
  # Scaling orig_(height|width) by large_scale_factor will make the smaller
  # dimension equal to min_dimension, save for floating point rounding errors.
  # For reasonably-sized images, taking the nearest integer will reliably
  # eliminate this error.
  large_height = tf.cast(
      tf.round(orig_height * large_scale_factor), dtype=tf.int32)
  large_width = tf.cast(
      tf.round(orig_width * large_scale_factor), dtype=tf.int32)
  large_size = tf.stack([large_height, large_width])
  # NOTE: `max_dimension` is a Python value here, so this branch is decided
  # at graph-construction time, not per-example.
  if max_dimension:
    # Calculates the smaller of the possible sizes, use that if the larger
    # is too big.
    orig_max_dim = tf.maximum(orig_height, orig_width)
    max_dimension = tf.constant(max_dimension, dtype=tf.float32)
    small_scale_factor = max_dimension / orig_max_dim
    # Scaling orig_(height|width) by small_scale_factor will make the larger
    # dimension equal to max_dimension, save for floating point rounding
    # errors. For reasonably-sized images, taking the nearest integer will
    # reliably eliminate this error.
    small_height = tf.cast(
        tf.round(orig_height * small_scale_factor), dtype=tf.int32)
    small_width = tf.cast(
        tf.round(orig_width * small_scale_factor), dtype=tf.int32)
    small_size = tf.stack([small_height, small_width])
    # Runtime choice: fall back to the small size only when the large size
    # would exceed max_dimension.
    new_size = tf.cond(
        pred=tf.cast(tf.reduce_max(input_tensor=large_size), dtype=tf.float32) >
        max_dimension,
        true_fn=lambda: small_size,
        false_fn=lambda: large_size)
  else:
    new_size = large_size
  return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
                    masks=None,
                    min_dimension=None,
                    max_dimension=None,
                    method=tf.image.ResizeMethod.BILINEAR,
                    align_corners=False,
                    pad_to_max_dimension=False):
  """Resizes an image so its dimensions are within the provided value.

  The output size can be described by two cases:
  1. If the image can be rescaled so its minimum dimension is equal to the
     provided value without the other dimension exceeding max_dimension,
     then do so.
  2. Otherwise, resize so the largest dimension is equal to max_dimension.

  Args:
    image: A 3D tensor of shape [height, width, channels]
    masks: (optional) rank 3 float32 tensor with shape
      [num_instances, height, width] containing instance masks.
    min_dimension: (optional) (scalar) desired size of the smaller image
      dimension.
    max_dimension: (optional) (scalar) maximum allowed size
      of the larger image dimension.
    method: (optional) interpolation method used in resizing. Defaults to
      BILINEAR.
    align_corners: bool. If true, exactly align all 4 corners of the input
      and output. Defaults to False.
    pad_to_max_dimension: Whether to resize the image and pad it with zeros
      so the resulting image is of the spatial size
      [max_dimension, max_dimension]. If masks are included they are padded
      similarly.

  Returns:
    Note that the position of the resized_image_shape changes based on whether
    masks are present.
    resized_image: A 3D tensor of shape [new_height, new_width, channels],
      where the image has been resized (with bilinear interpolation) so that
      min(new_height, new_width) == min_dimension or
      max(new_height, new_width) == max_dimension.
    resized_masks: If masks is not None, also outputs masks. A 3D tensor of
      shape [num_instances, new_height, new_width].
    resized_image_shape: A 1D tensor of shape [3] containing shape of the
      resized image.

  Raises:
    ValueError: if the image is not a 3D tensor.
  """
  if len(image.get_shape()) != 3:
    raise ValueError('Image should be 3D tensor')
  with tf.name_scope('ResizeToRange'):
    # Fully-defined static shapes can be computed with plain Python math;
    # otherwise the target size must be built from graph ops.
    if image.get_shape().is_fully_defined():
      new_size = _compute_new_static_size(image, min_dimension, max_dimension)
    else:
      new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
    # new_size is [height, width, channels]; the resize op only wants
    # the spatial part.
    new_image = tf.image.resize(image, new_size[:-1], method=method)
    if pad_to_max_dimension:
      new_image = tf.image.pad_to_bounding_box(
          new_image, 0, 0, max_dimension, max_dimension)
    result = [new_image]
    if masks is not None:
      # Masks get a dummy channel axis so the image resize op applies;
      # nearest-neighbor keeps mask values binary.
      new_masks = tf.expand_dims(masks, 3)
      new_masks = tf.image.resize(
          new_masks,
          new_size[:-1],
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
      new_masks = tf.squeeze(new_masks, 3)
      if pad_to_max_dimension:
        new_masks = tf.image.pad_to_bounding_box(
            new_masks, 0, 0, max_dimension, max_dimension)
      result.append(new_masks)
    # NOTE(review): when pad_to_max_dimension is set, the appended shape is
    # the pre-padding resize target, not the padded spatial size — confirm
    # callers expect that.
    result.append(new_size)
    return result
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
  """Copies every extra field from one BoxList onto another.

  Args:
    boxlist_to_copy_to: BoxList to which extra fields are copied.
    boxlist_to_copy_from: BoxList from which fields are copied.

  Returns:
    boxlist_to_copy_to, now carrying the source's extra fields.
  """
  source = boxlist_to_copy_from
  for field_name in source.get_extra_fields():
    boxlist_to_copy_to.add_field(field_name, source.get_field(field_name))
  return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
  """Scales box coordinates in the x and y dimensions.

  Args:
    boxlist: BoxList holding N boxes
    y_scale: (float) scalar tensor
    x_scale: (float) scalar tensor
    scope: name scope.

  Returns:
    boxlist: BoxList holding N boxes, with extra fields carried over.
  """
  with tf.name_scope(scope or 'Scale'):
    y_scale = tf.cast(y_scale, tf.float32)
    x_scale = tf.cast(x_scale, tf.float32)
    ymin, xmin, ymax, xmax = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    rescaled = tf.concat(
        [y_scale * ymin, x_scale * xmin, y_scale * ymax, x_scale * xmax], 1)
    # Preserve any extra fields (scores, classes, ...) on the new BoxList.
    return _copy_extra_fields(box_list.BoxList(rescaled), boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
  """Scales keypoint coordinates in x and y dimensions.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    y_scale: (float) scalar tensor
    x_scale: (float) scalar tensor
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope or 'Scale'):
    # Broadcast a [1, 1, 2] scale over every instance/keypoint pair.
    scale_yx = [[[tf.cast(y_scale, tf.float32),
                  tf.cast(x_scale, tf.float32)]]]
    return keypoints * scale_yx
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates (only when `keypoints` is given).
  """
  shape = tf.shape(input=image)
  height, width = shape[0], shape[1]
  # Multiplying normalized coordinates by the pixel extents converts them.
  scaled_boxes = box_list_scale(box_list.BoxList(boxes), height, width).get()
  outputs = [image, scaled_boxes]
  if keypoints is not None:
    outputs.append(keypoint_scale(keypoints, height, width))
  return tuple(outputs)
| |
from __future__ import absolute_import, unicode_literals
import json
import unittest
from mopidy.models import (
Album, Artist, ModelJSONEncoder, Playlist, Ref, SearchResult, TlTrack,
Track, model_json_decoder)
class GenericCopyTest(unittest.TestCase):
    """Checks that ``copy()`` returns an equal but distinct model object."""

    def compare(self, orig, other):
        # Copies must be value-equal but not the same object.
        self.assertEqual(orig, other)
        self.assertNotEqual(id(orig), id(other))

    def test_copying_track(self):
        track = Track()
        self.compare(track, track.copy())

    def test_copying_artist(self):
        artist = Artist()
        self.compare(artist, artist.copy())

    def test_copying_album(self):
        album = Album()
        self.compare(album, album.copy())

    def test_copying_playlist(self):
        playlist = Playlist()
        self.compare(playlist, playlist.copy())

    def test_copying_track_with_basic_values(self):
        track = Track(name='foo', uri='bar')
        copy = track.copy(name='baz')
        self.assertEqual('baz', copy.name)
        self.assertEqual('bar', copy.uri)

    def test_copying_track_with_missing_values(self):
        track = Track(uri='bar')
        copy = track.copy(name='baz')
        self.assertEqual('baz', copy.name)
        self.assertEqual('bar', copy.uri)

    def test_copying_track_with_private_internal_value(self):
        artist1 = Artist(name='foo')
        artist2 = Artist(name='bar')
        track = Track(artists=[artist1])
        copy = track.copy(artists=[artist2])
        self.assertIn(artist2, copy.artists)

    def test_copying_track_with_invalid_key(self):
        with self.assertRaises(TypeError):
            Track().copy(invalid_key=True)

    def test_copying_track_to_remove(self):
        track = Track(name='foo').copy(name=None)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(track.__dict__, Track().__dict__)
class RefTest(unittest.TestCase):
    """Covers the Ref model: field immutability, serialization, and the
    per-type convenience constructors."""

    def test_uri(self):
        uri = 'an_uri'
        ref = Ref(uri=uri)
        self.assertEqual(ref.uri, uri)
        # Model fields are read-only.
        with self.assertRaises(AttributeError):
            ref.uri = None

    def test_name(self):
        name = 'a name'
        ref = Ref(name=name)
        self.assertEqual(ref.name, name)
        with self.assertRaises(AttributeError):
            ref.name = None

    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            SearchResult(foo='baz')

    def test_repr_without_results(self):
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(
            "Ref(name=u'foo', type=u'artist', uri=u'uri')",
            repr(Ref(uri='uri', name='foo', type='artist')))

    def test_serialize_without_results(self):
        self.assertDictEqual(
            {'__model__': 'Ref', 'uri': 'uri'},
            Ref(uri='uri').serialize())

    def test_to_json_and_back(self):
        ref1 = Ref(uri='uri')
        serialized = json.dumps(ref1, cls=ModelJSONEncoder)
        ref2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(ref1, ref2)

    def test_type_constants(self):
        self.assertEqual(Ref.ALBUM, 'album')
        self.assertEqual(Ref.ARTIST, 'artist')
        self.assertEqual(Ref.DIRECTORY, 'directory')
        self.assertEqual(Ref.PLAYLIST, 'playlist')
        self.assertEqual(Ref.TRACK, 'track')

    def test_album_constructor(self):
        ref = Ref.album(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.ALBUM)

    def test_artist_constructor(self):
        ref = Ref.artist(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.ARTIST)

    def test_directory_constructor(self):
        ref = Ref.directory(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.DIRECTORY)

    def test_playlist_constructor(self):
        ref = Ref.playlist(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.PLAYLIST)

    def test_track_constructor(self):
        ref = Ref.track(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.TRACK)
class ArtistTest(unittest.TestCase):
    """Covers the Artist model: immutability, serialization/JSON
    round-tripping, and equality/hash semantics."""

    def test_uri(self):
        uri = 'an_uri'
        artist = Artist(uri=uri)
        self.assertEqual(artist.uri, uri)
        # Model fields are read-only.
        with self.assertRaises(AttributeError):
            artist.uri = None

    def test_name(self):
        name = 'a name'
        artist = Artist(name=name)
        self.assertEqual(artist.name, name)
        with self.assertRaises(AttributeError):
            artist.name = None

    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        artist = Artist(musicbrainz_id=mb_id)
        self.assertEqual(artist.musicbrainz_id, mb_id)
        with self.assertRaises(AttributeError):
            artist.musicbrainz_id = None

    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            Artist(foo='baz')

    def test_invalid_kwarg_with_name_matching_method(self):
        # Method names must not be accepted as field names.
        with self.assertRaises(TypeError):
            Artist(copy='baz')
        with self.assertRaises(TypeError):
            Artist(serialize='baz')

    def test_repr(self):
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(
            "Artist(name=u'name', uri=u'uri')",
            repr(Artist(uri='uri', name='name')))

    def test_serialize(self):
        self.assertDictEqual(
            {'__model__': 'Artist', 'uri': 'uri', 'name': 'name'},
            Artist(uri='uri', name='name').serialize())

    def test_serialize_falsy_values(self):
        # Empty strings must survive serialization, not be dropped.
        self.assertDictEqual(
            {'__model__': 'Artist', 'uri': '', 'name': ''},
            Artist(uri='', name='').serialize())

    def test_to_json_and_back(self):
        artist1 = Artist(uri='uri', name='name')
        serialized = json.dumps(artist1, cls=ModelJSONEncoder)
        artist2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(artist1, artist2)

    def test_to_json_and_back_with_unknown_field(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['foo'] = 'foo'
        serialized = json.dumps(artist)
        with self.assertRaises(TypeError):
            json.loads(serialized, object_hook=model_json_decoder)

    def test_to_json_and_back_with_field_matching_method(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['copy'] = 'foo'
        serialized = json.dumps(artist)
        with self.assertRaises(TypeError):
            json.loads(serialized, object_hook=model_json_decoder)

    def test_to_json_and_back_with_field_matching_internal_field(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['__mro__'] = 'foo'
        serialized = json.dumps(artist)
        with self.assertRaises(TypeError):
            json.loads(serialized, object_hook=model_json_decoder)

    def test_eq_name(self):
        artist1 = Artist(name='name')
        artist2 = Artist(name='name')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))

    def test_eq_uri(self):
        artist1 = Artist(uri='uri')
        artist2 = Artist(uri='uri')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))

    def test_eq_musibrainz_id(self):
        artist1 = Artist(musicbrainz_id='id')
        artist2 = Artist(musicbrainz_id='id')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))

    def test_eq(self):
        artist1 = Artist(uri='uri', name='name', musicbrainz_id='id')
        artist2 = Artist(uri='uri', name='name', musicbrainz_id='id')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))

    def test_eq_none(self):
        self.assertNotEqual(Artist(), None)

    def test_eq_other(self):
        self.assertNotEqual(Artist(), 'other')

    def test_ne_name(self):
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))

    def test_ne_uri(self):
        artist1 = Artist(uri='uri1')
        artist2 = Artist(uri='uri2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))

    def test_ne_musicbrainz_id(self):
        artist1 = Artist(musicbrainz_id='id1')
        artist2 = Artist(musicbrainz_id='id2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))

    def test_ne(self):
        artist1 = Artist(uri='uri1', name='name1', musicbrainz_id='id1')
        artist2 = Artist(uri='uri2', name='name2', musicbrainz_id='id2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))
class AlbumTest(unittest.TestCase):
    """Covers the Album model: immutability, collection fields,
    serialization/JSON round-tripping, and equality/hash semantics."""

    def test_uri(self):
        uri = 'an_uri'
        album = Album(uri=uri)
        self.assertEqual(album.uri, uri)
        # Model fields are read-only.
        with self.assertRaises(AttributeError):
            album.uri = None

    def test_name(self):
        name = 'a name'
        album = Album(name=name)
        self.assertEqual(album.name, name)
        with self.assertRaises(AttributeError):
            album.name = None

    def test_artists(self):
        artist = Artist()
        album = Album(artists=[artist])
        self.assertIn(artist, album.artists)
        with self.assertRaises(AttributeError):
            album.artists = None

    def test_artists_none(self):
        # None normalizes to an empty collection.
        self.assertEqual(set(), Album(artists=None).artists)

    def test_num_tracks(self):
        num_tracks = 11
        album = Album(num_tracks=num_tracks)
        self.assertEqual(album.num_tracks, num_tracks)
        with self.assertRaises(AttributeError):
            album.num_tracks = None

    def test_num_discs(self):
        num_discs = 2
        album = Album(num_discs=num_discs)
        self.assertEqual(album.num_discs, num_discs)
        with self.assertRaises(AttributeError):
            album.num_discs = None

    def test_date(self):
        date = '1977-01-01'
        album = Album(date=date)
        self.assertEqual(album.date, date)
        with self.assertRaises(AttributeError):
            album.date = None

    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        album = Album(musicbrainz_id=mb_id)
        self.assertEqual(album.musicbrainz_id, mb_id)
        with self.assertRaises(AttributeError):
            album.musicbrainz_id = None

    def test_images(self):
        image = 'data:foobar'
        album = Album(images=[image])
        self.assertIn(image, album.images)
        with self.assertRaises(AttributeError):
            album.images = None

    def test_images_none(self):
        self.assertEqual(set(), Album(images=None).images)

    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            Album(foo='baz')

    def test_repr_without_artists(self):
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(
            "Album(name=u'name', uri=u'uri')",
            repr(Album(uri='uri', name='name')))

    def test_repr_with_artists(self):
        self.assertEqual(
            "Album(artists=[Artist(name=u'foo')], name=u'name', uri=u'uri')",
            repr(Album(uri='uri', name='name', artists=[Artist(name='foo')])))

    def test_serialize_without_artists(self):
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name'},
            Album(uri='uri', name='name').serialize())

    def test_serialize_with_artists(self):
        artist = Artist(name='foo')
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name',
             'artists': [artist.serialize()]},
            Album(uri='uri', name='name', artists=[artist]).serialize())

    def test_serialize_with_images(self):
        image = 'data:foobar'
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name',
             'images': [image]},
            Album(uri='uri', name='name', images=[image]).serialize())

    def test_to_json_and_back(self):
        album1 = Album(uri='uri', name='name', artists=[Artist(name='foo')])
        serialized = json.dumps(album1, cls=ModelJSONEncoder)
        album2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(album1, album2)

    def test_eq_name(self):
        album1 = Album(name='name')
        album2 = Album(name='name')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_uri(self):
        album1 = Album(uri='uri')
        album2 = Album(uri='uri')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_artists(self):
        artists = [Artist()]
        album1 = Album(artists=artists)
        album2 = Album(artists=artists)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_artists_order(self):
        # Artist ordering must not affect equality.
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        album1 = Album(artists=[artist1, artist2])
        album2 = Album(artists=[artist2, artist1])
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_num_tracks(self):
        album1 = Album(num_tracks=2)
        album2 = Album(num_tracks=2)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_date(self):
        date = '1977-01-01'
        album1 = Album(date=date)
        album2 = Album(date=date)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_musibrainz_id(self):
        album1 = Album(musicbrainz_id='id')
        album2 = Album(musicbrainz_id='id')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq(self):
        artists = [Artist()]
        album1 = Album(
            name='name', uri='uri', artists=artists, num_tracks=2,
            musicbrainz_id='id')
        album2 = Album(
            name='name', uri='uri', artists=artists, num_tracks=2,
            musicbrainz_id='id')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))

    def test_eq_none(self):
        self.assertNotEqual(Album(), None)

    def test_eq_other(self):
        self.assertNotEqual(Album(), 'other')

    def test_ne_name(self):
        album1 = Album(name='name1')
        album2 = Album(name='name2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne_uri(self):
        album1 = Album(uri='uri1')
        album2 = Album(uri='uri2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne_artists(self):
        album1 = Album(artists=[Artist(name='name1')])
        album2 = Album(artists=[Artist(name='name2')])
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne_num_tracks(self):
        album1 = Album(num_tracks=1)
        album2 = Album(num_tracks=2)
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne_date(self):
        album1 = Album(date='1977-01-01')
        album2 = Album(date='1977-01-02')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne_musicbrainz_id(self):
        album1 = Album(musicbrainz_id='id1')
        album2 = Album(musicbrainz_id='id2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))

    def test_ne(self):
        album1 = Album(
            name='name1', uri='uri1', artists=[Artist(name='name1')],
            num_tracks=1, musicbrainz_id='id1')
        album2 = Album(
            name='name2', uri='uri2', artists=[Artist(name='name2')],
            num_tracks=2, musicbrainz_id='id2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
class TrackTest(unittest.TestCase):
def test_uri(self):
uri = 'an_uri'
track = Track(uri=uri)
self.assertEqual(track.uri, uri)
with self.assertRaises(AttributeError):
track.uri = None
def test_name(self):
name = 'a name'
track = Track(name=name)
self.assertEqual(track.name, name)
with self.assertRaises(AttributeError):
track.name = None
    # Field tests: each Track field is settable only via the constructor;
    # the model is immutable, so direct attribute assignment raises
    # AttributeError.
    def test_artists(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(artists=artists)
        self.assertEqual(set(track.artists), set(artists))
        with self.assertRaises(AttributeError):
            track.artists = None
    def test_artists_none(self):
        # Passing artists=None normalizes to an empty collection.
        self.assertEqual(set(), Track(artists=None).artists)
    def test_composers(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(composers=artists)
        self.assertEqual(set(track.composers), set(artists))
        with self.assertRaises(AttributeError):
            track.composers = None
    def test_composers_none(self):
        self.assertEqual(set(), Track(composers=None).composers)
    def test_performers(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(performers=artists)
        self.assertEqual(set(track.performers), set(artists))
        with self.assertRaises(AttributeError):
            track.performers = None
    def test_performers_none(self):
        self.assertEqual(set(), Track(performers=None).performers)
    def test_album(self):
        album = Album()
        track = Track(album=album)
        self.assertEqual(track.album, album)
        with self.assertRaises(AttributeError):
            track.album = None
    def test_track_no(self):
        track_no = 7
        track = Track(track_no=track_no)
        self.assertEqual(track.track_no, track_no)
        with self.assertRaises(AttributeError):
            track.track_no = None
    def test_disc_no(self):
        disc_no = 2
        track = Track(disc_no=disc_no)
        self.assertEqual(track.disc_no, disc_no)
        with self.assertRaises(AttributeError):
            track.disc_no = None
    def test_date(self):
        date = '1977-01-01'
        track = Track(date=date)
        self.assertEqual(track.date, date)
        with self.assertRaises(AttributeError):
            track.date = None
    def test_length(self):
        length = 137000
        track = Track(length=length)
        self.assertEqual(track.length, length)
        with self.assertRaises(AttributeError):
            track.length = None
    def test_bitrate(self):
        bitrate = 160
        track = Track(bitrate=bitrate)
        self.assertEqual(track.bitrate, bitrate)
        with self.assertRaises(AttributeError):
            track.bitrate = None
    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        track = Track(musicbrainz_id=mb_id)
        self.assertEqual(track.musicbrainz_id, mb_id)
        with self.assertRaises(AttributeError):
            track.musicbrainz_id = None
    def test_invalid_kwarg(self):
        # Unknown constructor keywords are rejected outright.
        with self.assertRaises(TypeError):
            Track(foo='baz')
    # repr/serialize tests pin the exact external representations.
    # The u'...' prefixes in the expected repr strings are Python 2
    # unicode literals; these tests target Python 2.
    def test_repr_without_artists(self):
        self.assertEquals(
            "Track(name=u'name', uri=u'uri')",
            repr(Track(uri='uri', name='name')))
    def test_repr_with_artists(self):
        self.assertEquals(
            "Track(artists=[Artist(name=u'foo')], name=u'name', uri=u'uri')",
            repr(Track(uri='uri', name='name', artists=[Artist(name='foo')])))
    def test_serialize_without_artists(self):
        # Unset fields are omitted from the serialized dict.
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name'},
            Track(uri='uri', name='name').serialize())
    def test_serialize_with_artists(self):
        artist = Artist(name='foo')
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name',
             'artists': [artist.serialize()]},
            Track(uri='uri', name='name', artists=[artist]).serialize())
    def test_serialize_with_album(self):
        album = Album(name='foo')
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name',
             'album': album.serialize()},
            Track(uri='uri', name='name', album=album).serialize())
    def test_to_json_and_back(self):
        # Round trip through the model JSON encoder/decoder is lossless.
        track1 = Track(
            uri='uri', name='name', album=Album(name='foo'),
            artists=[Artist(name='foo')])
        serialized = json.dumps(track1, cls=ModelJSONEncoder)
        track2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(track1, track2)
    # Equality tests: tracks agreeing on a field compare equal and hash
    # identically (the models behave as value objects).
    def test_eq_uri(self):
        track1 = Track(uri='uri1')
        track2 = Track(uri='uri1')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_name(self):
        track1 = Track(name='name1')
        track2 = Track(name='name1')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_artists(self):
        artists = [Artist()]
        track1 = Track(artists=artists)
        track2 = Track(artists=artists)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_artists_order(self):
        # Artist ordering is not significant for equality or hashing.
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        track1 = Track(artists=[artist1, artist2])
        track2 = Track(artists=[artist2, artist1])
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_album(self):
        album = Album()
        track1 = Track(album=album)
        track2 = Track(album=album)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_track_no(self):
        track1 = Track(track_no=1)
        track2 = Track(track_no=1)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_date(self):
        date = '1977-01-01'
        track1 = Track(date=date)
        track2 = Track(date=date)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_length(self):
        track1 = Track(length=100)
        track2 = Track(length=100)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_bitrate(self):
        track1 = Track(bitrate=100)
        track2 = Track(bitrate=100)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_musibrainz_id(self):
        # NOTE(review): "musibrainz" is a typo in the test name only.
        track1 = Track(musicbrainz_id='id')
        track2 = Track(musicbrainz_id='id')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq(self):
        date = '1977-01-01'
        artists = [Artist()]
        album = Album()
        track1 = Track(
            uri='uri', name='name', artists=artists, album=album, track_no=1,
            date=date, length=100, bitrate=100, musicbrainz_id='id')
        track2 = Track(
            uri='uri', name='name', artists=artists, album=album, track_no=1,
            date=date, length=100, bitrate=100, musicbrainz_id='id')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_none(self):
        self.assertNotEqual(Track(), None)
    def test_eq_other(self):
        self.assertNotEqual(Track(), 'other')
    # Inequality tests: any differing field makes tracks unequal and, per
    # these tests, also expected to yield different hashes.
    def test_ne_uri(self):
        track1 = Track(uri='uri1')
        track2 = Track(uri='uri2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_name(self):
        track1 = Track(name='name1')
        track2 = Track(name='name2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_artists(self):
        track1 = Track(artists=[Artist(name='name1')])
        track2 = Track(artists=[Artist(name='name2')])
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_album(self):
        track1 = Track(album=Album(name='name1'))
        track2 = Track(album=Album(name='name2'))
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_track_no(self):
        track1 = Track(track_no=1)
        track2 = Track(track_no=2)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_date(self):
        track1 = Track(date='1977-01-01')
        track2 = Track(date='1977-01-02')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_length(self):
        track1 = Track(length=100)
        track2 = Track(length=200)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_bitrate(self):
        track1 = Track(bitrate=100)
        track2 = Track(bitrate=200)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_musicbrainz_id(self):
        track1 = Track(musicbrainz_id='id1')
        track2 = Track(musicbrainz_id='id2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne(self):
        track1 = Track(
            uri='uri1', name='name1', artists=[Artist(name='name1')],
            album=Album(name='name1'), track_no=1, date='1977-01-01',
            length=100, bitrate=100, musicbrainz_id='id1')
        track2 = Track(
            uri='uri2', name='name2', artists=[Artist(name='name2')],
            album=Album(name='name2'), track_no=2, date='1977-01-02',
            length=200, bitrate=200, musicbrainz_id='id2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ignores_values_with_default_value_none(self):
        # Explicitly passing a field's default (None) is equivalent to
        # omitting the field entirely.
        track1 = Track(name='name1')
        track2 = Track(name='name1', album=None)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_copy_can_reset_to_default_value(self):
        # copy(field=None) resets that field back to its default.
        track1 = Track(name='name1')
        track2 = Track(name='name1', album=Album()).copy(album=None)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
class TlTrackTest(unittest.TestCase):
    """Tests for the TlTrack model: an immutable (tlid, track) pair that
    also supports positional construction and tuple-style unpacking."""
    def test_tlid(self):
        tlid = 123
        tl_track = TlTrack(tlid=tlid)
        self.assertEqual(tl_track.tlid, tlid)
        with self.assertRaises(AttributeError):
            tl_track.tlid = None
    def test_track(self):
        track = Track()
        tl_track = TlTrack(track=track)
        self.assertEqual(tl_track.track, track)
        with self.assertRaises(AttributeError):
            tl_track.track = None
    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            TlTrack(foo='baz')
    def test_positional_args(self):
        tlid = 123
        track = Track()
        tl_track = TlTrack(tlid, track)
        self.assertEqual(tl_track.tlid, tlid)
        self.assertEqual(tl_track.track, track)
    def test_iteration(self):
        # TlTrack unpacks like a 2-tuple: (tlid, track).
        tlid = 123
        track = Track()
        tl_track = TlTrack(tlid, track)
        (tlid2, track2) = tl_track
        self.assertEqual(tlid2, tlid)
        self.assertEqual(track2, track)
    def test_repr(self):
        # u'...' in the expected repr is a Python 2 unicode literal.
        self.assertEquals(
            "TlTrack(tlid=123, track=Track(uri=u'uri'))",
            repr(TlTrack(tlid=123, track=Track(uri='uri'))))
    def test_serialize(self):
        track = Track(uri='uri', name='name')
        self.assertDictEqual(
            {'__model__': 'TlTrack', 'tlid': 123, 'track': track.serialize()},
            TlTrack(tlid=123, track=track).serialize())
    def test_to_json_and_back(self):
        tl_track1 = TlTrack(tlid=123, track=Track(uri='uri', name='name'))
        serialized = json.dumps(tl_track1, cls=ModelJSONEncoder)
        tl_track2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(tl_track1, tl_track2)
    def test_eq(self):
        tlid = 123
        track = Track()
        tl_track1 = TlTrack(tlid=tlid, track=track)
        tl_track2 = TlTrack(tlid=tlid, track=track)
        self.assertEqual(tl_track1, tl_track2)
        self.assertEqual(hash(tl_track1), hash(tl_track2))
    def test_eq_none(self):
        self.assertNotEqual(TlTrack(), None)
    def test_eq_other(self):
        self.assertNotEqual(TlTrack(), 'other')
    def test_ne_tlid(self):
        tl_track1 = TlTrack(tlid=123)
        tl_track2 = TlTrack(tlid=321)
        self.assertNotEqual(tl_track1, tl_track2)
        self.assertNotEqual(hash(tl_track1), hash(tl_track2))
    def test_ne_track(self):
        tl_track1 = TlTrack(track=Track(uri='a'))
        tl_track2 = TlTrack(track=Track(uri='b'))
        self.assertNotEqual(tl_track1, tl_track2)
        self.assertNotEqual(hash(tl_track1), hash(tl_track2))
class PlaylistTest(unittest.TestCase):
    """Tests for the Playlist model: immutable fields, the derived
    ``length`` property, the copy() API, serialization, equality and
    hashing."""
    def test_uri(self):
        uri = 'an_uri'
        playlist = Playlist(uri=uri)
        self.assertEqual(playlist.uri, uri)
        with self.assertRaises(AttributeError):
            playlist.uri = None
    def test_name(self):
        name = 'a name'
        playlist = Playlist(name=name)
        self.assertEqual(playlist.name, name)
        with self.assertRaises(AttributeError):
            playlist.name = None
    def test_tracks(self):
        tracks = [Track(), Track(), Track()]
        playlist = Playlist(tracks=tracks)
        self.assertEqual(list(playlist.tracks), tracks)
        with self.assertRaises(AttributeError):
            playlist.tracks = None
    def test_length(self):
        # `length` is derived from the number of tracks.
        tracks = [Track(), Track(), Track()]
        playlist = Playlist(tracks=tracks)
        self.assertEqual(playlist.length, 3)
    def test_last_modified(self):
        last_modified = 1390942873000
        playlist = Playlist(last_modified=last_modified)
        self.assertEqual(playlist.last_modified, last_modified)
        with self.assertRaises(AttributeError):
            playlist.last_modified = None
    # copy() tests: copying with one field overridden leaves every other
    # field untouched.
    def test_with_new_uri(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(uri='another uri')
        self.assertEqual(new_playlist.uri, 'another uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_name(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(name='another name')
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'another name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_tracks(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_tracks = [Track(), Track()]
        new_playlist = playlist.copy(tracks=new_tracks)
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), new_tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_last_modified(self):
        tracks = [Track()]
        last_modified = 1390942873000
        new_last_modified = last_modified + 1000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(last_modified=new_last_modified)
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, new_last_modified)
    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            Playlist(foo='baz')
    def test_repr_without_tracks(self):
        # u'...' in the expected reprs is a Python 2 unicode literal.
        self.assertEquals(
            "Playlist(name=u'name', uri=u'uri')",
            repr(Playlist(uri='uri', name='name')))
    def test_repr_with_tracks(self):
        self.assertEquals(
            "Playlist(name=u'name', tracks=[Track(name=u'foo')], uri=u'uri')",
            repr(Playlist(uri='uri', name='name', tracks=[Track(name='foo')])))
    def test_serialize_without_tracks(self):
        self.assertDictEqual(
            {'__model__': 'Playlist', 'uri': 'uri', 'name': 'name'},
            Playlist(uri='uri', name='name').serialize())
    def test_serialize_with_tracks(self):
        track = Track(name='foo')
        self.assertDictEqual(
            {'__model__': 'Playlist', 'uri': 'uri', 'name': 'name',
             'tracks': [track.serialize()]},
            Playlist(uri='uri', name='name', tracks=[track]).serialize())
    def test_to_json_and_back(self):
        playlist1 = Playlist(uri='uri', name='name')
        serialized = json.dumps(playlist1, cls=ModelJSONEncoder)
        playlist2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(playlist1, playlist2)
    def test_eq_name(self):
        playlist1 = Playlist(name='name')
        playlist2 = Playlist(name='name')
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_uri(self):
        playlist1 = Playlist(uri='uri')
        playlist2 = Playlist(uri='uri')
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_tracks(self):
        tracks = [Track()]
        playlist1 = Playlist(tracks=tracks)
        playlist2 = Playlist(tracks=tracks)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_last_modified(self):
        playlist1 = Playlist(last_modified=1)
        playlist2 = Playlist(last_modified=1)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq(self):
        tracks = [Track()]
        playlist1 = Playlist(
            uri='uri', name='name', tracks=tracks, last_modified=1)
        playlist2 = Playlist(
            uri='uri', name='name', tracks=tracks, last_modified=1)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_none(self):
        self.assertNotEqual(Playlist(), None)
    def test_eq_other(self):
        self.assertNotEqual(Playlist(), 'other')
    def test_ne_name(self):
        playlist1 = Playlist(name='name1')
        playlist2 = Playlist(name='name2')
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_uri(self):
        playlist1 = Playlist(uri='uri1')
        playlist2 = Playlist(uri='uri2')
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_tracks(self):
        playlist1 = Playlist(tracks=[Track(uri='uri1')])
        playlist2 = Playlist(tracks=[Track(uri='uri2')])
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_last_modified(self):
        playlist1 = Playlist(last_modified=1)
        playlist2 = Playlist(last_modified=2)
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne(self):
        playlist1 = Playlist(
            uri='uri1', name='name1', tracks=[Track(uri='uri1')],
            last_modified=1)
        playlist2 = Playlist(
            uri='uri2', name='name2', tracks=[Track(uri='uri2')],
            last_modified=2)
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
class SearchResultTest(unittest.TestCase):
    """Tests for the SearchResult model: immutable uri plus tracks,
    artists and albums result collections."""
    def test_uri(self):
        uri = 'an_uri'
        result = SearchResult(uri=uri)
        self.assertEqual(result.uri, uri)
        with self.assertRaises(AttributeError):
            result.uri = None
    def test_tracks(self):
        tracks = [Track(), Track(), Track()]
        result = SearchResult(tracks=tracks)
        self.assertEqual(list(result.tracks), tracks)
        with self.assertRaises(AttributeError):
            result.tracks = None
    def test_artists(self):
        artists = [Artist(), Artist(), Artist()]
        result = SearchResult(artists=artists)
        self.assertEqual(list(result.artists), artists)
        with self.assertRaises(AttributeError):
            result.artists = None
    def test_albums(self):
        albums = [Album(), Album(), Album()]
        result = SearchResult(albums=albums)
        self.assertEqual(list(result.albums), albums)
        with self.assertRaises(AttributeError):
            result.albums = None
    def test_invalid_kwarg(self):
        with self.assertRaises(TypeError):
            SearchResult(foo='baz')
    def test_repr_without_results(self):
        # u'...' in the expected repr is a Python 2 unicode literal.
        self.assertEquals(
            "SearchResult(uri=u'uri')",
            repr(SearchResult(uri='uri')))
    def test_serialize_without_results(self):
        self.assertDictEqual(
            {'__model__': 'SearchResult', 'uri': 'uri'},
            SearchResult(uri='uri').serialize())
    def test_to_json_and_back(self):
        result1 = SearchResult(uri='uri')
        serialized = json.dumps(result1, cls=ModelJSONEncoder)
        result2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(result1, result2)
| |
import re
from domain.analysis import Team, Player, PlayerPointsLine
"""
Represent a fantasy football season.
Contains all the games played during the season, all the teams involved,
and all the players who spent any time on the roster.
"""
class Season:
    """A fantasy football season: all games played, all teams involved,
    and every player who spent time on a roster.

    NOTE: this module is Python 2 code (print statements, cmp-style list
    sorts).  Team and Player come from domain.analysis; their attribute
    contracts are assumed from usage here — confirm against that module.
    """
    def __init__(self, year):
        self.year = year
        self.games = []    # GameScore objects, in the order added
        self.teams = []    # Team objects, deduplicated by name
        self.players = []  # Player objects, deduplicated by playerId
    """
    Add a game to the list of games played this season.
    """
    def addGame(self, game):
        self.games.append(game)
    """
    Add a team to the season. To make sure there are no duplicates, first
    checks if the team exists; this returns either the newly created team
    or the previously existing team.
    """
    def addTeam(self, teamName):
        team = self.getTeamByName(teamName)
        if not team:
            team = Team(teamName)
            self.teams.append(team)
        return team
    """
    Get a team that played this season, by the team's name.
    """
    def getTeamByName(self, teamName):
        # for/else: the else clause runs when the loop completes without
        # finding a match (a match returns directly, so no break needed).
        for team in self.teams:
            if team.name == teamName:
                return team
        else:
            return None
    """
    Add a player to the season roster. To make sure there are no duplicates,
    first checks if the player exists; this returns either the newly created player
    or the previously existing player.
    """
    def addPlayer(self, playerId, playerName):
        player = self.getPlayerById(playerId)
        if not player:
            player = Player(playerId, playerName)
            self.players.append(player)
        return player
    """
    Get a player who played this season, by his player id.
    """
    def getPlayerById(self, playerId):
        for player in self.players:
            if player.playerId == playerId:
                return player
        else:
            return None
    """
    Analyze all the games played this season, and calculate the actual and optimum
    points for and against each team.
    Also calculate the players who scored above their average against each team.
    """
    def analyzeGames(self):
        for game in self.games:
            # add teams from this game
            # NOTE(review): relies on game.teams having exactly two keys.
            [ awayTeamName, homeTeamName ] = game.teams.keys()
            awayTeam = self.addTeam(awayTeamName)
            homeTeam = self.addTeam(homeTeamName)
            # get the team scores
            awayTeamScore = game.teams[awayTeamName]
            homeTeamScore = game.teams[homeTeamName]
            # record actual points
            awayTeam.actualPointsFor += awayTeamScore.actualPoints
            awayTeam.actualPointsAgainst += homeTeamScore.actualPoints
            homeTeam.actualPointsFor += homeTeamScore.actualPoints
            homeTeam.actualPointsAgainst += awayTeamScore.actualPoints
            # record optimum points
            awayTeam.optimumPointsFor += awayTeamScore.optimumPoints
            awayTeam.optimumPointsAgainst += homeTeamScore.optimumPoints
            homeTeam.optimumPointsFor += homeTeamScore.optimumPoints
            homeTeam.optimumPointsAgainst += awayTeamScore.optimumPoints
            # get the players who scored above average in this game for each team
            # (the opposing team gets credited with having allowed the line)
            for playerScore in awayTeamScore.players:
                player = self.getPlayerById(playerScore.playerId)
                line = player.getAboveAverageWeeklyPointsLine(playerScore.week)
                if line:
                    homeTeam.addAboveAverageOpposingPlayerPointsLine(line)
            for playerScore in homeTeamScore.players:
                player = self.getPlayerById(playerScore.playerId)
                line = player.getAboveAverageWeeklyPointsLine(playerScore.week)
                if line:
                    awayTeam.addAboveAverageOpposingPlayerPointsLine(line)
    """
    Analyze all the teams in the league for all the weeks that have been
    parsed this season. Calculate their actual and optimum record.
    """
    def analyzeTeams(self):
        for game in self.games:
            [ awayTeamName, homeTeamName ] = game.teams.keys()
            awayTeam = self.addTeam(awayTeamName)
            homeTeam = self.addTeam(homeTeamName)
            # get the actual winner of this game
            # (GameScore stores 'TIE' when neither team won, which falls
            # through to the else branch below)
            if game.actualWinner == awayTeam.name:
                awayTeam.actualWins += 1
                homeTeam.actualLosses += 1
            elif game.actualWinner == homeTeam.name:
                awayTeam.actualLosses += 1
                homeTeam.actualWins += 1
            else:
                awayTeam.actualTies += 1
                homeTeam.actualTies += 1
            # get the optimum winner of this game
            if game.optimumWinner == awayTeam.name:
                awayTeam.optimumWins += 1
                homeTeam.optimumLosses += 1
            elif game.optimumWinner == homeTeam.name:
                awayTeam.optimumLosses += 1
                homeTeam.optimumWins += 1
            else:
                awayTeam.optimumTies += 1
                homeTeam.optimumTies += 1
    """
    Analyze all the players who were on a roster this season, and their scores,
    calculating averages, etc.
    """
    def analyzePlayers(self):
        for game in self.games:
            for teamScoreLine in game.teams.values():
                for playerLine in teamScoreLine.players:
                    player = self.addPlayer(playerLine.playerId, playerLine.name)
                    player.addScoreLine(playerLine)
        for player in self.players:
            player.analyzeScores()
    """
    Calculate which players scored above average for your bench, and below averge in your starting lineup.
    """
    def analyzeTeamPlayers(self):
        for game in self.games:
            [ awayTeamName, homeTeamName] = game.teams.keys()
            awayTeam = self.addTeam(awayTeamName)
            homeTeam = self.addTeam(homeTeamName)
            awayTeamScoreLine = game.teams[awayTeamName]
            homeTeamScoreLine = game.teams[homeTeamName]
            for playerScoreLine in awayTeamScoreLine.players:
                player = self.getPlayerById(playerScoreLine.playerId)
                pointsLine = PlayerPointsLine(player, playerScoreLine)
                if pointsLine.isHighScoringBenchPlayer():
                    awayTeam.addHighScoringBenchPlayerPointsLine(pointsLine)
                elif pointsLine.isLowScoringStarter():
                    awayTeam.addLowScoringStarterPlayerPointsLine(pointsLine)
            for playerScoreLine in homeTeamScoreLine.players:
                player = self.getPlayerById(playerScoreLine.playerId)
                pointsLine = PlayerPointsLine(player, playerScoreLine)
                if pointsLine.isHighScoringBenchPlayer():
                    homeTeam.addHighScoringBenchPlayerPointsLine(pointsLine)
                elif pointsLine.isLowScoringStarter():
                    homeTeam.addLowScoringStarterPlayerPointsLine(pointsLine)
    """
    Analyze all the players, games, and teams for this season.
    """
    def analyze(self):
        # Players must be analyzed first: analyzeGames reads each player's
        # above-average lines, which analyzePlayers computes.
        self.analyzePlayers()
        self.analyzeGames()
        self.analyzeTeams()
        self.analyzeTeamPlayers()
    """
    Print the summary of points scored by each team, both actual and optimal.
    """
    def printTeamPointsSummary(self):
        # Python 2 cmp-style sort; Team supplies the comparator.
        self.teams.sort(Team.sortByOptimumPointsForDescending)
        for team in self.teams:
            print "%s: APF: %d; APA: %d; OPF: %d; OPA: %d; dPF: %d; dPA: %d" % (team.name, team.actualPointsFor, team.actualPointsAgainst, team.optimumPointsFor, team.optimumPointsAgainst, team.optimumPointsFor - team.actualPointsFor, team.optimumPointsAgainst - team.actualPointsAgainst)
    """
    Print the summary of each team's record, both actual and optimal.
    """
    def printTeamRecordSummary(self):
        self.teams.sort(Team.sortByOptimumWinsDescending)
        for team in self.teams:
            print "%s: actual record: %d-%d-%d; optimum record: %d-%d-%d" % (team.name, team.actualWins, team.actualLosses, team.actualTies, team.optimumWins, team.optimumLosses, team.optimumTies)
    """
    Print a summary of the players who scored significantly above average for each team.
    Optionally display WHICH players scored above average, and how much above average they were.
    """
    def printTeamAboveAverageOpposingPlayersSummary(self, showIndividualPlayers=False):
        for team in self.teams:
            print "%s: # opposing players above average: %d; total above average: %d" % (team.name, len(team.aboveAverageOpposingPlayerPointsLines), team.getTotalOpposingPlayersPointsAboveAverage())
            if showIndividualPlayers:
                for line in team.aboveAverageOpposingPlayerPointsLines:
                    # only lines more than 10 points above average are shown
                    if line.weekPoints - line.averagePoints > 10:
                        print "%s: points: %d; above average: %d" % (line.name, line.weekPoints, line.weekPoints - line.averagePoints)
    """
    Print a summary of each player's scores.
    """
    def printPlayerScoreSummary(self):
        for player in self.players:
            print "%s: total points: %d; average points: %f" % (player.name, player.totalPoints, player.averagePoints)
    """
    Print a summary of the players on each team that scored well on the bench.
    """
    def printHighScoringBenchPlayersSummary(self):
        for team in self.teams:
            print team.name
            for playerPointsLine in team.highScoringBenchPlayers:
                print "%s, week %d: %d" % (playerPointsLine.name, playerPointsLine.week, playerPointsLine.weekPoints)
    """
    Print a summary of the players on each team that scored badly while starting.
    """
    def printLowScoringStartersSummary(self):
        for team in self.teams:
            print team.name
            for line in team.lowScoringStarters:
                print "%s, week %d: %d" % (line.name, line.week, line.weekPoints)
"""
Represents a single game in a single week, between two teams.
Reads and parses the HTML from the quick box score from that game,
and creates the team and player score lines. Determines who
won the game in reality, and who would have won if both teams had
been set optimally.
"""
class GameScore:
    """One game in one week between two teams.

    Reads the saved quick-box-score HTML file for the game, builds a
    TeamScoreLine per team (keyed by team name in self.teams), and
    determines both the actual winner and the optimum-lineup winner.
    """
    def __init__(self, year, week, game):
        # Box scores are stored on disk as <year>/<week>/<game>.
        self.filename = '%s/%s/%s' % (year, week, game)
        self.year = year
        self.week = week
        self.game = game
        try:
            self.file = open(self.filename, 'r')
        except:
            # NOTE(review): bare except; IOError is presumably the intended
            # catch — this also swallows KeyboardInterrupt etc.
            self.file = None
            print "Could not read file: %s" % self.filename
        self.teams = {}          # team name -> TeamScoreLine
        self.actualWinner = ''   # team name, or 'TIE'
        self.optimumWinner = ''  # team name, or 'TIE'
        if self.file:
            self.analyzeFile()
            self.analyzeWinners()
    """
    Determine who won in reality, and who would have won if both
    teams played optimally.
    """
    def analyzeWinners(self):
        # Assumes analyzeFile populated exactly two teams.
        [ teamName1, teamName2 ] = self.teams.keys()
        team1 = self.teams[teamName1]
        team2 = self.teams[teamName2]
        if team1.actualPoints > team2.actualPoints:
            self.actualWinner = teamName1
        elif team2.actualPoints > team1.actualPoints:
            self.actualWinner = teamName2
        else:
            self.actualWinner = 'TIE'
        if team1.optimumPoints > team2.optimumPoints:
            self.optimumWinner = teamName1
        elif team2.optimumPoints > team1.optimumPoints:
            self.optimumWinner = teamName2
        else:
            self.optimumWinner = 'TIE'
    """
    Parse the file and extract the player score lines for each player,
    and add them to the team score lines.
    """
    def analyzeFile(self):
        teams = {}
        teamName = ''
        for line in self.file:
            # we can determine which team we're counting by the title above the list of scores
            teamNameSearch = re.search('<td.* class="tableHead">([\w\s\.]+)</td>', line)
            if teamNameSearch:
                # starter and bench tables share a team: strip the suffix
                teamName = teamNameSearch.group(1).replace(' BENCH', '')
                try:
                    if not teams[teamName]:
                        teams[teamName] = []
                except:
                    # NOTE(review): bare except used as "key missing" check;
                    # KeyError is the exception actually expected here.
                    teams[teamName] = []
                continue
            try:
                player = PlayerScoreLine(self.week, line)
                teams[teamName].append(player)
            except:
                # non-player rows fail to parse and are skipped
                continue
        for teamList in teams:
            self.teams[teamList] = TeamScoreLine(self.week, teams[teamList])
"""
Represents a single player's scoring line for a single game.
"""
class PlayerScoreLine:
    """One player's scoring line parsed from a quick-box-score HTML row.

    Each ``_parse*`` helper extracts one field with a regex and raises
    ValueError when the field is missing; callers (GameScore.analyzeFile)
    treat any exception from the constructor as "this row is not a player
    score line" and skip the row.
    """

    def __init__(self, week, line):
        self.line = line
        self.week = week
        # Any parse failure below raises ValueError.  The original code
        # raised the undefined name `Error` (a NameError at runtime);
        # ValueError is a real exception and is still caught by the
        # callers' bare `except` clauses.  It also called _parsePlayerId
        # twice; parse each field exactly once.
        self.playerId = self._parsePlayerId(line)
        self.teamId = self._parseTeamId(line)
        ( self.name, self.position ) = self._parseNameAndPosition(line)
        self.slot = self._parseSlot(line)
        self.points = self._parsePoints(line)

    def __str__(self):
        return "week %s, player %s, team %s: %s, %s, %s, %s" % (self.week, self.playerId, self.teamId, self.name, self.position, self.slot, self.points)

    def _parsePlayerId(self, line):
        """Return the player id from an id="plyrNNN" attribute."""
        idSearch = re.search('id="plyr(\d+)"', line)
        if idSearch:
            return idSearch.group(1)
        raise ValueError("Cannot find playerId")

    def _parseTeamId(self, line):
        """Return the NFL team id from a team_id="NNN" attribute."""
        teamSearch = re.search('<div .* team_id="(\d+)"', line)
        if teamSearch:
            return teamSearch.group(1)
        raise ValueError("Cannot find team id")

    def _parseNameAndPosition(self, line):
        """Return a (name, position) pair, e.g. ('John Doe', 'QB')."""
        playerSearch = re.search('<div.+>([\w\s\.\/\'-]+)</div>\*?, \w+ ([\w\/]+)', line)
        if playerSearch:
            playerName = playerSearch.group(1)
            playerPosition = playerSearch.group(2)
            return (playerName, playerPosition)
        raise ValueError("Cannot find name and position")

    def _parseSlot(self, line):
        """Return the roster slot the player occupied (QB, Bench, IR, ...)."""
        slotSearch = re.search('<td id="slot_\d+".*>([\w\/]+)</td><td', line)
        if slotSearch:
            return slotSearch.group(1)
        raise ValueError("Cannot find slot")

    def _parsePoints(self, line):
        """Return the player's total points (possibly negative) as an int."""
        pointsSearch = re.search('<td id="plscrg_\d+_totpts".*>(-?\d+)</td>', line)
        if pointsSearch:
            return int(pointsSearch.group(1))
        raise ValueError("Cannot find points")

    def compareByPointsDescending(playerA, playerB):
        """Comparator sorting players by points scored, descending.

        Equivalent to cmp(playerB.points, playerA.points), but written so
        it also works on Python 3, where the cmp builtin was removed.
        """
        return (playerA.points < playerB.points) - (playerB.points < playerA.points)
"""
Take a list of players for a given team in a given week, and calculate
their actual points scored as well as the number of points they'd have
scored if they set their roster optimally.
"""
class TeamScoreLine:
    """Aggregate scoring for one team in one week.

    Given the individual player score lines, computes the points the team
    actually scored with its starting lineup, the points scored on the
    bench and on IR, and the points an optimally set lineup would have
    scored.
    """

    # Roster slots that count toward the team's actual score.
    STARTING_SLOTS = ['QB', 'RB', 'RB/WR', 'WR', 'TE', 'D/ST', 'K']

    def __init__(self, week, players=None):
        # `players=None` instead of the original mutable `[]` default:
        # a shared default list would be aliased across instances.
        self.week = week
        self.players = players if players is not None else []
        self.actualPoints = 0
        self.benchPoints = 0
        self.irPoints = 0
        self.optimumPoints = 0
        self.analyzeActualPoints()
        self.analyzeBenchPoints()
        self.analyzeIRPoints()
        self.analyzeOptimumPoints()

    def analyzeActualPoints(self):
        """Points actually scored by the starting lineup."""
        self.actualPoints = self.getPointsBySlots(self.STARTING_SLOTS)

    def analyzeBenchPoints(self):
        """Points scored by players left on the bench."""
        self.benchPoints = self.getPointsBySlots(['Bench'])

    def analyzeIRPoints(self):
        """Points scored by players on injured reserve."""
        self.irPoints = self.getPointsBySlots(['IR'])

    def analyzeOptimumPoints(self):
        """Points an optimally set lineup would have scored.

        Assumes a lineup of 1 QB, 2 RB, 2 WR, 1 RB/WR flex, 1 TE, 1 D/ST
        and 1 K: the best QB/TE/D/K, the two best RBs, the two best WRs,
        and the better of the third-best RB or third-best WR in the flex.
        """
        QBs = self.getPlayersByPositions(['QB'])
        RBs = self.getPlayersByPositions(['RB'])
        WRs = self.getPlayersByPositions(['WR'])
        TEs = self.getPlayersByPositions(['TE'])
        Ds = self.getPlayersByPositions(['D/ST'])
        Ks = self.getPlayersByPositions(['K'])
        self.optimumPoints = 0
        # Single-slot positions: take the top scorer of each, if any.
        for group in (QBs, TEs, Ds, Ks):
            if group:
                self.optimumPoints += group[0].points
        # Two starting slots each for RB and WR: take the top two.
        for group in (RBs, WRs):
            self.optimumPoints += sum(player.points for player in group[:2])
        # Flex slot: the better of the third-best RB and third-best WR.
        flexCandidates = RBs[2:3] + WRs[2:3]
        if flexCandidates:
            self.optimumPoints += max(player.points for player in flexCandidates)

    def getPointsBySlots(self, slots=()):
        """Total points scored by players occupying any of `slots`."""
        return sum(player.points for player in self.players
                   if player.slot in slots)

    def getPlayersBySlots(self, slots=()):
        """Players occupying any of the given roster slots."""
        return [player for player in self.players if player.slot in slots]

    def getPlayersByPositions(self, positions=()):
        """Players at the given positions, sorted by points, descending.

        Uses a key-based sort (equivalent ordering to the original
        cmp-based PlayerScoreLine.compareByPointsDescending sort, but
        compatible with both Python 2 and Python 3).
        """
        players = [player for player in self.players
                   if player.position in positions]
        players.sort(key=lambda player: player.points, reverse=True)
        return players
| |
#!/usr/bin/env python3
# Developer virtualenv setup for Certbot client
"""Aids in creating a developer virtual environment for Certbot.
When this module is run as a script, it takes the arguments that should
be passed to pip to install the Certbot packages as command line
arguments. If no arguments are provided, all Certbot packages and their
development dependencies are installed. The virtual environment will be
created with the name "venv" in the current working directory. You can
change the name of the virtual environment by setting the environment
variable VENV_NAME.
"""
from __future__ import print_function
from distutils.version import LooseVersion
import glob
import os
import re
import shutil
import subprocess
import sys
import time
# Local packages installed editable ("-e") into the development venv,
# together with their test/dev extras.
REQUIREMENTS = [
    '-e acme[test]',
    '-e certbot[all]',
    '-e certbot-apache',
    '-e certbot-dns-cloudflare',
    '-e certbot-dns-cloudxns',
    '-e certbot-dns-digitalocean',
    '-e certbot-dns-dnsimple',
    '-e certbot-dns-dnsmadeeasy',
    '-e certbot-dns-gehirn',
    '-e certbot-dns-google',
    '-e certbot-dns-linode',
    '-e certbot-dns-luadns',
    '-e certbot-dns-nsone',
    '-e certbot-dns-ovh',
    '-e certbot-dns-rfc2136',
    '-e certbot-dns-route53',
    '-e certbot-dns-sakuracloud',
    '-e certbot-nginx',
    '-e certbot-compatibility-test',
    '-e certbot-ci',
    '-e letstest',
]
# On Windows: add the installer package and drop the packages that are
# not supported on that platform.
if sys.platform == 'win32':
    REQUIREMENTS.append('-e windows-installer')
    REQUIREMENTS.remove('-e certbot-apache')
    REQUIREMENTS.remove('-e certbot-compatibility-test')
# Captures the major and minor components of a version string, e.g. "3.8".
VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')
class PythonExecutableNotFoundError(Exception):
    """Raised when no Python executable suitable for Certbot can be found.

    NOTE(review): find_python_executable below raises RuntimeError instead of
    this class — confirm whether callers expect this exception anywhere.
    """
    pass
def find_python_executable() -> str:
    """
    Find a Python executable suitable for running Certbot.
    Will test, in decreasing priority order:
    * the current Python interpreter
    * 'python3' then 'python' executables in PATH if available
    * Windows Python launcher 'py' executable in PATH if available
    Incompatible python versions for Certbot will be evicted (e.g. Python 3
    versions less than 3.6).
    :rtype: str
    :return: the relevant python executable path
    :raise RuntimeError: if no relevant python executable path could be found
    """
    # First try, current python executable
    if _check_version('{0}.{1}.{2}'.format(
            sys.version_info[0], sys.version_info[1], sys.version_info[2])):
        return sys.executable
    # Second try, with python executables in path
    for one_version in ('3', '',):
        try:
            one_python = 'python{0}'.format(one_version)
            output = subprocess.check_output(
                [one_python, '--version'],
                universal_newlines=True, stderr=subprocess.STDOUT)
            if _check_version(output.strip().split()[1]):
                return subprocess.check_output(
                    [one_python, '-c',
                     'import sys; sys.stdout.write(sys.executable);'],
                    universal_newlines=True)
        except (subprocess.CalledProcessError, OSError):
            pass
    # Last try, with Windows Python launcher
    try:
        output_version = subprocess.check_output(
            ['py', '-3', '--version'],
            universal_newlines=True, stderr=subprocess.STDOUT)
        if _check_version(output_version.strip().split()[1]):
            # BUGFIX: this previously passed an undefined name 'env_arg',
            # raising a NameError (which the except clause below does NOT
            # catch) whenever this branch was reached. Use the same '-3'
            # selector as the version probe above.
            return subprocess.check_output(
                ['py', '-3', '-c',
                 'import sys; sys.stdout.write(sys.executable);'],
                universal_newlines=True)
    except (subprocess.CalledProcessError, OSError):
        pass
    # The old code kept a 'python_executable_path' variable that was never
    # assigned, so this raise was effectively unconditional; make it so.
    raise RuntimeError('Error, no compatible Python executable for Certbot could be found.')
def _check_version(version_str):
    """Return True if *version_str* denotes a Python version Certbot supports.

    Anything that does not parse as 'major.minor...' or that is older than
    Python 3.6 is rejected (with a message printed in the latter case).
    """
    match = VERSION_PATTERN.search(version_str)
    if match is None:
        return False
    parsed = (int(match.group(1)), int(match.group(2)))
    if parsed < (3, 6):
        print('Incompatible python version for Certbot found: {0}'.format(version_str))
        return False
    return True
def subprocess_with_print(cmd, env=None, shell=False):
    """Echo *cmd* to stdout, then run it with subprocess.check_call.

    List commands are echoed with a '+ ' prefix via list2cmdline; string
    commands are echoed verbatim (matching the historical behavior).
    """
    if env is None:
        env = os.environ
    if isinstance(cmd, list):
        echoed = '+ {0}'.format(subprocess.list2cmdline(cmd))
    else:
        echoed = cmd
    print(echoed)
    subprocess.check_call(cmd, env=env, shell=shell)
def subprocess_output_with_print(cmd, env=None, shell=False):
    """Echo *cmd* to stdout, run it, and return its captured output.

    Same echo convention as subprocess_with_print: '+ '-prefixed for list
    commands, verbatim for strings.
    """
    if env is None:
        env = os.environ
    if isinstance(cmd, list):
        echoed = '+ {0}'.format(subprocess.list2cmdline(cmd))
    else:
        echoed = cmd
    print(echoed)
    return subprocess.check_output(cmd, env=env, shell=shell)
def get_venv_python_path(venv_path):
    """Return the absolute path of the python executable inside *venv_path*.

    Checks the POSIX layout (bin/python) first, then the Windows layout
    (Scripts\\python.exe), and raises ValueError if neither exists.
    """
    for relative in ('bin/python', 'Scripts\\python.exe'):
        candidate = os.path.join(venv_path, relative)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    raise ValueError((
        'Error, could not find python executable in venv path {0}: is it a valid venv ?'
        .format(venv_path)))
def prepare_venv_path(venv_name):
    """Determine the venv path and prepare it for use.

    Cleans up any '*.egg-info' entries in the current working directory.
    The returned path is the VENV_NAME environment variable when set,
    *venv_name* otherwise; a pre-existing directory at that path is kept
    as a timestamped '.bak' backup rather than deleted.

    :param str venv_name: name/path for the venv if VENV_NAME isn't set.
    :returns: path where the virtual environment should be created
    :rtype: str
    """
    # Drop leftover egg metadata so editable installs start clean.
    for egg_entry in glob.glob('*.egg-info'):
        if os.path.isdir(egg_entry):
            shutil.rmtree(egg_entry)
        else:
            os.remove(egg_entry)
    override = os.environ.get('VENV_NAME')
    if override:
        print('Creating venv at {0}'
              ' as specified in VENV_NAME'.format(override))
        venv_name = override
    # Preserve any existing venv as a timestamped backup.
    if os.path.isdir(venv_name):
        os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
    return venv_name
def install_packages(venv_name, pip_args):
    """Installs packages in the given venv.
    :param str venv_name: The name or path at where the virtual
        environment should be created.
    :param pip_args: Command line arguments that should be given to
        pip to install packages
    :type pip_args: `list` of `str`
    """
    # Using the python executable from venv, we ensure to execute following commands in this venv.
    py_venv = get_venv_python_path(venv_name)
    # pipstrap pins pip/setuptools/wheel inside the venv before anything else.
    subprocess_with_print([py_venv, os.path.abspath('tools/pipstrap.py')])
    # We only use this value during pip install because:
    # 1) We're really only adding it for installing cryptography, which happens here, and
    # 2) There are issues with calling it along with VIRTUALENV_NO_DOWNLOAD, which applies at the
    # steps above, not during pip install.
    env_pip_no_binary = os.environ.get('CERTBOT_PIP_NO_BINARY')
    if env_pip_no_binary:
        # Check OpenSSL version. If it's too low, don't apply the env variable.
        openssl_version_string = str(subprocess_output_with_print(['openssl', 'version']))
        matches = re.findall(r'OpenSSL ([^ ]+) ', openssl_version_string)
        if not matches:
            print('Could not find OpenSSL version, not setting PIP_NO_BINARY.')
        else:
            openssl_version = matches[0]
            if LooseVersion(openssl_version) >= LooseVersion('1.0.2'):
                print('Setting PIP_NO_BINARY to {0}'
                      ' as specified in CERTBOT_PIP_NO_BINARY'.format(env_pip_no_binary))
                os.environ['PIP_NO_BINARY'] = env_pip_no_binary
            else:
                print('Not setting PIP_NO_BINARY, as OpenSSL version is too old.')
    # The actual install, delegated to the project's pinning-aware wrapper.
    command = [py_venv, os.path.abspath('tools/pip_install.py')]
    command.extend(pip_args)
    subprocess_with_print(command)
    # Undo the PIP_NO_BINARY override so it doesn't leak into later commands.
    if 'PIP_NO_BINARY' in os.environ:
        del os.environ['PIP_NO_BINARY']
    if os.path.isdir(os.path.join(venv_name, 'bin')):
        # Linux/OSX specific
        print('-------------------------------------------------------------------')
        print('Please run the following command to activate developer environment:')
        print('source {0}/bin/activate'.format(venv_name))
        print('-------------------------------------------------------------------')
    elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
        # Windows specific
        print('---------------------------------------------------------------------------')
        print('Please run one of the following commands to activate developer environment:')
        print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
        print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
        print('---------------------------------------------------------------------------')
    else:
        raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))
def create_venv(venv_path):
    """Create a Python virtual environment at venv_path.
    :param str venv_path: path where the venv should be created
    """
    interpreter = find_python_executable()
    subprocess_with_print([interpreter, '-m', 'venv', venv_path])
def main(pip_args=None):
    """Create the developer venv and install the requested packages.

    Falls back to the full REQUIREMENTS list when *pip_args* is empty.
    """
    venv_path = prepare_venv_path('venv')
    create_venv(venv_path)
    install_packages(venv_path, pip_args if pip_args else REQUIREMENTS)
if __name__ == '__main__':
    main(sys.argv[1:])
| |
#!/usr/bin/env python
"""Implement access to the windows registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import ctypes
import ctypes.wintypes
import io
import os
import stat
from future.builtins import filter
from future.builtins import range
from future.builtins import str
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import compatibility
try: # future.moves.winreg can't be used because mocking of modules is hard.
import winreg # pylint: disable=g-import-not-at-top
except ImportError: # In case of Python 2:
import _winreg as winreg # pylint: disable=g-import-not-at-top
# Difference between 1 Jan 1601 and 1 Jan 1970.
WIN_UNIX_DIFF_MSECS = 11644473600
# KEY_READ = STANDARD_RIGHTS_READ | KEY_QUERY_VALUE |
# KEY_ENUMERATE_SUB_KEYS | KEY_NOTIFY
# Also see: http://msdn.microsoft.com/en-us/library/windows/desktop/
# ms724878(v=vs.85).aspx
KEY_READ = 0x20019
def CanonicalPathToLocalPath(path):
  """Convert a forward-slash canonical path to a registry-style local path.

  Slashes become backslashes and any leading/trailing backslashes are
  stripped, e.g. "/HKEY_LOCAL_MACHINE/Software/" -> "HKEY_LOCAL_MACHINE\\Software".
  """
  local = path.replace("/", "\\")
  return local.strip("\\")
# winreg is broken on Python 2.x and doesn't support unicode registry values.
# We provide some replacement functions here.
advapi32 = ctypes.windll.advapi32
# Shorthand pointer types used throughout the RegXxxW argtypes below.
LPDWORD = ctypes.POINTER(ctypes.wintypes.DWORD)
LPBYTE = ctypes.POINTER(ctypes.wintypes.BYTE)
# Win32 system error codes returned directly by the Reg* family of APIs.
ERROR_SUCCESS = 0
ERROR_MORE_DATA = 234
class FileTime(ctypes.Structure):
  # Mirrors the Win32 FILETIME struct: a 64-bit timestamp split into two
  # 32-bit halves (100-nanosecond intervals since 1 Jan 1601).
  _fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
              ("dwHighDateTime", ctypes.wintypes.DWORD)]
# Bound at module level (rather than looked up per call) so KeyHandle.__del__
# can still reach it during interpreter shutdown.
RegCloseKey = advapi32["RegCloseKey"]  # pylint: disable=g-bad-name
RegCloseKey.restype = ctypes.c_long
RegCloseKey.argtypes = [ctypes.c_void_p]
class KeyHandle(object):
  """A wrapper class for a registry key handle."""
  def __init__(self, value=0):
    # Wrap a raw HKEY value; 0 means "no handle yet" (filled in by OpenKey).
    if value:
      self.handle = ctypes.c_void_p(value)
    else:
      self.handle = ctypes.c_void_p()
  def __enter__(self):
    return self
  def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
    # Context-manager exit: close the handle, never suppress exceptions.
    self.Close()
    return False
  def Close(self):
    """Close the underlying handle; safe to call more than once."""
    if not self.handle:
      return
    if RegCloseKey is None:
      return  # Globals become None during exit.
    rc = RegCloseKey(self.handle)
    # Clear first so a failed close is not retried from __del__.
    self.handle = ctypes.c_void_p()
    if rc != ERROR_SUCCESS:
      raise ctypes.WinError(2)
  def __del__(self):
    self.Close()
def OpenKey(key, sub_key):
  """This calls the Windows OpenKeyEx function in a Unicode safe way.

  Args:
    key: a KeyHandle for the parent key or hive.
    sub_key: unicode name of the subkey to open.

  Returns:
    A new KeyHandle opened with KEY_READ access.

  Raises:
    OSError: if RegOpenKeyExW fails.
  """
  regopenkeyex = advapi32["RegOpenKeyExW"]
  regopenkeyex.restype = ctypes.c_long
  regopenkeyex.argtypes = [
      ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_ulong, ctypes.c_ulong,
      ctypes.POINTER(ctypes.c_void_p)
  ]
  new_key = KeyHandle()
  # Don't use KEY_WOW64_64KEY (0x100) since it breaks on Windows 2000
  rc = regopenkeyex(
      key.handle, sub_key, 0, KEY_READ,
      ctypes.cast(
          ctypes.byref(new_key.handle), ctypes.POINTER(ctypes.c_void_p)))
  if rc != ERROR_SUCCESS:
    # Registry APIs return the LSTATUS error code directly instead of via
    # GetLastError. Raise with rc so callers see the real cause (access
    # denied, not found, ...) instead of a hardcoded ERROR_FILE_NOT_FOUND.
    raise ctypes.WinError(rc)
  return new_key
def CloseKey(key):
  """Close a registry handle, raising OSError on failure.

  NOTE(review): unlike KeyHandle.Close (which passes self.handle), this
  passes *key* straight to RegCloseKey, so callers are expected to pass a
  raw handle value rather than a KeyHandle — confirm against call sites.
  """
  rc = RegCloseKey(key)
  if rc != ERROR_SUCCESS:
    # Propagate the real LSTATUS code instead of hardcoding error 2.
    raise ctypes.WinError(rc)
def QueryInfoKey(key):
  """This calls the Windows RegQueryInfoKey function in a Unicode safe way.

  Args:
    key: an open KeyHandle.

  Returns:
    Tuple (number_of_subkeys, number_of_values, last_modified) where
    last_modified is in Unix epoch seconds.

  Raises:
    OSError: if RegQueryInfoKeyW fails.
  """
  regqueryinfokey = advapi32["RegQueryInfoKeyW"]
  regqueryinfokey.restype = ctypes.c_long
  regqueryinfokey.argtypes = [
      ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
      LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
      ctypes.POINTER(FileTime)
  ]
  null = LPDWORD()
  num_sub_keys = ctypes.wintypes.DWORD()
  num_values = ctypes.wintypes.DWORD()
  ft = FileTime()
  rc = regqueryinfokey(key.handle, ctypes.c_wchar_p(), null, null,
                       ctypes.byref(num_sub_keys), null, null,
                       ctypes.byref(num_values), null, null, null,
                       ctypes.byref(ft))
  if rc != ERROR_SUCCESS:
    # Propagate the real LSTATUS code instead of hardcoding error 2.
    raise ctypes.WinError(rc)
  # FILETIME is 100ns ticks since 1601-01-01; convert to Unix epoch seconds.
  # (WIN_UNIX_DIFF_MSECS is in seconds despite its name: 11644473600.)
  last_modified = ft.dwLowDateTime | (ft.dwHighDateTime << 32)
  last_modified = last_modified // 10000000 - WIN_UNIX_DIFF_MSECS
  return (num_sub_keys.value, num_values.value, last_modified)
def QueryValueEx(key, value_name):
  """This calls the Windows QueryValueEx function in a Unicode safe way.

  Args:
    key: an open KeyHandle.
    value_name: unicode name of the value to read ("" for the default value).

  Returns:
    Tuple (python_value, registry_data_type).

  Raises:
    OSError: if the value exceeds ~10 MB or RegQueryValueExW fails.
  """
  regqueryvalueex = advapi32["RegQueryValueExW"]
  regqueryvalueex.restype = ctypes.c_long
  regqueryvalueex.argtypes = [
      ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPBYTE, LPDWORD
  ]
  # Start with a small buffer and double it while the API reports
  # ERROR_MORE_DATA.
  size = 256
  data_type = ctypes.wintypes.DWORD()
  while True:
    tmp_size = ctypes.wintypes.DWORD(size)
    buf = ctypes.create_string_buffer(size)
    rc = regqueryvalueex(key.handle, value_name, LPDWORD(),
                         ctypes.byref(data_type), ctypes.cast(buf, LPBYTE),
                         ctypes.byref(tmp_size))
    if rc != ERROR_MORE_DATA:
      break
    # We limit the size here to ~10 MB so the response doesn't get too big.
    if size > 10 * 1024 * 1024:
      raise OSError("Value too big to be read by GRR.")
    size *= 2
  if rc != ERROR_SUCCESS:
    # Propagate the real LSTATUS code instead of hardcoding error 2.
    raise ctypes.WinError(rc)
  return _Reg2Py(buf, tmp_size.value, data_type.value), data_type.value
def EnumKey(key, index):
  """This calls the Windows RegEnumKeyEx function in a Unicode safe way.

  Args:
    key: an open KeyHandle.
    index: zero-based index of the subkey to enumerate.

  Returns:
    The subkey name as a unicode string.

  Raises:
    OSError: if RegEnumKeyExW fails (including "no more items").
  """
  regenumkeyex = advapi32["RegEnumKeyExW"]
  regenumkeyex.restype = ctypes.c_long
  regenumkeyex.argtypes = [
      ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.c_wchar_p, LPDWORD,
      LPDWORD, ctypes.c_wchar_p, LPDWORD,
      ctypes.POINTER(FileTime)
  ]
  # Registry key names are limited to 255 characters plus a NUL terminator.
  buf = ctypes.create_unicode_buffer(257)
  length = ctypes.wintypes.DWORD(257)
  rc = regenumkeyex(key.handle, index, ctypes.cast(buf, ctypes.c_wchar_p),
                    ctypes.byref(length), LPDWORD(), ctypes.c_wchar_p(),
                    LPDWORD(),
                    ctypes.POINTER(FileTime)())
  # Compare against ERROR_SUCCESS (== 0) for consistency with the other
  # wrappers, and propagate the real LSTATUS code on failure.
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(rc)
  return ctypes.wstring_at(buf, length.value).rstrip(u"\x00")
def EnumValue(key, index):
  """This calls the Windows RegEnumValue function in a Unicode safe way.

  Args:
    key: an open KeyHandle.
    index: zero-based index of the value to enumerate.

  Returns:
    Tuple (value_name, python_value, registry_data_type).

  Raises:
    OSError: if RegQueryInfoKeyW or RegEnumValueW fails.
  """
  regenumvalue = advapi32["RegEnumValueW"]
  regenumvalue.restype = ctypes.c_long
  regenumvalue.argtypes = [
      ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.c_wchar_p, LPDWORD,
      LPDWORD, LPDWORD, LPBYTE, LPDWORD
  ]
  regqueryinfokey = advapi32["RegQueryInfoKeyW"]
  regqueryinfokey.restype = ctypes.c_long
  regqueryinfokey.argtypes = [
      ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
      LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
      ctypes.POINTER(FileTime)
  ]
  # Ask the key for the maximum name/data sizes so the buffers below are
  # usually big enough on the first attempt.
  null = ctypes.POINTER(ctypes.wintypes.DWORD)()
  value_size = ctypes.wintypes.DWORD()
  data_size = ctypes.wintypes.DWORD()
  rc = regqueryinfokey(key.handle, ctypes.c_wchar_p(), null, null, null, null,
                       null, null, ctypes.byref(value_size),
                       ctypes.byref(data_size), null,
                       ctypes.POINTER(FileTime)())
  if rc != ERROR_SUCCESS:
    # Propagate the real LSTATUS code instead of hardcoding error 2.
    raise ctypes.WinError(rc)
  # Room for the NUL terminators.
  value_size.value += 1
  data_size.value += 1
  value = ctypes.create_unicode_buffer(value_size.value)
  while True:
    data = ctypes.create_string_buffer(data_size.value)
    tmp_value_size = ctypes.wintypes.DWORD(value_size.value)
    tmp_data_size = ctypes.wintypes.DWORD(data_size.value)
    data_type = ctypes.wintypes.DWORD()
    rc = regenumvalue(key.handle, index, ctypes.cast(value, ctypes.c_wchar_p),
                      ctypes.byref(tmp_value_size), null,
                      ctypes.byref(data_type), ctypes.cast(data, LPBYTE),
                      ctypes.byref(tmp_data_size))
    if rc != ERROR_MORE_DATA:
      break
    # The value may have grown since the size query above; retry bigger.
    data_size.value *= 2
  if rc != ERROR_SUCCESS:
    # Propagate the real LSTATUS code instead of hardcoding error 2.
    raise ctypes.WinError(rc)
  return (value.value, _Reg2Py(data, tmp_data_size.value,
                               data_type.value), data_type.value)
def _Reg2Py(data, size, data_type):
  """Converts a Windows Registry value to the corresponding Python data type.

  DWORDs become ints, (expand-)strings become unicode, multi-strings become
  lists of unicode, and anything else is returned as raw bytes (or None when
  empty).
  """
  if data_type == winreg.REG_DWORD:
    if size == 0:
      return 0
    # DWORD is an unsigned 32-bit integer, see:
    # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/262627d8-3418-4627-9218-4ffe110850b2
    return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value
  if data_type in (winreg.REG_SZ, winreg.REG_EXPAND_SZ):
    # size is in bytes; wide chars are 2 bytes each.
    return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
  if data_type == winreg.REG_MULTI_SZ:
    return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
  if size == 0:
    return None
  return ctypes.string_at(data, size)
class RegistryFile(vfs_base.VFSHandler):
  """Emulate registry access through the VFS."""
  supported_pathtype = rdf_paths.PathSpec.PathType.REGISTRY
  auto_register = True
  # Maps the registry types to protobuf enums
  registry_map = {
      winreg.REG_NONE:
          rdf_client_fs.StatEntry.RegistryType.REG_NONE,
      winreg.REG_SZ:
          rdf_client_fs.StatEntry.RegistryType.REG_SZ,
      winreg.REG_EXPAND_SZ:
          rdf_client_fs.StatEntry.RegistryType.REG_EXPAND_SZ,
      winreg.REG_BINARY:
          rdf_client_fs.StatEntry.RegistryType.REG_BINARY,
      winreg.REG_DWORD:
          rdf_client_fs.StatEntry.RegistryType.REG_DWORD,
      winreg.REG_DWORD_LITTLE_ENDIAN:
          rdf_client_fs.StatEntry.RegistryType.REG_DWORD_LITTLE_ENDIAN,
      winreg.REG_DWORD_BIG_ENDIAN:
          rdf_client_fs.StatEntry.RegistryType.REG_DWORD_BIG_ENDIAN,
      winreg.REG_LINK:
          rdf_client_fs.StatEntry.RegistryType.REG_LINK,
      winreg.REG_MULTI_SZ:
          rdf_client_fs.StatEntry.RegistryType.REG_MULTI_SZ,
  }
  def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):
    """Open the registry path in *pathspec* and classify it as key or value."""
    super(RegistryFile, self).__init__(
        base_fd,
        handlers=handlers,
        pathspec=pathspec,
        progress_callback=progress_callback)
    self.value = None
    self.value_type = winreg.REG_NONE
    self.hive = None
    self.hive_name = None
    self.local_path = None
    self.last_modified = 0
    self.is_directory = True
    self.fd = None
    if base_fd is None:
      self.pathspec.Append(pathspec)
    elif base_fd.IsDirectory():
      self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
                                               pathspec.path)
    else:
      raise IOError("Registry handler can not be stacked on another handler.")
    path_components = list(filter(None, self.pathspec.last.path.split("/")))
    try:
      # The first component MUST be a hive
      self.hive_name = path_components[0]
      self.hive = KeyHandle(getattr(winreg, self.hive_name))
    except AttributeError:
      raise IOError("Unknown hive name %s" % self.hive_name)
    except IndexError:
      # A hive is not specified, we just list all the hives.
      return
    # Normalize the path casing if needed
    self.key_name = "/".join(path_components[1:])
    self.local_path = CanonicalPathToLocalPath(self.key_name)
    try:
      # Maybe its a value
      key_name, value_name = os.path.split(self.local_path)
      with OpenKey(self.hive, key_name) as key:
        self.value, self.value_type = QueryValueEx(key, value_name)
      # TODO: Registry-VFS has issues when keys and values of the
      # same name exist. ListNames() does not work for a key, if a value of the
      # same name exists. The original assumption was: "We are a value and
      # therefore not a directory". This is false, since the Registry can have
      # a key and a value of the same name in the same parent key.
      self.is_directory = False
    except OSError:
      try:
        # Try to get the default value for this key
        with OpenKey(self.hive, self.local_path) as key:
          # Check for default value.
          try:
            self.value, self.value_type = QueryValueEx(key, "")
          except OSError:
            # Empty default value
            self.value = ""
            self.value_type = winreg.REG_NONE
      except OSError:
        raise IOError("Unable to open key %s" % self.key_name)
  def Stat(self, ext_attrs=None):
    """Return a StatEntry for this key or value."""
    del ext_attrs  # Unused.
    # mtime is only available for keys, not values. Also special-casing root
    # entry (it's not going to have a hive defined).
    if self.is_directory and self.hive and not self.last_modified:
      with OpenKey(self.hive, self.local_path) as key:
        (self.number_of_keys, self.number_of_values,
         self.last_modified) = QueryInfoKey(key)
    return self._Stat("", self.value, self.value_type, mtime=self.last_modified)
  def _Stat(self, name, value, value_type, mtime=None):
    """Build a StatEntry for child *name* carrying *value*/*value_type*."""
    response = rdf_client_fs.StatEntry()
    response_pathspec = self.pathspec.Copy()
    # No matter how we got here, there is no need to do case folding from now on
    # since this is the exact filename casing.
    response_pathspec.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
    response_pathspec.last.path = utils.JoinPath(response_pathspec.last.path,
                                                 name)
    response.pathspec = response_pathspec
    if self.IsDirectory():
      response.st_mode = stat.S_IFDIR
    else:
      response.st_mode = stat.S_IFREG
    if mtime:
      response.st_mtime = mtime
    # st_size reflects the UTF-8 encoded size of non-bytes values.
    if value is None:
      response.st_size = 0
    elif isinstance(value, bytes):
      response.st_size = len(value)
    else:
      response.st_size = len(str(value).encode("utf-8"))
    if value_type is not None:
      response.registry_type = self.registry_map.get(value_type, 0)
      response.registry_data = rdf_protodict.DataBlob().SetValue(value)
    return response
  def ListNames(self):
    """List the names of all keys and values."""
    # TODO: This check is flawed, because the current definition of
    # "IsDirectory" is the negation of "is a file". One registry path can
    # actually refer to a key ("directory"), a value of the same name ("file")
    # and the default value of the key at the same time.
    if not self.IsDirectory():
      return
    # Handle the special case where no hive is specified and just list the hives
    if self.hive is None:
      for name in dir(winreg):
        if name.startswith("HKEY_"):
          yield name
      return
    try:
      with OpenKey(self.hive, self.local_path) as key:
        (self.number_of_keys, self.number_of_values,
         self.last_modified) = QueryInfoKey(key)
        found_keys = set()
        # First keys
        for i in range(self.number_of_keys):
          try:
            key_name = EnumKey(key, i)
            found_keys.add(key_name)
            yield key_name
          except OSError:
            pass
        # Now Values
        for i in range(self.number_of_values):
          try:
            name, unused_value, unused_value_type = EnumValue(key, i)
            # A key might contain a sub-key and value of the same name. Do not
            # yield the same name twice in this case. With only the name,
            # the caller cannot differentiate between a key and a value anyway.
            if name not in found_keys:
              yield name
          except OSError:
            pass
    except OSError as e:
      raise IOError("Unable to list key %s: %s" % (self.key_name, e))
  def ListFiles(self, ext_attrs=None):
    """A generator of all keys and values."""
    del ext_attrs  # Unused.
    if not self.IsDirectory():
      return
    # Root of the registry VFS: synthesize a directory entry per hive.
    if self.hive is None:
      for name in dir(winreg):
        # TODO: `dir` call in Python 2 yields names as a list of
        # `bytes` objects. Because `JoinPath` requires `unicode` objects, we
        # have to properly convert these. Once support for Python 2 is dropped,
        # this part be removed.
        if compatibility.PY2:
          name = name.decode("utf-8")
        if name.startswith("HKEY_"):
          response = rdf_client_fs.StatEntry(st_mode=stat.S_IFDIR)
          response_pathspec = self.pathspec.Copy()
          response_pathspec.last.path = utils.JoinPath(
              response_pathspec.last.path, name)
          response.pathspec = response_pathspec
          yield response
      return
    try:
      with OpenKey(self.hive, self.local_path) as key:
        (self.number_of_keys, self.number_of_values,
         self.last_modified) = QueryInfoKey(key)
        # First keys - These will look like directories.
        for i in range(self.number_of_keys):
          try:
            name = EnumKey(key, i)
            key_name = utils.JoinPath(self.local_path, name)
            try:
              # Store the default value in the stat response for values.
              with OpenKey(self.hive, key_name) as subkey:
                value, value_type = QueryValueEx(subkey, "")
            except OSError:
              value, value_type = None, None
            response = self._Stat(name, value, value_type)
            # Keys look like Directories in the VFS.
            response.st_mode = stat.S_IFDIR
            yield response
          except OSError:
            pass
        # Now Values - These will look like files.
        for i in range(self.number_of_values):
          try:
            name, value, value_type = EnumValue(key, i)
            response = self._Stat(name, value, value_type)
            # Values look like files in the VFS.
            response.st_mode = stat.S_IFREG
            yield response
          except OSError:
            pass
    except OSError as e:
      raise IOError("Unable to list key %s: %s" % (self.key_name, e))
  def IsDirectory(self):
    """True when this path resolved to a key rather than a value."""
    return self.is_directory
  def Read(self, length):
    """Read up to *length* bytes of the value, lazily wrapped in BytesIO."""
    if not self.fd:
      self.fd = io.BytesIO(self._bytes_value)
    return self.fd.read(length)
  def Seek(self, offset, whence=0):
    """Seek within the (lazily materialized) value bytes."""
    if not self.fd:
      self.fd = io.BytesIO(self._bytes_value)
    return self.fd.seek(offset, whence)
  @property
  def _bytes_value(self):
    # The registry value serialized as bytes: raw for REG_BINARY-style
    # values, UTF-8 encoded str() for everything else.
    if isinstance(self.value, bytes):
      return self.value
    else:
      return str(self.value).encode("utf-8")
| |
"""Start up an Akara server on the command-line
This is an internal module not for use by other libraries.
"""
import os
import sys
import socket
import logging
import signal
import akara
from akara import read_config
from akara import logger, logger_config
from akara.multiprocess_http import AkaraPreforkServer
from akara import global_config
# Need this in order to install "/" as service.list_services. I think
# this code is somewhat clunky. There should be no reason to need the
# import here, but something has to install the top-level search
# handler, and doing it via a simple_service just makes things, well,
# simple. But the registry can't import services because services
# needs a fully-loaded registry to register things.
# Threw my hands up in the sky and broke the reference here.
# Caveat emptor.
from akara import services
def save_pid(pid_file):
    "Save the current pid to the given PID filename"
    # One line, newline terminated
    pid_s = str(os.getpid()) + "\n"
    try:
        f = open(pid_file, "w")
    except Exception, error:
        raise Exception("Unable to open PID file: %s" %
                        (error,))
    # Python 2 idiom: nested try/try-finally so the file is closed whether or
    # not the write fails, while write errors are still re-raised with context.
    try:
        try:
            f.write(pid_s)
        except Exception, error:
            raise Exception("Unable to write to PID file %r: %s" %
                            (pid_file, error))
    finally:
        f.close()
def remove_pid(pid_file):
"Remove the given filename (which should be the PID file)"
try:
os.remove(pid_file)
except Exception, error:
if not os.path.exists(pid_file):
logger.error("Unable to remove PID file %r: %s",
pid_file, error)
else:
logger.info("Removed PID file %r", pid_file)
# There are two ways to run the Akara server, either in debug mode
# (running in the foreground, with the -X option) or in daemon mode
# (running in the background) which is the default. The latter is
# trickier to support.
# In that case the command-line program spawns off a new process,
# which is the master HTTP node ("the flup server"). It manages the
# subprocesses which actually handle the HTTP requests. The flup
# server starts up and either manages to set things up or fails
# because of some problem. The command-line program needs to exit with
# an error code if there was a problem, so there must be some sort of
# communications between the two.
# The solution is simple. Setup a pipe. The child sends either
# "success\n" or "failure\n" as appropriate. The parent (which is the
# command-line program) waits until it gets one of those messages.
class NotifyParent(object):
    """One-shot child-to-parent startup status channel built on an OS pipe.

    The daemonized child writes "success\\n" or "failure\\n"; the command-line
    parent blocks in read_and_close() until one of them arrives.
    """
    def __init__(self):
        self.r_pipe, self.w_pipe = os.pipe()
    def failure(self):
        "Called in the child, when it must abort"
        os.write(self.w_pipe, "failure\n")
    def success(self):
        "Called in the child, when it's ready for HTTP requests"
        os.write(self.w_pipe, "success\n")
    def read_and_close(self):
        "Called in the parent, to wait for the child"
        status = os.read(self.r_pipe, 1000)
        os.close(self.r_pipe)
        os.close(self.w_pipe)
        return status
# Life is much easier in debug mode. There's no need to communicate
# anything to the non-existent parent.
class NoParent(object):
    """Null-object stand-in for NotifyParent, used in debug (-X) mode.

    In debug mode the server runs in the foreground, so there is no parent
    process waiting for a status message; both notifications are no-ops.
    """
    def failure(self):
        "No-op; no parent process is waiting."
        pass
    def success(self):
        "No-op; no parent process is waiting."
        pass
def demonize():
    """Fork the flup server into the background.

    The parent never returns: it waits on the pipe for the child's status
    and exits 0 on "success", 1 otherwise. The child becomes a new session
    leader and returns the NotifyParent it must later signal.
    """
    notify_parent = NotifyParent()
    if os.fork():
        # In the command-line parent. Wait for child status.
        status = notify_parent.read_and_close()
        if status.startswith("success"):
            raise SystemExit(0)
        else:
            raise SystemExit(1)
    # In the child, which is the flup server.
    try:
        # Create a new session with this process as the group leader
        try:
            setsid = os.setsid
        except AttributeError:
            # Platforms without setsid: fall back to creating a process group.
            os.setpgid(0, 0)
        else:
            setsid()
    except:
        # Tell the waiting parent we failed before propagating the error.
        notify_parent.failure()
        raise
    return notify_parent
# Sets up the global_config module contents
def set_global_config(settings):
    """Copy every configuration setting onto the akara.global_config module."""
    for name, value in settings.items():
        setattr(global_config, name, value)
def main(args):
    """Run the Akara flup server until shutdown.

    *args* is a parsed command-line namespace providing config_filename,
    debug (-X, run in foreground) and skip_pid_check (-f). Each pass of the
    loop (re)reads the configuration; a SIGHUP makes server.run() return
    truthy, restarting the loop with fresh configuration.
    """
    config_filename = args.config_filename
    debug = args.debug
    skip_pid_check = args.skip_pid_check
    first_time = True
    old_server_address = None
    sock = None
    while 1:
        # This is the main loop for the flup server.
        # Why is it a loop? A SIGHUP sent to the server
        # will shut down flup then reread the configuration
        # file, reload the extension modules, and start
        # the flup server again.
        try:
            settings, config = read_config.read_config(config_filename)
        except read_config.Error, err:
            logger.fatal(str(err))
            if first_time:
                raise SystemExit("Cannot start Akara. Exiting.")
            else:
                raise SystemExit("Cannot restart Akara. Exiting.")
        akara.raw_config = config
        # Establish the global configuration module
        set_global_config(settings)
        # In debug mode (-X), display all log messages.
        # Otherwise, use the configuration level
        if debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(settings["log_level"])
        # Open this now, so any errors can be reported
        try:
            logger_config.set_logfile(settings["error_log"])
        except IOError, err:
            # Only give the 'akara setup' text here because it's where
            # we get to with an empty/nonexistant configuration file.
            logger.fatal("""\
Could not open the Akara error log:
%s
Does that directory exist and is it writeable?
You may want to use 'akara setup' to set up the directory structure.""" % err)
            sys.exit(1)
        # Configure the access log
        try:
            logger_config.set_access_logfile(settings["access_log"])
        except IOError, err:
            logger.fatal("""\
Could not open the Akara access log:
%s
Does that directory exist and is it writeable?""" % err)
            sys.exit(1)
        # Don't start if the PID file already exists.
        pid_file = settings["pid_file"]
        if first_time and (not skip_pid_check) and os.path.exists(pid_file):
            msg = ("Akara PID file %r already exists. Is another Akara instance running?\n"
                   "If not, remove the file or use the '-f' option to skip this check")
            logger.fatal(msg % (pid_file,))
            raise SystemExit(1)
        if debug or not first_time:
            notify_parent = NoParent()
        else:
            # Spawn off the actual listener.
            # The parent will always raise an exception, and never return.
            try:
                notify_parent = demonize()
            except Exception, err:
                # This can come from the parent or the child.
                logger.critical("Cannot spawn HTTP server", exc_info=True)
                raise SystemExit("Exiting - check the log file for details")
        # At this point we are in the child. Set things up as
        # far as we can go, then tell the parent that we're ready.
        try:
            server_address = settings["server_address"]
            # Only rebind the listening socket when the address changed
            # (e.g. after a SIGHUP with an edited configuration file).
            if server_address != old_server_address:
                if sock is not None:
                    sock.close()
                sock = socket.socket()
                # XXX Should SO_REUSEADDR be a configuration setting?
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                # Disable Nagle's algorithm, which causes problems with
                # keep-alive. See:
                # http://stackoverflow.com/questions/1781766/
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                host, port = settings["server_address"]
                if host:
                    description = "interface %r port %r" % (host, port)
                else:
                    description = "port %r" % (port,)
                try:
                    sock.bind(settings["server_address"])
                except socket.error, error:
                    raise SystemExit("Can not bind to " + description)
                logger.info("Listening to " + description)
                sock.listen(socket.SOMAXCONN)
                old_server_address = server_address
            # NOTE: StartServers not currently supported and likely won't be.
            # Why? Because the old algorithm would add/cull the server count
            # within a few check intervals (each about 1 second), so it
            # didn't have much long-term effect.
            logger.info("Akara server is running")
            server = AkaraPreforkServer(
                minSpare = settings["min_spare_servers"],
                maxSpare = settings["max_spare_servers"],
                maxChildren = settings["max_servers"],
                maxRequests = settings["max_requests_per_server"],
                settings = settings,
                config = config,
                )
            # Everything is ready to go, except for saving the PID file
            if first_time:
                save_pid(pid_file)
        except:
            notify_parent.failure()
            logger.critical("Could not set up the Akara HTTP server", exc_info=True)
            raise SystemExit("Akara HTTP server exiting - check the log file for details")
        else:
            notify_parent.success()
        # Fully demonize - no more logging to sys.std*
        # Close the standard file descriptors.
        # Redirect sys.std* to the log file
        if first_time and not debug:
            logger_config.remove_logging_to_stderr()
            logger_config.redirect_stdio()
        try:
            hupReceived = server.run(sock)
        except SystemExit:
            # Propogate the SystemExit through the system. Remember,
            # this is also the root of the call tree for the child
            # which handles the request. The child exits at some point.
            raise
        # Strange. Why didn't flup disable this alarm?
        signal.alarm(0)
        if not hupReceived:
            logger.info("Akara server shutting down.")
            break
        logger.info("Akara server is restarting.")
        first_time = False
    remove_pid(pid_file)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# WeaveTimeSync test with support for the following sync modes:
# - local
# - service
# - auto
# Also available: fault-injection (enabled via command-line option)
#
from __future__ import absolute_import
from __future__ import print_function
import itertools
import os
import getopt
import sys
import unittest
import set_test_path
from happy.Utils import *
import happy.HappyNodeList
import WeaveStateLoad
import WeaveStateUnload
import WeaveTime
import WeaveUtilities
import plugins.plaid.Plaid as Plaid
# Command-line driven fault-injection configuration, shared with the test
# class below (its getopt options are merged in the __main__ section).
gFaultOpts = WeaveUtilities.FaultInjectionOptions()
# Global test configuration; overridden by --syncmode / --enable-faults.
gOptions = { 'enableFaults': False, 'mode': "local" }
class test_weave_time_01(unittest.TestCase):
    """Weave Time Sync test among a client, a coordinator and a server.

    The sync mode ("local", "service" or "auto") and whether the
    fault-injection matrix is exercised come from the module-level
    ``gOptions`` dict, which is filled in from the command line in the
    ``__main__`` section below.
    """

    def setUp(self):
        """Load the three-node topology and configure Plaid time-warping."""
        self.tap = None

        # Use the TAP-based topology when Weave is built on LwIP.
        if os.environ.get("WEAVE_SYSTEM_CONFIG_USE_LWIP") == "1":
            self.topology_file = os.path.dirname(os.path.realpath(__file__)) + \
                "/../../../topologies/standalone/three_nodes_on_tap_thread_weave.json"
            self.tap = "wpan0"
        else:
            self.topology_file = os.path.dirname(os.path.realpath(__file__)) + \
                "/../../../topologies/standalone/three_nodes_on_thread_weave.json"
        self.show_strace = False

        # setting Mesh for thread test
        options = WeaveStateLoad.option()
        options["quiet"] = True
        options["json_file"] = self.topology_file
        setup_network = WeaveStateLoad.WeaveStateLoad(options)
        setup_network.run()

        # Set up Plaid for faster execution.  In "service" mode one node
        # plays the service and runs on real time, so only two nodes are
        # plaid clients; the time budget at high speed also varies by mode.
        plaid_opts = Plaid.default_options()
        plaid_opts['num_clients'] = 3
        if gOptions["mode"] == "service":
            plaid_opts['num_clients'] = 2
            plaid_opts["max_time_at_high_speed_secs"] = 40
        if gOptions["mode"] == "local":
            plaid_opts["max_time_at_high_speed_secs"] = 80
        if gOptions["mode"] == "auto":
            plaid_opts["max_time_at_high_speed_secs"] = 40
        plaid_opts['strace'] = self.show_strace
        self.plaid = Plaid.Plaid(plaid_opts)
        self.use_plaid = self.plaid.isPlaidConfigured()

    def tearDown(self):
        """Tear the topology back down after each test."""
        options = WeaveStateUnload.option()
        options["quiet"] = True
        options["json_file"] = self.topology_file
        teardown_network = WeaveStateUnload.WeaveStateUnload(options)
        teardown_network.run()

    def test_weave_time(self):
        """Run a no-fault sync pass, then (optionally) the fault-injection matrix."""
        options = happy.HappyNodeList.option()
        options["quiet"] = True
        mode = gOptions['mode']

        # The following no-fault run is used to compute the fault counters
        # that are used to generate a set of tests with faults injected.
        # Note that the nodes are allowed to run at plaid speed only for the
        # duration of the sequence; otherwise, the nodes would race ahead in
        # time while the python script is trying to terminate them; this would
        # cause very large event counters that then cause the generation of
        # useless tests.
        if self.use_plaid:
            self.__start_plaid_server()
        value, data = self.__run_time_test_between("node01", "node02",
                                                   "node03", mode, use_plaid=self.use_plaid)
        if self.use_plaid:
            self.__stop_plaid_server()
        err = self.__process_result("node01", "node02", "node03", mode, value)
        # err=True => failure, err=False => success
        self.assertEqual(err, False, "failed to run success case")

        if not gOptions['enableFaults']:
            return

        output_logs = {}
        output_logs['client'] = data['client_output']
        output_logs['coordinator'] = data['coordinator_output']
        output_logs['server'] = data['server_output']

        num_tests = 0
        num_failed_tests = 0
        failed_tests = []

        # During the fault injection tests, we don't need to limit the time executed at plaid speed.
        self.plaid.max_time_at_high_speed_secs = 0

        for node in gFaultOpts.nodes:
            restart = True
            # Derive one fault configuration per fault point observed in the
            # no-fault run's logs for this node.
            fault_configs = gFaultOpts.generate_fault_config_list(node, output_logs[node], restart)
            for fault_config in fault_configs:
                test_tag = "_" + "_".join([str(x) for x in (num_tests, node, fault_config)])
                print("tag: ", test_tag)
                if self.use_plaid:
                    self.__start_plaid_server()
                value, data = self.__run_time_test_between("node01", "node02",
                                                           "node03", mode, num_iterations=3,
                                                           faults={node: fault_config}, test_tag=test_tag, use_plaid=self.use_plaid)
                if self.__process_result("node01", "node02", "node03", mode, value):
                    # __process_result returns True if the test failed
                    num_failed_tests += 1
                    failed_tests.append(test_tag)
                if self.use_plaid:
                    self.__stop_plaid_server()
                num_tests += 1

        print("executed %d cases" % num_tests)
        print("failed %d cases:" % num_failed_tests)
        if num_failed_tests > 0:
            for failed in failed_tests:
                print("    " + failed)
        self.assertEqual(num_failed_tests, 0, "Something failed")

    def __process_result(self, nodeA, nodeB, nodeC, mode, value):
        """Print a pass/fail banner; return True when the run FAILED."""
        print("time sync test among client:" + nodeA + \
              ", coordinator:" + nodeB + ", server:" + nodeC + \
              ", sync mode:" + mode)
        if value:
            print(hgreen("Passed"))
            failed = False
        else:
            print(hred("Failed"))
            failed = True
        return failed

    def __run_time_test_between(self, nodeA, nodeB, nodeC, mode, num_iterations=None, faults=None, test_tag="", use_plaid=False):
        """Drive one WeaveTime run and return its (value, data) result pair.

        ``faults`` maps a role name ('client'/'coordinator'/'server') to a
        fault-injection config string.  The default is now ``None`` instead
        of a shared mutable ``{}`` (classic mutable-default pitfall).
        """
        if faults is None:
            faults = {}
        options = WeaveTime.option()
        options["quiet"] = False
        options["client"] = nodeA
        options["coordinator"] = nodeB
        options["server"] = nodeC
        options["mode"] = mode
        options["tap"] = self.tap
        options["client_faults"] = faults.get('client')
        options["coordinator_faults"] = faults.get('coordinator')
        options["server_faults"] = faults.get('server')
        options["iterations"] = num_iterations
        options["test_tag"] = test_tag
        if use_plaid:
            options["plaid_client_env"] = self.plaid.getPlaidClientLibEnv(nodeA)
            options["plaid_coordinator_env"] = self.plaid.getPlaidClientLibEnv(nodeB)
            options["plaid_server_env"] = self.plaid.getPlaidClientLibEnv(nodeC)

        weave_time = WeaveTime.WeaveTime(options)
        ret = weave_time.run()

        value = ret.Value()
        data = ret.Data()
        return value, data

    def __start_plaid_server(self):
        """Start the Plaid (virtual time) server process."""
        self.plaid.startPlaidServerProcess()

    def __stop_plaid_server(self):
        """Stop the Plaid (virtual time) server process."""
        self.plaid.stopPlaidServerProcess()
if __name__ == "__main__":
    # Assemble the usage text, appending the generic fault-injection options.
    help_str = """usage:
    --help            Print this usage info and exit
    --syncmode        Time sync mode (local, service, auto)
    --enable-faults   Run fault injection tests\n""" + gFaultOpts.help_string

    longopts = ["help", "enable-faults", "syncmode="] + list(gFaultOpts.getopt_config)

    try:
        parsed_opts, extra_args = getopt.getopt(sys.argv[1:], "hfm:", longopts)
    except getopt.GetoptError as err:
        print(help_str)
        print(hred(str(err)))
        sys.exit(hred("%s: Failed to parse arguments." % (__file__)))

    # Let the fault-injection helper consume the options it understands.
    parsed_opts = gFaultOpts.process_opts(parsed_opts)

    for opt, arg in parsed_opts:
        if opt in ("-h", "--help"):
            print(help_str)
            sys.exit(0)
        elif opt in ("-m", "--syncmode"):
            gOptions["mode"] = arg
        elif opt in ("-f", "--enable-faults"):
            gOptions["enableFaults"] = True

    # Leave a clean argv behind for the unittest runner.
    sys.argv = [sys.argv[0]]

    WeaveUtilities.run_unittest()
| |
#!/usr/bin/env python
from __future__ import print_function
################################################################################
#
# test_ruffus_utility_parse_task_arguments.py
#
#################################################################################
"""
test_ruffus_utility.py
"""
import unittest
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
# The package name (normally "ruffus") is derived from the parent directory
# so the tests exercise the checked-out copy rather than an installed one.
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
# Emulate "from <ruffus> import *": copy each public attribute of the
# package into this module's global namespace.
try:
    attrlist = ruffus.__all__
except AttributeError:
    attrlist = dir (ruffus)
for attr in attrlist:
    if attr[0:2] != "__":
        globals()[attr] = getattr (ruffus, attr)
# Likewise emulate "from <ruffus>.ruffus_utility import *"; this is what
# brings parse_task_arguments and the error_* exceptions used below into scope.
try:
    attrlist = ruffus.ruffus_utility.__all__
except AttributeError:
    attrlist = dir (ruffus.ruffus_utility)
for attr in attrlist:
    if attr[0:2] != "__":
        globals()[attr] = getattr (ruffus.ruffus_utility, attr)
# NOTE(review): unittest was already imported at the top of the file; this
# re-import is redundant but harmless.
import unittest
#_________________________________________________________________________________________
# Test_parse_transform_args
#_________________________________________________________________________________________
class Test_parse_transform_args (unittest.TestCase):
    """
    Make sure (input, filter, modify_inputs, output, extras...) works
    for @transform
    """
    def test_parse_transform_args(self):
        """Exercise parse_task_arguments() with the @transform argument layout."""
        expected_arguments = ["input", "filter", "modify_inputs", "output", "extras"]
        empty_unnamed_arguments = []
        empty_named_arguments = {}
        orig_unnamed_arguments = ["*.txt", suffix(".txt"), ".result", 1,2,3,4]
        task_description = "@transform(%s)\ndef myfunc(...)\n"

        expected_results = { 'input'  : orig_unnamed_arguments[0],
                             'filter' : orig_unnamed_arguments[1],
                             'output' : orig_unnamed_arguments[2],
                             'extras' : orig_unnamed_arguments[3:],
                             'modify_inputs_mode': 2,
                             'modify_inputs': None}
        # Same as expected_results but with modify_inputs set via add_inputs /
        # replace_inputs respectively.
        add_inputs_expected_results = dict(expected_results,
                                           modify_inputs_mode=0,
                                           modify_inputs=("a.test", "b.test"))
        replace_inputs_expected_results = dict(expected_results,
                                               modify_inputs_mode=1,
                                               modify_inputs=("a.test", "b.test"))

        # Error: empty argument list
        # (fixed: the first parameter is the *unnamed* argument list;
        #  the original passed empty_named_arguments twice)
        with self.assertRaises(error_missing_args):
            parse_task_arguments (empty_unnamed_arguments, empty_named_arguments, expected_arguments, task_description)

        # parse complete correctly
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {}, expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: missing argument
        unnamed_arguments = orig_unnamed_arguments[0:1]
        with self.assertRaises(error_missing_args):
            results = parse_task_arguments (unnamed_arguments,
                                            {}, expected_arguments, task_description)

        # parse almost complete and rescued with named parameters
        unnamed_arguments = orig_unnamed_arguments[0:2]
        results = parse_task_arguments (unnamed_arguments,
                                        {'output' : orig_unnamed_arguments[2],
                                         'extras' : orig_unnamed_arguments[3:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # All named parameters
        results = parse_task_arguments ([],
                                        {'input'  : orig_unnamed_arguments[0],
                                         'filter' : orig_unnamed_arguments[1],
                                         'output' : orig_unnamed_arguments[2],
                                         'extras' : orig_unnamed_arguments[3:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: filter is not regex, suffix or formatter
        with self.assertRaises(TypeError):
            results = parse_task_arguments ([],
                                            {'input'  : orig_unnamed_arguments[0],
                                             'filter' : "a",
                                             'output' : orig_unnamed_arguments[2],
                                             'extras' : orig_unnamed_arguments[3:]},
                                            expected_arguments, task_description)

        # Error: Unknown named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments ([],
                                            {'input'  : orig_unnamed_arguments[0],
                                             'filter' : orig_unnamed_arguments[1],
                                             'output' : orig_unnamed_arguments[2],
                                             'what'   : orig_unnamed_arguments[3:]},
                                            expected_arguments, task_description)

        # Error: Duplicate named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {'input'  : orig_unnamed_arguments[0],
                                             'extras' : orig_unnamed_arguments[3:]},
                                            expected_arguments, task_description)

        # add_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via named and paranoid add_inputs wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : add_inputs("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via unnamed
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(2, add_inputs("a.test", "b.test"))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # replace_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via named and paranoid inputs() wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : inputs(("a.test", "b.test"))}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via unnamed
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(2, inputs(("a.test", "b.test")))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # Error: both add_inputs and replace_inputs via named
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : ("a.test", "b.test"),
                                             "add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)

        # Error: replace_inputs via unnamed plus add_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(2, inputs(("a.test", "b.test")))
            results = parse_task_arguments (unnamed_arguments, {"add_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: add_inputs via unnamed plus replace_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(2, add_inputs("a.test", "b.test"))
            results = parse_task_arguments (unnamed_arguments, {"replace_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: inputs() takes exactly one argument
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs("a.test", "b.test")}, expected_arguments, task_description)
        with self.assertRaises(error_inputs_multiple_args):
            unnamed_arguments = orig_unnamed_arguments[0:2] + [inputs("a.test", "b.test")] + orig_unnamed_arguments[2:]
            results = parse_task_arguments (unnamed_arguments, {}, expected_arguments, task_description)

        # Error: inputs() with no arguments at all
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs()}, expected_arguments, task_description)
#________________________________________________________________________________________________________
# Test_parse_product_args
#________________________________________________________________________________________________________
class Test_parse_product_args (unittest.TestCase):
    """
    Make sure (input, filter, input2, filter2, input3, filter3,..., output, extras...) works
    for @product
    """
    def test_parse_product_args(self):
        """Exercise parse_task_arguments() with the @product argument layout."""
        self.maxDiff = None
        expected_arguments = ["input", "filter", "inputN", "modify_inputs", "output", "extras"]
        empty_unnamed_arguments = []
        empty_named_arguments = {}
        orig_unnamed_arguments = ["*.txt", formatter(".txt"), "*.contig", formatter(), "*.genome", formatter(), "{basename[0][0]}_{basename[1][0]}.result", 1,2,3,4]
        task_description = "@product(%s)\ndef myfunc(...)\n"

        # For @product, inputs and filters are gathered into parallel lists.
        expected_results = { 'input'  : [orig_unnamed_arguments[0], orig_unnamed_arguments[2], orig_unnamed_arguments[4]],
                             'filter' : [orig_unnamed_arguments[1], orig_unnamed_arguments[3], orig_unnamed_arguments[5]],
                             'output' : orig_unnamed_arguments[6],
                             'extras' : orig_unnamed_arguments[7:],
                             'modify_inputs_mode': 2,
                             'modify_inputs': None}
        # Same as expected_results but with modify_inputs set via add_inputs /
        # replace_inputs respectively.
        add_inputs_expected_results = dict(expected_results,
                                           modify_inputs_mode=0,
                                           modify_inputs=("a.test", "b.test"))
        replace_inputs_expected_results = dict(expected_results,
                                               modify_inputs_mode=1,
                                               modify_inputs=("a.test", "b.test"))

        # Error: empty argument list
        # (fixed: the first parameter is the *unnamed* argument list;
        #  the original passed empty_named_arguments twice)
        with self.assertRaises(error_missing_args):
            parse_task_arguments (empty_unnamed_arguments, empty_named_arguments, expected_arguments, task_description)

        # parse complete correctly
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {}, expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: missing argument
        unnamed_arguments = orig_unnamed_arguments[0:6]
        with self.assertRaises(error_missing_args):
            results = parse_task_arguments (unnamed_arguments,
                                            {}, expected_arguments, task_description)

        # parse almost complete and rescued with named parameters
        unnamed_arguments = orig_unnamed_arguments[0:6]
        results = parse_task_arguments (unnamed_arguments,
                                        {'output' : expected_results['output'],
                                         'extras' : expected_results['extras']},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # All named parameters
        results = parse_task_arguments ([],
                                        {'input'   : orig_unnamed_arguments[0],
                                         'filter'  : orig_unnamed_arguments[1],
                                         'input2'  : orig_unnamed_arguments[2],
                                         'filter2' : orig_unnamed_arguments[3],
                                         'input3'  : orig_unnamed_arguments[4],
                                         'filter3' : orig_unnamed_arguments[5],
                                         'output'  : orig_unnamed_arguments[6],
                                         'extras'  : orig_unnamed_arguments[7:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: Unknown named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments ([],
                                            {'input'   : orig_unnamed_arguments[0],
                                             'filter'  : orig_unnamed_arguments[1],
                                             'input2'  : orig_unnamed_arguments[2],
                                             'filter2' : orig_unnamed_arguments[3],
                                             'input3'  : orig_unnamed_arguments[4],
                                             'filter3' : orig_unnamed_arguments[5],
                                             'output'  : orig_unnamed_arguments[6],
                                             'what'    : orig_unnamed_arguments[7:]},
                                            expected_arguments, task_description)

        # Error: Duplicate named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {'input'  : orig_unnamed_arguments[0],
                                             'extras' : orig_unnamed_arguments[7:]},
                                            expected_arguments, task_description)

        # add_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via named and paranoid add_inputs wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : add_inputs("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via unnamed (inserted after the input/filter pairs)
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(6, add_inputs("a.test", "b.test"))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # replace_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via named and paranoid inputs() wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : inputs(("a.test", "b.test"))}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via unnamed
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(6, inputs(("a.test", "b.test")))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # Error: both add_inputs and replace_inputs via named
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : ("a.test", "b.test"),
                                             "add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)

        # Error: replace_inputs via unnamed plus add_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(6, inputs(("a.test", "b.test")))
            results = parse_task_arguments (unnamed_arguments, {"add_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: add_inputs via unnamed plus replace_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(6, add_inputs("a.test", "b.test"))
            results = parse_task_arguments (unnamed_arguments, {"replace_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: inputs() takes exactly one argument
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs("a.test", "b.test")}, expected_arguments, task_description)

        # Error: inputs() with no arguments at all
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs()}, expected_arguments, task_description)
#________________________________________________________________________________________________________
# Test_parse_combinatorics_args
#________________________________________________________________________________________________________
class Test_parse_combinatorics_args (unittest.TestCase):
    """
    Make sure (input, filter, tuple_size, output, extras...) works
    for @combinations
    """
    def test_parse_combinations_args(self):
        """Exercise parse_task_arguments() with the @combinations argument layout."""
        expected_arguments = ["input", "filter", "tuple_size", "modify_inputs", "output", "extras"]
        empty_unnamed_arguments = []
        empty_named_arguments = {}
        orig_unnamed_arguments = ["*.txt", suffix(".txt"), 5, ".result", 1,2,3,4]
        task_description = "@combinations(%s)\ndef myfunc(...)\n"

        expected_results = { 'input'     : orig_unnamed_arguments[0],
                             'filter'    : orig_unnamed_arguments[1],
                             'tuple_size': orig_unnamed_arguments[2],
                             'output'    : orig_unnamed_arguments[3],
                             'extras'    : orig_unnamed_arguments[4:],
                             'modify_inputs_mode': 2,
                             'modify_inputs': None}
        # Same as expected_results but with modify_inputs set via add_inputs /
        # replace_inputs respectively.
        add_inputs_expected_results = dict(expected_results,
                                           modify_inputs_mode=0,
                                           modify_inputs=("a.test", "b.test"))
        replace_inputs_expected_results = dict(expected_results,
                                               modify_inputs_mode=1,
                                               modify_inputs=("a.test", "b.test"))

        # Error: empty argument list
        # (fixed: the first parameter is the *unnamed* argument list;
        #  the original passed empty_named_arguments twice)
        with self.assertRaises(error_missing_args):
            parse_task_arguments (empty_unnamed_arguments, empty_named_arguments, expected_arguments, task_description)

        # parse complete correctly
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {}, expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: tuple_size is not an int (unnamed)
        unnamed_arguments = orig_unnamed_arguments[:]
        unnamed_arguments[2] = 'a'
        with self.assertRaises(TypeError):
            results = parse_task_arguments (unnamed_arguments,
                                            {}, expected_arguments, task_description)

        # Error: missing argument
        unnamed_arguments = orig_unnamed_arguments[0:2]
        with self.assertRaises(error_missing_args):
            results = parse_task_arguments (unnamed_arguments,
                                            {}, expected_arguments, task_description)

        # parse almost complete and rescued with named parameters
        unnamed_arguments = orig_unnamed_arguments[0:2]
        results = parse_task_arguments (unnamed_arguments,
                                        {'tuple_size': orig_unnamed_arguments[2],
                                         'output'    : orig_unnamed_arguments[3],
                                         'extras'    : orig_unnamed_arguments[4:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # All named parameters
        results = parse_task_arguments ([],
                                        {'input'     : orig_unnamed_arguments[0],
                                         'filter'    : orig_unnamed_arguments[1],
                                         'tuple_size': orig_unnamed_arguments[2],
                                         'output'    : orig_unnamed_arguments[3],
                                         'extras'    : orig_unnamed_arguments[4:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: tuple_size is not an int (named)
        with self.assertRaises(TypeError):
            results = parse_task_arguments ([],
                                            {'input'     : orig_unnamed_arguments[0],
                                             'filter'    : orig_unnamed_arguments[1],
                                             'tuple_size': "a",
                                             'output'    : orig_unnamed_arguments[3],
                                             'extras'    : orig_unnamed_arguments[4:]},
                                            expected_arguments, task_description)

        # Error: Unknown named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments ([],
                                            {'input'     : orig_unnamed_arguments[0],
                                             'filter'    : orig_unnamed_arguments[1],
                                             'tuple_size': orig_unnamed_arguments[2],
                                             'output'    : orig_unnamed_arguments[3],
                                             'what'      : orig_unnamed_arguments[4:]},
                                            expected_arguments, task_description)

        # Error: Duplicate named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {'input'  : orig_unnamed_arguments[0],
                                             'extras' : orig_unnamed_arguments[3:]},
                                            expected_arguments, task_description)

        # add_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via named and paranoid add_inputs wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"add_inputs" : add_inputs("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # add_inputs correct via unnamed (inserted after tuple_size)
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(3, add_inputs("a.test", "b.test"))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, add_inputs_expected_results)

        # replace_inputs correct via named
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via named and paranoid inputs() wrapping
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {"replace_inputs" : inputs(("a.test", "b.test"))}, expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # replace_inputs correct via unnamed
        unnamed_arguments = list(orig_unnamed_arguments)
        unnamed_arguments.insert(3, inputs(("a.test", "b.test")))
        results = parse_task_arguments (unnamed_arguments, {},
                                        expected_arguments, task_description)
        self.assertEqual(results, replace_inputs_expected_results)

        # Error: both add_inputs and replace_inputs via named
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : ("a.test", "b.test"),
                                             "add_inputs" : ("a.test", "b.test")}, expected_arguments, task_description)

        # Error: replace_inputs via unnamed plus add_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(3, inputs(("a.test", "b.test")))
            results = parse_task_arguments (unnamed_arguments, {"add_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: add_inputs via unnamed plus replace_inputs via named
        with self.assertRaises(error_too_many_args):
            unnamed_arguments = list(orig_unnamed_arguments)
            unnamed_arguments.insert(3, add_inputs("a.test", "b.test"))
            results = parse_task_arguments (unnamed_arguments, {"replace_inputs" : ("a.test", "b.test")},
                                            expected_arguments, task_description)

        # Error: inputs() takes exactly one argument
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs("a.test", "b.test")}, expected_arguments, task_description)

        # Error: inputs() with no arguments at all
        with self.assertRaises(error_inputs_multiple_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {"replace_inputs" : inputs()}, expected_arguments, task_description)
#________________________________________________________________________________________________________
# Test_parse_originate_args
#________________________________________________________________________________________________________
class Test_parse_originate_args (unittest.TestCase):
    """
    Make sure @originate(output, extras...) works
    """
    def test_parse_originate_args(self):
        """Exercise parse_task_arguments() with the @originate argument layout."""
        expected_arguments = ["output", "extras"]
        empty_unnamed_arguments = []
        empty_named_arguments = {}
        orig_unnamed_arguments = [["a.1","b.1"], 1,2,3,4]
        task_description = "@originate(%s)\ndef myfunc(...)\n"

        expected_results = { 'output' : orig_unnamed_arguments[0],
                             'extras' : orig_unnamed_arguments[1:]}

        # Error: empty argument list
        # (fixed: the first parameter is the *unnamed* argument list;
        #  the original passed empty_named_arguments twice)
        with self.assertRaises(error_missing_args):
            parse_task_arguments (empty_unnamed_arguments, empty_named_arguments, expected_arguments, task_description)

        # parse complete correctly
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {}, expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # All named parameters
        results = parse_task_arguments ([],
                                        {'output' : orig_unnamed_arguments[0],
                                         'extras' : orig_unnamed_arguments[1:]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: Unknown named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments ([],
                                            {'output' : orig_unnamed_arguments[0],
                                             'what'   : orig_unnamed_arguments[1:]},
                                            expected_arguments, task_description)

        # Error: Duplicate named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {'output' : orig_unnamed_arguments[0]},
                                            expected_arguments, task_description)
#_________________________________________________________________________________________
# Test_parse_mkdir_args
#_________________________________________________________________________________________
class Test_parse_mkdir_args (unittest.TestCase):
    """
    Make sure (input, filter, output) works for @mkdir
    (no modify_inputs and no extras are allowed).
    """
    def test_parse_mkdir_args(self):
        """Exercise parse_task_arguments() with the @mkdir argument layout."""
        expected_arguments = ["input", "filter", "output"]
        empty_unnamed_arguments = []
        empty_named_arguments = {}
        orig_unnamed_arguments = ["*.txt", suffix(".txt"), ".result"]
        task_description = "@mkdir(%s)\ndef myfunc(...)\n"

        expected_results = { 'input'  : orig_unnamed_arguments[0],
                             'filter' : orig_unnamed_arguments[1],
                             'output' : orig_unnamed_arguments[2]}

        # Error: empty argument list
        # (fixed: the first parameter is the *unnamed* argument list;
        #  the original passed empty_named_arguments twice)
        with self.assertRaises(error_missing_args):
            parse_task_arguments (empty_unnamed_arguments, empty_named_arguments, expected_arguments, task_description)

        # parse complete correctly
        results = parse_task_arguments (orig_unnamed_arguments,
                                        {}, expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: missing argument
        unnamed_arguments = orig_unnamed_arguments[0:1]
        with self.assertRaises(error_missing_args):
            results = parse_task_arguments (unnamed_arguments,
                                            {}, expected_arguments, task_description)

        # parse almost complete and rescued with named parameter
        unnamed_arguments = orig_unnamed_arguments[0:2]
        results = parse_task_arguments (unnamed_arguments,
                                        {'output' : orig_unnamed_arguments[2]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # All named parameters
        results = parse_task_arguments ([],
                                        {'input'  : orig_unnamed_arguments[0],
                                         'filter' : orig_unnamed_arguments[1],
                                         'output' : orig_unnamed_arguments[2]},
                                        expected_arguments, task_description)
        self.assertEqual(results, expected_results)

        # Error: Unknown named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments ([],
                                            {'input'  : orig_unnamed_arguments[0],
                                             'filter' : orig_unnamed_arguments[1],
                                             'output' : orig_unnamed_arguments[2],
                                             'what'   : orig_unnamed_arguments[3:]},
                                            expected_arguments, task_description)

        # Error: Duplicate named arguments
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments,
                                            {'input' : orig_unnamed_arguments[0]},
                                            expected_arguments, task_description)

        # Error: no extras arguments allowed
        with self.assertRaises(error_too_many_args):
            results = parse_task_arguments (orig_unnamed_arguments + [1,formatter(),'a',4],
                                            {'input' : orig_unnamed_arguments[0]},
                                            expected_arguments, task_description)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
    # Discover and run every TestCase class defined in this module.
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python (SPOTPY).
:author: Philipp Kraft
A collection of helper functions to describe spotpy setups
Usage:
>>> spotpy.describe.sampler(sampler)
>>> spotpy.describe.setup(model)
"""
from __future__ import division, absolute_import, unicode_literals
import sys
from .parameter import get_parameters_from_setup
from .algorithms._algorithm import _algorithm
# Python 2/3 compatibility shims: on Python 3, reuse inspect.getdoc and
# alias ``unicode`` to ``str``; on Python 2, provide a docstring normalizer.
if sys.version_info[0] >= 3:
    from inspect import getdoc as _getdoc
    unicode = str
else:
    def _getdoc(obj):
        """Return obj's docstring with each line stripped and blanks removed."""
        u = obj.__doc__
        try:
            return u'\n'.join(l.strip() for l in u.split(u'\n') if l.strip())
        except UnicodeDecodeError:
            # Joining a byte docstring with a unicode separator failed.
            raise AssertionError(
                '{}: Docstring uses unicode but {} misses the line ``from __future__ import unicode_literals``'
                .format(obj, type(obj).__module__)
            )
# docutils is optional; rst.as_html() raises NotImplementedError without it.
try:
    from docutils.core import publish_string
except ImportError:
    publish_string = None
def describe(obj):
    """
    Return a long string description of a sampler with its model.

    :param obj: A sampler
    :return: str
    """
    sampler_part = 'Sampler:\n--------\n{}'.format(sampler(obj))
    model_part = 'Model:\n------\n{}'.format(setup(obj.setup))
    return sampler_part + '\n\n' + model_part
def sampler(obj):
    """
    Return a string representation of the sampler.

    By design it is rather verbose: a multiline description containing the
    sampler's class name, docstring and database/parallelism settings.

    :return: str
    """
    name = unicode(type(obj).__name__)
    lines = [name, '=' * len(name), _getdoc(obj)]
    lines.append(' db format: ' + obj.dbformat)
    lines.append(' db name: ' + obj.dbname)
    lines.append(' save simulation: ' + str(obj.save_sim))
    lines.append(' parallel: ' + type(obj.repeat).__module__.split('.')[-1])
    return '\n'.join(lines)
def setup(obj):
    """
    Describe a spotpy setup using its class name, docstring and parameters.

    :param obj: A spotpy compatible model setup
    :return: A describing string
    """
    name = unicode(type(obj).__name__)
    # Normalize the docstring's line endings and trim blank edges.
    doc = _getdoc(obj).strip('\n').replace('\r', '\n')
    # One bullet per parameter declared by the setup.
    param_lines = (' - {p}'.format(p=unicode(p))
                   for p in get_parameters_from_setup(obj))
    params = '\n'.join(param_lines)
    return '\n'.join([name, '=' * len(name), doc,
                      'Parameters:', '-' * 11, params])
if sys.version_info > (3, 5):
    # The rst helper relies on pathlib / webbrowser usage only supported here.
    from pathlib import Path
    import webbrowser

    class rst:
        """
        Creates a reStructuredText description of a sampler or a setup.

        Usage:

        >>> description = spotpy.describe.rst(sampler)
        >>> print(description)  # Prints the rst source text
        >>> # Add additional text section
        >>> description.append('#. One idea' + '\n' + '#. Another one.', title='Ideas', titlelevel=2)
        >>> description.append_image('media/image.png')
        >>> print(description.as_html())  # Create html
        >>> description.show_in_browser()
        """
        # Underline characters for captions, indexed by title level
        # (0 -> document title '=', 1 -> '-', etc.).
        caption_characters = '=-#~*+^'

        def __init__(self, setup_or_sampler):
            """
            Creates a reStructuredText description of a sampler or a setup.

            :param setup_or_sampler: Either a spotpy.algorithm sampler or a spotpy setup
            """
            if isinstance(setup_or_sampler, _algorithm):
                # A sampler carries its setup: describe both.
                self.setup = setup_or_sampler.setup
                self.sampler = setup_or_sampler
                self.rst_text = [self._sampler_text()]
            else:
                self.setup = setup_or_sampler
                self.sampler = None
                self.rst_text = []
            if self.setup:
                self.rst_text.append(self._setup_text())

        def append(self, text='', title=None, titlelevel=1):
            """
            Appends additional descriptions in reStructured text to the generated text.

            :param text: The rst text to add
            :param title: A title for the text
            :param titlelevel: The level of the section (0->h1.title, 1->h1, 2->h2, etc.)
            :return:
            """
            res = '\n'
            if title:
                res += rst._as_rst_caption(title, titlelevel)
            self.rst_text.append(res + text)

        def append_image(self, imgpath, **kwargs):
            """
            Links an image to the output.

            :param imgpath: Path to the image (must be found from the http server)
            :param kwargs: Any keyword with value is translated in rst as `:keyword: value`
                           and added to the image description

            >>> description.append_image('https://img.shields.io/travis/thouska/spotpy/master.svg',
            ...                          target='https://github.com/thouska',
            ...                          width='200px')
            """
            # NOTE(review): local name shadows the class ``rst`` inside this
            # method; harmless here since the class is not referenced below.
            rst = '.. image:: {}'.format(imgpath)
            for k, v in kwargs.items():
                rst += '\n  :{}: {}'.format(k, v)
            rst += '\n'
            self.append(rst)

        def append_math(self, latex):
            """
            Appends a block equation to the output.

            :param latex: Latex formula
            """
            rst = '.. math::\n'
            rst += '  ' + latex + '\n'
            self.append(rst)

        def __str__(self):
            # The full document is just the accumulated rst sections.
            return '\n'.join(self.rst_text)

        @classmethod
        def _as_rst_caption(cls, s, level=1):
            """
            Marks text as a section caption.

            :param s: String to be marked as caption
            :param level: Caption level 0-6, translates to 0=h1.title, 1=h1, 2=h2, etc.
            :return: The string as rst caption
            """
            return s + '\n' + cls.caption_characters[level] * len(s) + '\n\n'

        # Default stylesheet injected into the generated html (see as_html).
        css = """
        body, table, div, p, dl {
            font-family: Lucida Grande, Verdana, Geneva, Arial, sans-serif;
            font-size: 16px;
        }
        li>p {
            margin: 0px;
        }
        /* @group Heading Levels */
        h1.title {
            background-color: #fff;
            color: #0040A0;
            text-align: left;
            font-size: 200%;
            border: solid 2px #1f6992;
        }
        h1 {
            background-color: #1f6992;
            color: #fff;
            padding: .2em .5em;
            font-size: 150%;
        }
        h2 {
            background-color: #cde;
            color: #000;
            padding: .2em .5em;
            border-bottom: solid 2px #1f6992;
            font-size: 120%;
        }
        h3 {
            font-size: 100%;
            border-bottom: solid 2px #0040A0;
        }
        div.line {
            font-family: "Lucida Console", "Lucida Sans Typewriter","DejaVu Sans Mono",monospace;
            font-size: 100%;
        }
        img {
            max-width: 720px;
        }
        """

        def as_html(self, css=None):
            """
            Converts the generated reStructuredText as html5.

            :css: A string containing a cascading style sheet. If None, the default css is used
            :return: The html document as string
            :raises NotImplementedError: if docutils is not installed
            """
            if publish_string is None:
                raise NotImplementedError('The docutils package needs to be installed')
            args = {'input_encoding': 'unicode',
                    'output_encoding': 'unicode'}
            res = publish_string(source=str(self),
                                 writer_name='html5',
                                 settings_overrides=args)
            # Inject the stylesheet just before the closing </style> tag.
            style_idx = res.index('</style>')
            css = css or self.css
            res = res[:style_idx] + css + res[style_idx:]
            return res

        def show_in_browser(self, filename=None, css=None):
            """
            Writes the content as html to disk and opens a browser showing the result.

            :param filename: The html filename, if None use <setup class name>.html
            :param css: A style string, if None the default style is used
            """
            # The replace() rewrites the charset declaration emitted by
            # docutils ('unicode') to 'utf-8' for the browser.
            html = self.as_html(css).replace('unicode', 'utf-8')
            fn = filename or type(self.setup).__name__ + '.html'
            path = Path(fn).absolute()
            path.write_text(html, encoding='utf-8')
            webbrowser.open_new_tab(path.as_uri())

        def _sampler_text(self):
            """
            Generates the rst for the sampler.

            :return: rst string
            """
            obj = self.sampler
            cname = rst._as_rst_caption(type(obj).__name__, 0)
            s = [
                '- **db format:** ' + obj.dbformat,
                '- **db name:** ' + obj.dbname,
                '- **save simulation:** ' + str(obj.save_sim),
                '- **parallel:** ' + type(obj.repeat).__module__.split('.')[-1],
                '', ''
            ]
            return cname + _getdoc(obj).strip('\n') + '\n\n' + '\n'.join(s)

        def _setup_text(self):
            """
            Generates the rst for the setup.

            :return: rst string
            """
            # Get class name
            obj = self.setup
            cname = rst._as_rst_caption(type(obj).__name__, 0)
            # Add doc string
            mdoc = _getdoc(obj).strip('\n').replace('\r', '\n') + '\n\n'
            # Get parameters from class
            param_caption = rst._as_rst_caption('Parameters', 1)
            params = '\n'.join('#. **{p.name}:** {p}'.format(p=p) for p in get_parameters_from_setup(obj))
            return cname + mdoc + param_caption + params
| |
from __future__ import unicode_literals
from django.utils.six.moves.urllib.parse import parse_qs, urlsplit
from reviewboard.hostingsvcs.utils.paginator import (APIPaginator,
InvalidPageError,
ProxyPaginator)
from reviewboard.testing import TestCase
class DummyAPIPaginator(APIPaginator):
    """APIPaginator stub that always serves the same canned page."""

    start_query_param = 'start'
    per_page_query_param = 'per-page'

    def fetch_url(self, url):
        """Ignore the URL and return a fixed payload with no headers."""
        payload = {}
        payload['data'] = [1, 2, 3]
        payload['headers'] = {}
        return payload
class APIPaginatorTests(TestCase):
    """Tests for APIPaginator."""

    def test_construct_initial_load(self):
        """Testing APIPaginator construction performs initial load"""
        pager = DummyAPIPaginator(None, 'http://example.com', start=10)
        self.assertEqual(pager.page_data, [1, 2, 3])

    def test_construct_with_start(self):
        """Testing APIPaginator construction with start=<value>"""
        pager = DummyAPIPaginator(None, 'http://example.com/api/list/?foo=1',
                                  start=10)
        query_params = parse_qs(urlsplit(pager.url)[3])
        self.assertEqual(query_params['foo'], ['1'])
        self.assertEqual(query_params['start'], ['10'])

    def test_construct_with_per_page(self):
        """Testing APIPaginator construction with per_page=<value>"""
        pager = DummyAPIPaginator(None, 'http://example.com/api/list/?foo=1',
                                  per_page=10)
        query_params = parse_qs(urlsplit(pager.url)[3])
        self.assertEqual(query_params['foo'], ['1'])
        self.assertEqual(query_params['per-page'], ['10'])

    def test_extract_page_info(self):
        """Testing APIPaginator page information extraction"""
        class PageInfoAPIPaginator(APIPaginator):
            def fetch_url(self, url):
                return {
                    'data': ['a', 'b', 'c'],
                    'headers': {
                        'Foo': 'Bar',
                    },
                    'per_page': 10,
                    'total_count': 100,
                    'prev_url': 'http://example.com/?page=1',
                    'next_url': 'http://example.com/?page=3',
                }

        pager = PageInfoAPIPaginator(None, 'http://example.com/')
        self.assertEqual(pager.page_data, ['a', 'b', 'c'])
        self.assertEqual(pager.page_headers['Foo'], 'Bar')
        self.assertEqual(pager.per_page, 10)
        self.assertEqual(pager.total_count, 100)
        self.assertEqual(pager.prev_url, 'http://example.com/?page=1')
        self.assertEqual(pager.next_url, 'http://example.com/?page=3')

    def test_prev(self):
        """Testing APIPaginator.prev"""
        pager = DummyAPIPaginator(None, 'http://example.com')
        pager.prev_url = 'http://example.com/?page=1'
        self.assertTrue(pager.has_prev)
        self.assertFalse(pager.has_next)
        self.assertEqual(pager.prev(), [1, 2, 3])
        self.assertEqual(pager.url, 'http://example.com/?page=1')

    def test_prev_without_prev_page(self):
        """Testing APIPaginator.prev without a previous page"""
        pager = DummyAPIPaginator(None, 'http://example.com')
        original_url = pager.url
        self.assertFalse(pager.has_prev)
        self.assertRaises(InvalidPageError, pager.prev)
        self.assertEqual(pager.url, original_url)

    def test_next(self):
        """Testing APIPaginator.next"""
        pager = DummyAPIPaginator(None, 'http://example.com')
        pager.next_url = 'http://example.com/?page=3'
        self.assertFalse(pager.has_prev)
        self.assertTrue(pager.has_next)
        self.assertEqual(pager.next(), [1, 2, 3])
        self.assertEqual(pager.url, 'http://example.com/?page=3')

    def test_next_without_next_page(self):
        """Testing APIPaginator.next without a next page"""
        pager = DummyAPIPaginator(None, 'http://example.com')
        original_url = pager.url
        self.assertFalse(pager.has_next)
        self.assertRaises(InvalidPageError, pager.next)
        self.assertEqual(pager.url, original_url)
class ProxyPaginatorTests(TestCase):
    """Tests for ProxyPaginator."""

    def setUp(self):
        self.paginator = DummyAPIPaginator(None, 'http://example.com')
        self.proxy = ProxyPaginator(self.paginator)

    def test_has_prev(self):
        """Testing ProxyPaginator.has_prev"""
        self.assertFalse(self.proxy.has_prev)

        self.paginator.prev_url = 'http://example.com/?start=1'
        self.assertTrue(self.proxy.has_prev)

    def test_has_next(self):
        """Testing ProxyPaginator.has_next"""
        self.assertFalse(self.proxy.has_next)

        self.paginator.next_url = 'http://example.com/?start=2'
        self.assertTrue(self.proxy.has_next)

    def test_per_page(self):
        """Testing ProxyPaginator.per_page"""
        self.paginator.per_page = 10
        self.assertEqual(self.proxy.per_page, 10)

    def test_total_count(self):
        """Testing ProxyPaginator.total_count"""
        self.paginator.total_count = 100
        self.assertEqual(self.proxy.total_count, 100)

    def test_prev(self):
        """Testing ProxyPaginator.prev"""
        self.paginator.prev_url = 'http://example.com/?page=1'
        self.assertTrue(self.proxy.has_prev)
        self.assertFalse(self.proxy.has_next)

        page = self.proxy.prev()
        self.assertEqual(page, [1, 2, 3])
        self.assertEqual(self.paginator.url, 'http://example.com/?page=1')

    def test_next(self):
        """Testing ProxyPaginator.next"""
        self.paginator.next_url = 'http://example.com/?page=3'
        self.assertFalse(self.proxy.has_prev)
        self.assertTrue(self.proxy.has_next)

        page = self.proxy.next()
        self.assertEqual(page, [1, 2, 3])
        self.assertEqual(self.paginator.url, 'http://example.com/?page=3')

    def test_normalize_page_data(self):
        """Testing ProxyPaginator.normalize_page_data"""
        proxy = ProxyPaginator(self.paginator,
                               normalize_page_data_func=lambda data: data[::-1])
        self.assertEqual(proxy.page_data, [3, 2, 1])

    def test_normalize_page_data_on_prev(self):
        """Testing ProxyPaginator.normalize_page_data on prev"""
        proxy = ProxyPaginator(self.paginator,
                               normalize_page_data_func=lambda data: data[::-1])
        self.paginator.prev_url = 'http://example.com/?page=1'
        self.assertEqual(proxy.prev(), [3, 2, 1])

    def test_normalize_page_data_on_next(self):
        """Testing ProxyPaginator.normalize_page_data on next"""
        proxy = ProxyPaginator(self.paginator,
                               normalize_page_data_func=lambda data: data[::-1])
        self.paginator.next_url = 'http://example.com/?page=3'
        self.assertEqual(proxy.next(), [3, 2, 1])
| |
"""
by Liam A.
used: http://www.december.com/html/spec/color,
http://orig14.deviantart.net/7b77/f/2013/203/5/5/cartoon_boy_by_navdbest-d6ekjw9.png
http://cartoon-birds.clipartonline.net/_/rsrc/1472868952735/blue-birds-cartoon-bird-images/blue_bird_clipart_image_9.png?height=320&width=320
"""
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, ImageAsset, TextAsset, Sound, SoundAsset
SCREEN_WIDTH = 1850
SCREEN_HEIGHT = 1000
# Colors: Color(0xRRGGBB, alpha)
Lgreen = Color (0x7CFC00, 0.95)
turqo = Color (0x40E0D0, 0.97)
orange = Color (0xFF8600, 1)
black = Color (0x000000, 0.85)
purp = Color (0x68228B, 0.7)
brn = Color (0x5C3317, 0.9)
pale = Color (0xFFFACD, 0.8)
white = Color (0xFFFFFF, 0)  # alpha 0 -> fully transparent
thinline = LineStyle(1, black)
noline = LineStyle(0, white)  # invisible outline for the paint dots
# Module-level shared state (read/written by Icon and Draw)
clkun=[]   # alternating (x, y) of selection clicks and the selected Icon class
clkdx=[]   # (x, y) of each placement click
stage=0    # program stage, advanced by Enter (see Draw.switch)
color=0    # current paint color index, 0 = not painting (see Draw.step)
h2 = (SCREEN_HEIGHT)/2
wth2 = (SCREEN_WIDTH)/2
# Assets: one dot per paint color, the palette divider, and help texts
dotg = CircleAsset(3, noline, Lgreen)
dotq = CircleAsset(3, noline, turqo)
doto = CircleAsset(3, noline, orange)
dotb = CircleAsset(4, noline, black)
dotp = CircleAsset(3, noline, purp)
dotr = CircleAsset(2, noline, brn)
dotl = CircleAsset(3, noline, pale)
box = RectangleAsset(8, 1000, thinline, black)
label = TextAsset("Icons")
hide = TextAsset("Press return to hide this message.", width=500, style="30px Arial")
other = TextAsset("Press return again once you're done to advance to the next stage.", width=600)
# Base class shared by all palette icons.
class Icon(Sprite):
    """Clickable palette icon: first click selects it, the next click in
    stage 1 stamps a copy of it at the clicked position (see ym_dn)."""

    def __init__(self,asset,position,prop):
        # b/c are set to 2 by ym_dn when the click was within 40px of this
        # icon horizontally/vertically.
        self.b=0
        self.c=0
        chk = 0  # NOTE(review): dead local; the working 'chk' lives in ym_dn
        self.ct = 1  # click parity: odd -> next click selects, even -> places
        super().__init__(asset, position)
        self.center=(0.5,0.5)
        if prop==True:
            # Palette icon: listen for clicks.
            Draw.listenMouseEvent("mousedown", self.ym_dn)
        if prop==False:
            # Stamped copy: play this icon's sound once on creation.
            go = Sound(self.noise)
            go.play()
    def ym_dn(self,event):
        """Mousedown handler (stage 1 only): select this icon or place a copy."""
        global stage
        lgtha = len(clkun)
        if stage == 1:
            if (self.ct)%2 == 1:
                # Selection attempt: is the click within 40px of this icon?
                self.diffx = self.x-event.x
                self.diffy = self.y-event.y
                self.diffx = abs(self.diffx)
                self.diffy = abs(self.diffy)
                if self.diffx <= 40:
                    self.b=2
                else:
                    self.b=0
                if self.diffy <= 40:
                    self.c=2
                else:
                    self.c=0
                if self.c==2 and self.b==2:
                    clkun.append((event.x,event.y)) # coord. of where clicked...
                    clkun.append(type(self)) # ...and which icon class was clicked
            else:
                # Placement attempt: the last clkun entry is the selected class.
                # NOTE(review): raises IndexError if clkun is still empty.
                chk = clkun[lgtha-1]
                if chk == type(self):
                    clkdx.append((event.x,event.y)) # coord. of where clicked
                    lgthb = len(clkdx)
                    # Instantiate the selected class at the clicked spot
                    # (prop=False -> the copy plays a sound, no listener).
                    clkun[lgtha-1](clkdx[lgthb-1], False)
            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # source — ct toggles on every stage-1 click; confirm vs. original.
            self.ct += 1
# Concrete icons: each loads its image/sound assets once at import time.
class Flowr(Icon):
    """Pink flower icon."""
    asset = ImageAsset("images/pinkflowr.png")
    noise = SoundAsset("sounds/Flr.mp3")
    def __init__(self,position,prop):
        # prop=True -> clickable palette icon; prop=False -> placed copy.
        super().__init__(Flowr.asset, position,prop)
        self.scale = 0.2
class Tree(Icon):
    """Tree icon."""
    asset = ImageAsset("images/tree.png")
    noise = SoundAsset("sounds/Tree.mp3")
    def __init__(self,position,prop):
        super().__init__(Tree.asset, position,prop)
        self.scale = 0.5
class Cat(Icon):
    """Cat icon."""
    asset = ImageAsset("images/cute-cartoon-cat-cute-light-brown-cartoon-cat-with-a-black-nose-and-7VM6VK-clipart.png")
    noise = SoundAsset("sounds/Cat.mp3")
    def __init__(self,position,prop):
        super().__init__(Cat.asset, position,prop)
        self.scale = 0.2
class Bunny(Icon):
    """Bunny icon."""
    asset = ImageAsset("images/bunny.png")
    noise = SoundAsset("sounds/Bunny.mp3")
    def __init__(self,position,prop):
        super().__init__(Bunny.asset, position,prop)
        self.scale = 0.8
class Bird(Icon):
    """Blue bird icon."""
    asset = ImageAsset("images/blue_bird.png")
    noise = SoundAsset("sounds/Birdie.mp3")
    def __init__(self,position,prop):
        super().__init__(Bird.asset, position,prop)
        self.scale = 0.18
class kid(Icon):
    """Cartoon boy icon."""
    asset = ImageAsset("images/cartoon_boy.png")
    noise = SoundAsset("sounds/boi.mp3")
    def __init__(self,position,prop):
        super().__init__(kid.asset, position,prop)
        self.scale = 0.06
class Draw(App):
    """
    The main application.

    Flow is driven by the module-level ``stage`` counter, advanced with the
    Enter key (``switch``): stage 1 duplicates palette icons by clicking,
    stage 3 is freehand drawing while holding the mouse button plus one of
    the color keys; stages 0/2/4/5 only toggle instruction text.
    """

    def __init__(self, width, height):
        """Create the palette, the help texts and wire up all input handlers."""
        global stage
        super().__init__(width, height)
        self.a = 0  # 1 while the mouse button is held down
        # Bug fix: step() reads self.msx/self.msy whenever the mouse is down,
        # but they were only ever assigned by the "mousemove" handler, so a
        # click before any mouse movement raised AttributeError. Initialize
        # them here.
        self.msx = 0
        self.msy = 0
        print("Welcome! Click and drag the icons to duplicate them.")
        # Palette icons along the left edge (prop=True -> clickable).
        Bunny((65, 500), True)
        Cat((80, 350), True)
        Tree((75, 225), True)
        Flowr((50, 105), True)
        Bird((65, 600), True)
        kid((55, 710), True)
        Sprite(box, (132, 25))   # divider between the palette and the canvas
        Sprite(label, (50, 30))
        start1 = TextAsset("Click on an icon to select it.", width=500)
        start2 = TextAsset("Click somewhere else to place a copy of that icon there.", width=500)
        self.txt3 = Sprite(hide, (wth2, h2 + 40))
        self.txt4 = Sprite(start1, (wth2, h2))
        self.txt5 = Sprite(start2, (wth2, (h2 + 20)))
        self.txt9 = Sprite(other, (wth2, (h2 + 75)))
        # Input wiring: Enter advances the stage; each color key selects a
        # paint color while held; mouse handlers track button state/position.
        Draw.listenKeyEvent("keydown", "enter", self.switch)
        Draw.listenKeyEvent("keydown", "g", self.green)
        Draw.listenKeyEvent("keydown", "q", self.turq)
        Draw.listenKeyEvent("keydown", "o", self.orange)
        Draw.listenKeyEvent("keydown", "b", self.black)
        Draw.listenKeyEvent("keydown", "p", self.purp)
        Draw.listenKeyEvent("keydown", "r", self.brn)
        Draw.listenKeyEvent("keydown", "l", self.pale)
        Draw.listenMouseEvent("mousedown", self.mse_isdn)
        Draw.listenMouseEvent("mouseup", self.mseno)
        Draw.listenMouseEvent("mousemove", self.move)
        # Releasing any color key stops painting.
        for key in ("g", "q", "o", "b", "p", "r", "l"):
            Draw.listenKeyEvent("keyup", key, self.no_col)

    def switch(self, event):
        """Advance ``stage`` on Enter and swap the on-screen instructions."""
        global stage
        stage += 1
        if stage == 1:
            # Hide the stage-0 instructions.
            self.txt4.visible = False
            self.txt5.visible = False
            self.txt3.visible = False
            self.txt9.visible = False
        if stage == 2:
            print("You are done dragging and dropping!")
            middle1 = TextAsset("Now you can draw on the screen by dragging the", width=500)
            middle2 = TextAsset("mouse across the screen while pressing down both the mouse and", width=700)
            middle3 = TextAsset("one of the following keys: 'q', 'r', 'o', 'p', 'g', 'l', or 'b' .", width=500)
            self.txt6 = Sprite(middle1, (wth2, h2))
            self.txt7 = Sprite(middle2, (wth2, (h2 + 20)))
            self.txt8 = Sprite(middle3, (wth2, (h2 + 40)))
            self.txt9a = Sprite(other, (wth2, (h2 + 95)))
            self.txt3a = Sprite(hide, (wth2, (h2 + 60)))
        if stage == 3:
            print("Now try dragging the mouse across the screen while holding one of the following keys: 'b', 'r', 'p', 'l', 'g', 'o', or 'q'.")
            self.txt6.visible = False
            self.txt7.visible = False
            self.txt8.visible = False
            self.txt3a.visible = False
            self.txt9a.visible = False
        if stage == 4:
            end1 = TextAsset("You have finished this program!", width=500)
            end2 = TextAsset("If you ctrl+click, you can save or copy your image.", width=500)
            self.txt1 = Sprite(end1, (wth2, h2))
            self.txt2 = Sprite(end2, (wth2, h2 + 20))
            self.txt3.visible = True
        if stage == 5:
            self.txt1.visible = False
            self.txt2.visible = False
            self.txt3.visible = False

    def mse_isdn(self, event):
        """Mouse button pressed."""
        self.a = 1

    def mseno(self, event):
        """Mouse button released."""
        self.a = 0

    def move(self, event):
        """Track the current mouse position for step()."""
        self.msx = event.x
        self.msy = event.y

    # Color key handlers: each picks a paint color, active in stage 3 only.
    def green(self, event):
        global color
        if stage == 3:
            color = 1

    def turq(self, event):
        global color
        if stage == 3:
            color = 2

    def orange(self, event):
        global color
        if stage == 3:
            color = 3

    def black(self, event):
        global color
        if stage == 3:
            color = 4

    def purp(self, event):
        global color
        if stage == 3:
            color = 5

    def brn(self, event):
        global color
        if stage == 3:
            color = 6

    def pale(self, event):
        global color
        if stage == 3:
            color = 7

    def no_col(self, event):
        """Releasing a color key stops painting."""
        global color
        if stage == 3:
            color = 0

    def step(self):
        """Per-frame hook: stamp a colored dot at the mouse while painting."""
        if self.a == 1 and color != 0:
            # color is 1..7 here; map it to the matching dot asset.
            dot = (dotg, dotq, doto, dotb, dotp, dotr, dotl)[color - 1]
            Sprite(dot, (self.msx, self.msy))
# Instantiate the app and start the ggame event loop.
my_draw = Draw(SCREEN_WIDTH, SCREEN_HEIGHT)
my_draw.run()
| |
import inspect
import os
from collections import OrderedDict
import numpy as np
from numpy import nan
import pandas as pd
import pytz
import pytest
from numpy.testing import assert_allclose
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import atmosphere
from pvlib import irradiance
from conftest import requires_scipy, requires_tables
def test_ineichen_series():
    # Ineichen clear-sky model on a one-day, 3-hourly Series; zenith and
    # airmass are precomputed fixtures (nan = sun below horizon).
    times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h',
                          tz='America/Phoenix')
    apparent_zenith = pd.Series(np.array(
        [124.0390863, 113.38779941, 82.85457044, 46.0467599, 10.56413562,
         34.86074109, 72.41687122, 105.69538659, 124.05614124]),
        index=times)
    am = pd.Series(np.array(
        [nan, nan, 6.97935524, 1.32355476, 0.93527685,
         1.12008114, 3.01614096, nan, nan]),
        index=times)
    expected = pd.DataFrame(np.
        array([[0., 0., 0.],
               [0., 0., 0.],
               [65.49426624, 321.16092181, 25.54562017],
               [704.6968125, 888.90147035, 87.73601277],
               [1044.1230677, 953.24925854, 107.03109696],
               [853.02065704, 922.06124712, 96.42909484],
               [251.99427693, 655.44925241, 53.9901349],
               [0., 0., 0.],
               [0., 0., 0.]]),
        columns=['ghi', 'dni', 'dhi'],
        index=times)
    out = clearsky.ineichen(apparent_zenith, am, 3)
    assert_frame_equal(expected, out)
def test_ineichen_series_perez_enhancement():
    # Same fixtures as above, but with the Perez enhancement factor enabled.
    times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h',
                          tz='America/Phoenix')
    apparent_zenith = pd.Series(np.array(
        [124.0390863, 113.38779941, 82.85457044, 46.0467599, 10.56413562,
         34.86074109, 72.41687122, 105.69538659, 124.05614124]),
        index=times)
    am = pd.Series(np.array(
        [nan, nan, 6.97935524, 1.32355476, 0.93527685,
         1.12008114, 3.01614096, nan, nan]),
        index=times)
    expected = pd.DataFrame(np.
        array([[0., 0., 0.],
               [0., 0., 0.],
               [91.1249279, 321.16092171, 51.17628184],
               [716.46580547, 888.9014706, 99.50500553],
               [1053.42066073, 953.24925905, 116.3286895],
               [863.54692748, 922.06124652, 106.9553658],
               [271.06382275, 655.44925213, 73.05968076],
               [0., 0., 0.],
               [0., 0., 0.]]),
        columns=['ghi', 'dni', 'dhi'],
        index=times)
    out = clearsky.ineichen(apparent_zenith, am, 3, perez_enhancement=True)
    assert_frame_equal(expected, out)
def test_ineichen_scalar_input():
    # Scalar inputs return an OrderedDict of scalars rather than a DataFrame.
    expected = OrderedDict()
    expected['ghi'] = 1038.159219
    expected['dni'] = 942.2081860378344
    expected['dhi'] = 110.26529293612793
    out = clearsky.ineichen(10., 1., 3.)
    for k, v in expected.items():
        assert_allclose(expected[k], out[k])
def test_ineichen_nans():
    # A nan in any input propagates to all outputs for that element; only the
    # last element has all-valid inputs.
    length = 4
    apparent_zenith = np.full(length, 10.)
    apparent_zenith[0] = np.nan
    linke_turbidity = np.full(length, 3.)
    linke_turbidity[1] = np.nan
    dni_extra = np.full(length, 1370.)
    dni_extra[2] = np.nan
    airmass_absolute = np.full(length, 1.)
    expected = OrderedDict()
    expected['ghi'] = np.full(length, np.nan)
    expected['dni'] = np.full(length, np.nan)
    expected['dhi'] = np.full(length, np.nan)
    expected['ghi'][length-1] = 1042.72590228
    expected['dni'][length-1] = 946.35279683
    expected['dhi'][length-1] = 110.75033088
    out = clearsky.ineichen(apparent_zenith, airmass_absolute,
                            linke_turbidity, dni_extra=dni_extra)
    for k, v in expected.items():
        assert_allclose(expected[k], out[k])
def test_ineichen_arrays():
    # Broadcasting over a 3x3x3 meshgrid of zenith/airmass/turbidity.
    expected = OrderedDict()
    expected['ghi'] = (np.
        array([[[1095.77074798, 1054.17449885, 1014.15727338],
                [839.40909243, 807.54451692, 776.88954373],
                [190.27859353, 183.05548067, 176.10656239]],
               [[773.49041181, 625.19479557, 505.33080493],
                [592.52803177, 478.92699901, 387.10585505],
                [134.31520045, 108.56393694, 87.74977339]],
               [[545.9968869, 370.78162375, 251.79449885],
                [418.25788117, 284.03520249, 192.88577665],
                [94.81136442, 64.38555328, 43.72365587]]]))
    expected['dni'] = (np.
        array([[[1014.38807396, 942.20818604, 861.11344424],
                [1014.38807396, 942.20818604, 861.11344424],
                [1014.38807396, 942.20818604, 861.11344424]],
               [[687.61305142, 419.14891162, 255.50098235],
                [687.61305142, 419.14891162, 255.50098235],
                [687.61305142, 419.14891162, 255.50098235]],
               [[458.62196014, 186.46177428, 75.80970012],
                [458.62196014, 186.46177428, 75.80970012],
                [458.62196014, 186.46177428, 75.80970012]]]))
    expected['dhi'] = (np.
        array([[[81.38267402, 111.96631281, 153.04382915],
                [62.3427452, 85.77117175, 117.23837487],
                [14.13195304, 19.44274618, 26.57578203]],
               [[85.87736039, 206.04588395, 249.82982258],
                [65.78587472, 157.84030442, 191.38074731],
                [14.91244713, 35.77949226, 43.38249342]],
               [[87.37492676, 184.31984947, 175.98479873],
                [66.93307711, 141.19719644, 134.81217714],
                [15.17249681, 32.00680597, 30.5594396]]]))
    apparent_zenith = np.linspace(0, 80, 3)
    airmass_absolute = np.linspace(1, 10, 3)
    linke_turbidity = np.linspace(2, 4, 3)
    apparent_zenith, airmass_absolute, linke_turbidity = \
        np.meshgrid(apparent_zenith, airmass_absolute, linke_turbidity)
    out = clearsky.ineichen(apparent_zenith, airmass_absolute, linke_turbidity)
    for k, v in expected.items():
        assert_allclose(expected[k], out[k])
def test_ineichen_dni_extra():
    # A Series dni_extra promotes the scalar inputs to a DataFrame output.
    expected = pd.DataFrame(
        np.array([[1042.72590228, 946.35279683, 110.75033088]]),
        columns=['ghi', 'dni', 'dhi'])
    out = clearsky.ineichen(10, 1, 3, dni_extra=pd.Series(1370))
    assert_frame_equal(expected, out)
def test_ineichen_altitude():
    # Same promotion behavior for the altitude keyword.
    expected = pd.DataFrame(
        np.array([[1134.24312405, 994.95377835, 154.40492924]]),
        columns=['ghi', 'dni', 'dhi'])
    out = clearsky.ineichen(10, 1, 3, altitude=pd.Series(2000))
    assert_frame_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity():
    times = pd.date_range(start='2014-06-24', end='2014-06-25',
                          freq='12h', tz='America/Phoenix')
    # expect same value on 2014-06-24 0000 and 1200, and
    # diff value on 2014-06-25
    expected = pd.Series(
        np.array([3.11803278689, 3.11803278689, 3.13114754098]), index=times
    )
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
    assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_leapyear():
    # A leap year must yield the same interpolated values as a common year.
    times = pd.date_range(start='2016-06-24', end='2016-06-25',
                          freq='12h', tz='America/Phoenix')
    # expect same value on 2016-06-24 0000 and 1200, and
    # diff value on 2016-06-25
    expected = pd.Series(
        np.array([3.11803278689, 3.11803278689, 3.13114754098]), index=times
    )
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
    assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_nointerp():
    # Without interpolation, every timestamp in June gets the monthly value.
    times = pd.date_range(start='2014-06-24', end='2014-06-25',
                          freq='12h', tz='America/Phoenix')
    # expect same value for all days
    expected = pd.Series(np.array([3., 3., 3.]), index=times)
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
                                          interp_turbidity=False)
    assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_months():
    # Month-end stamps interpolate between adjacent monthly table values.
    times = pd.date_range(start='2014-04-01', end='2014-07-01',
                          freq='1M', tz='America/Phoenix')
    expected = pd.Series(
        np.array([2.89918032787, 2.97540983607, 3.19672131148]), index=times
    )
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
    assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_months_leapyear():
    # Leap-year month ends must match the common-year interpolation.
    times = pd.date_range(start='2016-04-01', end='2016-07-01',
                          freq='1M', tz='America/Phoenix')
    expected = pd.Series(
        np.array([2.89918032787, 2.97540983607, 3.19672131148]), index=times
    )
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
    assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_nointerp_months():
    times = pd.date_range(start='2014-04-10', end='2014-07-10',
                          freq='1M', tz='America/Phoenix')
    expected = pd.Series(np.array([2.85, 2.95, 3.]), index=times)
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
                                          interp_turbidity=False)
    assert_series_equal(expected, out)
    # changing the dates shouldn't matter if interp=False
    times = pd.date_range(start='2014-04-05', end='2014-07-05',
                          freq='1M', tz='America/Phoenix')
    out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
                                          interp_turbidity=False)
    assert_series_equal(expected, out)
def test_haurwitz():
    # Haurwitz model: GHI is clipped to 0 for sun at/below the horizon
    # (elevation <= 0, i.e. zenith >= 90).
    apparent_solar_elevation = np.array([-20, -0.05, -0.001, 5, 10, 30, 50, 90])
    apparent_solar_zenith = 90 - apparent_solar_elevation
    data_in = pd.DataFrame(data=apparent_solar_zenith,
                           index=apparent_solar_zenith,
                           columns=['apparent_zenith'])
    expected = pd.DataFrame(np.array([0.,
                                      0.,
                                      0.,
                                      48.6298687941956,
                                      135.741748091813,
                                      487.894132885425,
                                      778.766689344363,
                                      1035.09203253450]),
                            columns=['ghi'],
                            index=apparent_solar_zenith)
    out = clearsky.haurwitz(data_in['apparent_zenith'])
    assert_frame_equal(expected, out)
def test_simplified_solis_scalar_elevation():
    # Scalar elevation -> OrderedDict of scalar irradiance components.
    expected = OrderedDict()
    expected['ghi'] = 1064.653145
    expected['dni'] = 959.335463
    expected['dhi'] = 129.125602
    out = clearsky.simplified_solis(80)
    for k, v in expected.items():
        assert_allclose(expected[k], out[k])
def test_simplified_solis_scalar_neg_elevation():
    # Sun below the horizon -> all components are 0.
    expected = OrderedDict()
    expected['ghi'] = 0
    expected['dni'] = 0
    expected['dhi'] = 0
    out = clearsky.simplified_solis(-10)
    for k, v in expected.items():
        assert_allclose(expected[k], out[k])
def test_simplified_solis_series_elevation():
    # Series input -> DataFrame output with ghi/dni/dhi columns.
    expected = pd.DataFrame(
        np.array([[959.335463, 1064.653145, 129.125602]]),
        columns=['dni', 'ghi', 'dhi'])
    expected = expected[['ghi', 'dni', 'dhi']]
    out = clearsky.simplified_solis(pd.Series(80))
    assert_frame_equal(expected, out)
def test_simplified_solis_dni_extra():
    expected = pd.DataFrame(np.array([[963.555414, 1069.33637, 129.693603]]),
                            columns=['dni', 'ghi', 'dhi'])
    expected = expected[['ghi', 'dni', 'dhi']]
    out = clearsky.simplified_solis(80, dni_extra=pd.Series(1370))
    assert_frame_equal(expected, out)
def test_simplified_solis_pressure():
    # Irradiance varies with site pressure at fixed elevation.
    expected = pd.DataFrame(np.
        array([[964.26930718, 1067.96543669, 127.22841797],
               [961.88811874, 1066.36847963, 128.1402539],
               [959.58112234, 1064.81837558, 129.0304193]]),
        columns=['dni', 'ghi', 'dhi'])
    expected = expected[['ghi', 'dni', 'dhi']]
    out = clearsky.simplified_solis(
        80, pressure=pd.Series([95000, 98000, 101000]))
    assert_frame_equal(expected, out)
def test_simplified_solis_aod700():
    # Sweep of aerosol optical depth from clean (0) to opaque (10).
    expected = pd.DataFrame(np.
        array([[1056.61710493, 1105.7229086, 64.41747323],
               [1007.50558875, 1085.74139063, 102.96233698],
               [959.3354628, 1064.65314509, 129.12560167],
               [342.45810926, 638.63409683, 77.71786575],
               [55.24140911, 7.5413313, 0.]]),
        columns=['dni', 'ghi', 'dhi'])
    expected = expected[['ghi', 'dni', 'dhi']]
    aod700 = pd.Series([0.0, 0.05, 0.1, 1, 10])
    out = clearsky.simplified_solis(80, aod700=aod700)
    assert_frame_equal(expected, out)
def test_simplified_solis_precipitable_water():
    # Precipitable water is clamped below: 0.0 and 0.2 cm give the same row.
    expected = pd.DataFrame(np.
        array([[1001.15353307, 1107.84678941, 128.58887606],
               [1001.15353307, 1107.84678941, 128.58887606],
               [983.51027357, 1089.62306672, 129.08755996],
               [959.3354628, 1064.65314509, 129.12560167],
               [872.02335029, 974.18046717, 125.63581346]]),
        columns=['dni', 'ghi', 'dhi'])
    expected = expected[['ghi', 'dni', 'dhi']]
    out = clearsky.simplified_solis(
        80, precipitable_water=pd.Series([0.0, 0.2, 0.5, 1.0, 5.0]))
    assert_frame_equal(expected, out)
def test_simplified_solis_small_scalar_pw():
expected = OrderedDict()
expected['ghi'] = 1107.84678941
expected['dni'] = 1001.15353307
expected['dhi'] = 128.58887606
out = clearsky.simplified_solis(80, precipitable_water=0.1)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_return_arrays():
    """2-D ndarray inputs (from meshgrid) return 2-D ndarray outputs.

    Fix: the comparison loop unpacked ``v`` but then re-indexed
    ``expected[k]`` instead of using it (redundant lookup).
    """
    expected = OrderedDict()
    expected['ghi'] = np.array([[1148.40081325, 913.42330823],
                                [965.48550828, 760.04527609]])
    expected['dni'] = np.array([[1099.25706525, 656.24601381],
                                [915.31689149, 530.31697378]])
    expected['dhi'] = np.array([[64.1063074, 254.6186615],
                                [62.75642216, 232.21931597]])
    aod700 = np.linspace(0, 0.5, 2)
    precipitable_water = np.linspace(0, 10, 2)
    aod700, precipitable_water = np.meshgrid(aod700, precipitable_water)
    out = clearsky.simplified_solis(80, aod700, precipitable_water)
    for k, v in expected.items():
        assert_allclose(v, out[k])
def test_simplified_solis_nans_arrays():
    """NaN in any ndarray input propagates to all outputs at that index.

    Each input array carries a NaN at a different position; only the
    last element is valid in every input simultaneously.

    Fix: the comparison loop unpacked ``v`` but then re-indexed
    ``expected[k]`` instead of using it (redundant lookup).
    """
    length = 6
    apparent_elevation = np.full(length, 80.)
    apparent_elevation[0] = np.nan
    aod700 = np.full(length, 0.1)
    aod700[1] = np.nan
    precipitable_water = np.full(length, 0.5)
    precipitable_water[2] = np.nan
    pressure = np.full(length, 98000.)
    pressure[3] = np.nan
    dni_extra = np.full(length, 1370.)
    dni_extra[4] = np.nan
    expected = OrderedDict()
    expected['ghi'] = np.full(length, np.nan)
    expected['dni'] = np.full(length, np.nan)
    expected['dhi'] = np.full(length, np.nan)
    # only the final element has all-valid inputs
    expected['ghi'][length-1] = 1096.022736
    expected['dni'][length-1] = 990.306854
    expected['dhi'][length-1] = 128.664594
    out = clearsky.simplified_solis(apparent_elevation, aod700,
                                    precipitable_water, pressure, dni_extra)
    for k, v in expected.items():
        assert_allclose(v, out[k])
def test_simplified_solis_nans_series():
    """NaN in any input propagates to all outputs (Series elevation).

    Each input carries a NaN at a different position; only the last
    element is valid in every input simultaneously, so the output
    DataFrame is NaN everywhere except the final row.
    """
    n = 6
    apparent_elevation = pd.Series(np.full(n, 80.))
    apparent_elevation[0] = np.nan
    aod700 = np.full(n, 0.1)
    aod700[1] = np.nan
    precipitable_water = np.full(n, 0.5)
    precipitable_water[2] = np.nan
    pressure = np.full(n, 98000.)
    pressure[3] = np.nan
    dni_extra = np.full(n, 1370.)
    dni_extra[4] = np.nan
    expected = OrderedDict(
        (key, np.full(n, np.nan)) for key in ('ghi', 'dni', 'dhi'))
    expected['ghi'][n-1] = 1096.022736
    expected['dni'][n-1] = 990.306854
    expected['dhi'][n-1] = 128.664594
    expected = pd.DataFrame.from_dict(expected)
    out = clearsky.simplified_solis(apparent_elevation, aod700,
                                    precipitable_water, pressure, dni_extra)
    assert_frame_equal(expected, out)
@requires_tables
def test_linke_turbidity_corners():
    """Test Linke turbidity corners out of bounds."""
    # one timestamp per month of 2016; turbidity lookup is monthly
    months = pd.DatetimeIndex('%d/1/2016' % (m + 1) for m in range(12))

    def monthly_lt_nointerp(lat, lon, time=months):
        """monthly Linke turbidity factor without time interpolation"""
        return clearsky.lookup_linke_turbidity(
            time, lat, lon, interp_turbidity=False
        )

    # check the four corners of the lookup grid against known values
    # Northwest
    assert np.allclose(
        monthly_lt_nointerp(90, -180),
        [1.9, 1.9, 1.9, 2.0, 2.05, 2.05, 2.1, 2.1, 2.0, 1.95, 1.9, 1.9])
    # Southwest
    assert np.allclose(
        monthly_lt_nointerp(-90, -180),
        [1.35, 1.3, 1.45, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.4, 1.4, 1.3])
    # Northeast
    assert np.allclose(
        monthly_lt_nointerp(90, 180),
        [1.9, 1.9, 1.9, 2.0, 2.05, 2.05, 2.1, 2.1, 2.0, 1.95, 1.9, 1.9])
    # Southeast
    assert np.allclose(
        monthly_lt_nointerp(-90, 180),
        [1.35, 1.7, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.7])
    # test out of range exceptions at corners
    with pytest.raises(IndexError):
        monthly_lt_nointerp(91, -122)  # exceeds max latitude
    with pytest.raises(IndexError):
        monthly_lt_nointerp(38.2, 181)  # exceeds max longitude
    with pytest.raises(IndexError):
        monthly_lt_nointerp(-91, -122)  # exceeds min latitude
    with pytest.raises(IndexError):
        monthly_lt_nointerp(38.2, -181)  # exceeds min longitude
def test_degrees_to_index_1():
    """_degrees_to_index rejects coordinate names other than
    'latitude' or 'longitude'."""
    with pytest.raises(IndexError):  # invalid value for coordinate argument
        clearsky._degrees_to_index(degrees=22.0, coordinate='width')
@pytest.fixture
def detect_clearsky_data():
    """Load the detect_clearsky reference data and a matching clear-sky
    model.

    Returns a tuple ``(expected, cs)`` where ``expected`` is the CSV's
    measured/flagged data (tz-converted to Etc/GMT+7) and ``cs`` is the
    clear-sky irradiance computed for the same index.
    """
    test_dir = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    file = os.path.join(test_dir, '..', 'data', 'detect_clearsky_data.csv')
    expected = pd.read_csv(file, index_col=0, parse_dates=True, comment='#')
    expected = expected.tz_localize('UTC').tz_convert('Etc/GMT+7')
    # metadata is stored as "# key:value" comment lines at the top of
    # the CSV; stop at the first non-comment line
    metadata = {}
    with open(file) as f:
        for line in f:
            if line.startswith('#'):
                key, value = line.strip('# \n').split(':')
                metadata[key] = float(value)
            else:
                break
    metadata['window_length'] = int(metadata['window_length'])
    loc = Location(metadata['latitude'], metadata['longitude'],
                   altitude=metadata['elevation'])
    # specify turbidity to guard against future lookup changes
    cs = loc.get_clearsky(expected.index, linke_turbidity=2.658197)
    return expected, cs
@requires_scipy
def test_detect_clearsky(detect_clearsky_data):
    """detect_clearsky reproduces the reference clear/cloudy flags."""
    expected, cs = detect_clearsky_data
    result = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi'], cs.index, 10)
    assert_series_equal(expected['Clear or not'], result,
                        check_dtype=False, check_names=False)
@requires_scipy
def test_detect_clearsky_components(detect_clearsky_data):
    """return_components=True also yields the criteria dict and alpha."""
    expected, cs = detect_clearsky_data
    flags, components, alpha = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi'], cs.index, 10, return_components=True)
    assert_series_equal(expected['Clear or not'], flags,
                        check_dtype=False, check_names=False)
    assert isinstance(components, OrderedDict)
    assert np.allclose(alpha, 0.9633903181941296)
@requires_scipy
def test_detect_clearsky_iterations(detect_clearsky_data):
    """max_iterations bounds convergence; a RuntimeWarning is emitted
    when the limit is hit, and more iterations recover the reference.

    Fix: replaced ``== True`` / ``== False`` comparisons (flake8 E712)
    with direct boolean reductions.
    """
    expected, cs = detect_clearsky_data
    alpha = 1.0448
    with pytest.warns(RuntimeWarning):
        clear_samples = clearsky.detect_clearsky(
            expected['GHI'], cs['ghi']*alpha, cs.index, 10, max_iterations=1)
    assert clear_samples[:'2012-04-01 10:41:00'].all()
    assert not clear_samples['2012-04-01 10:42:00':].any()
    clear_samples = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi']*alpha, cs.index, 10, max_iterations=20)
    assert_series_equal(expected['Clear or not'], clear_samples,
                        check_dtype=False, check_names=False)
@requires_scipy
def test_detect_clearsky_kwargs(detect_clearsky_data):
    """With all thresholds loosened, every sample is flagged clear."""
    expected, cs = detect_clearsky_data
    flags = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi'], cs.index, 10,
        mean_diff=1000, max_diff=1000, lower_line_length=-1000,
        upper_line_length=1000, var_diff=10, slope_dev=1000)
    assert flags.all()
@requires_scipy
def test_detect_clearsky_window(detect_clearsky_data):
    """A shorter window changes only the classification of the tail."""
    expected, cs = detect_clearsky_data
    flags = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi'], cs.index, 3)
    reference = expected['Clear or not'].copy()
    reference.iloc[-3:] = True
    assert_series_equal(reference, flags,
                        check_dtype=False, check_names=False)
@requires_scipy
def test_detect_clearsky_arrays(detect_clearsky_data):
    """ndarray inputs yield an ndarray result with the same flags."""
    expected, cs = detect_clearsky_data
    flags = clearsky.detect_clearsky(
        expected['GHI'].values, cs['ghi'].values, cs.index, 10)
    assert isinstance(flags, np.ndarray)
    assert (flags == expected['Clear or not'].values).all()
@requires_scipy
def test_detect_clearsky_irregular_times(detect_clearsky_data):
    """Irregularly spaced timestamps raise NotImplementedError.

    Fix: dropped the unused ``clear_samples`` binding inside the
    ``pytest.raises`` block (flake8 F841) — the call never returns.
    """
    expected, cs = detect_clearsky_data
    times = cs.index.values.copy()
    times[0] += 10**9  # shift first timestamp by one second (ns units)
    times = pd.DatetimeIndex(times)
    with pytest.raises(NotImplementedError):
        clearsky.detect_clearsky(
            expected['GHI'].values, cs['ghi'].values, times, 10)
def test_bird():
    """Test Bird/Hulstrom Clearsky Model against BIRD_08_16_2012 data.

    Fix: the final scalar comparison called ``np.allclose`` without
    asserting its result, so a mismatch could never fail the test.
    """
    times = pd.date_range(start='1/1/2015 0:00', end='12/31/2015 23:00',
                          freq='H')
    tz = -7  # test timezone
    gmt_tz = pytz.timezone('Etc/GMT%+d' % -(tz))
    times = times.tz_localize(gmt_tz)  # set timezone
    # match test data from BIRD_08_16_2012.xls
    latitude = 40.
    longitude = -105.
    press_mB = 840.
    o3_cm = 0.3
    h2o_cm = 1.5
    aod_500nm = 0.1
    aod_380nm = 0.15
    b_a = 0.85
    alb = 0.2
    eot = solarposition.equation_of_time_spencer71(times.dayofyear)
    # shift hour angle by half an interval to match the spreadsheet
    hour_angle = solarposition.hour_angle(times, longitude, eot) - 0.5 * 15.
    declination = solarposition.declination_spencer71(times.dayofyear)
    zenith = solarposition.solar_zenith_analytical(
        np.deg2rad(latitude), np.deg2rad(hour_angle), declination
    )
    zenith = np.rad2deg(zenith)
    airmass = atmosphere.get_relative_airmass(zenith, model='kasten1966')
    etr = irradiance.get_extra_radiation(times)
    # test Bird with time series data
    field_names = ('dni', 'direct_horizontal', 'ghi', 'dhi')
    irrads = clearsky.bird(
        zenith, airmass, aod_380nm, aod_500nm, h2o_cm, o3_cm, press_mB * 100.,
        etr, b_a, alb
    )
    Eb, Ebh, Gh, Dh = (irrads[_] for _ in field_names)
    clearsky_path = os.path.dirname(os.path.abspath(__file__))
    pvlib_path = os.path.dirname(clearsky_path)
    data_path = os.path.join(pvlib_path, 'data', 'BIRD_08_16_2012.csv')
    testdata = pd.read_csv(data_path, usecols=range(1, 26), header=1).dropna()
    testdata.index = times[1:48]
    assert np.allclose(testdata['DEC'], np.rad2deg(declination[1:48]))
    assert np.allclose(testdata['EQT'], eot[1:48], rtol=1e-4)
    assert np.allclose(testdata['Hour Angle'], hour_angle[1:48])
    assert np.allclose(testdata['Zenith Ang'], zenith[1:48])
    # compare only daytime values (zenith below 88 degrees)
    dawn = zenith < 88.
    dusk = testdata['Zenith Ang'] < 88.
    am = pd.Series(np.where(dawn, airmass, 0.), index=times).fillna(0.0)
    assert np.allclose(
        testdata['Air Mass'].where(dusk, 0.), am[1:48], rtol=1e-3
    )
    direct_beam = pd.Series(np.where(dawn, Eb, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata['Direct Beam'].where(dusk, 0.), direct_beam[1:48], rtol=1e-3
    )
    direct_horz = pd.Series(np.where(dawn, Ebh, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata['Direct Hz'].where(dusk, 0.), direct_horz[1:48], rtol=1e-3
    )
    global_horz = pd.Series(np.where(dawn, Gh, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata['Global Hz'].where(dusk, 0.), global_horz[1:48], rtol=1e-3
    )
    diffuse_horz = pd.Series(np.where(dawn, Dh, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata['Dif Hz'].where(dusk, 0.), diffuse_horz[1:48], rtol=1e-3
    )
    # test keyword parameters
    irrads2 = clearsky.bird(
        zenith, airmass, aod_380nm, aod_500nm, h2o_cm, dni_extra=etr
    )
    Eb2, Ebh2, Gh2, Dh2 = (irrads2[_] for _ in field_names)
    clearsky_path = os.path.dirname(os.path.abspath(__file__))
    pvlib_path = os.path.dirname(clearsky_path)
    data_path = os.path.join(pvlib_path, 'data', 'BIRD_08_16_2012_patm.csv')
    testdata2 = pd.read_csv(data_path, usecols=range(1, 26), header=1).dropna()
    testdata2.index = times[1:48]
    direct_beam2 = pd.Series(np.where(dawn, Eb2, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata2['Direct Beam'].where(dusk, 0.), direct_beam2[1:48], rtol=1e-3
    )
    direct_horz2 = pd.Series(np.where(dawn, Ebh2, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata2['Direct Hz'].where(dusk, 0.), direct_horz2[1:48], rtol=1e-3
    )
    global_horz2 = pd.Series(np.where(dawn, Gh2, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata2['Global Hz'].where(dusk, 0.), global_horz2[1:48], rtol=1e-3
    )
    diffuse_horz2 = pd.Series(np.where(dawn, Dh2, 0.), index=times).fillna(0.)
    assert np.allclose(
        testdata2['Dif Hz'].where(dusk, 0.), diffuse_horz2[1:48], rtol=1e-3
    )
    # test scalars just at noon
    # XXX: calculations start at 12am so noon is at index = 12
    irrads3 = clearsky.bird(
        zenith[12], airmass[12], aod_380nm, aod_500nm, h2o_cm, dni_extra=etr[12]
    )
    Eb3, Ebh3, Gh3, Dh3 = (irrads3[_] for _ in field_names)
    # XXX: testdata starts at 1am so noon is at index = 11
    # BUG FIX: assert the comparison; the original discarded the result
    assert np.allclose(
        [Eb3, Ebh3, Gh3, Dh3],
        testdata2[['Direct Beam', 'Direct Hz', 'Global Hz', 'Dif Hz']].iloc[11],
        rtol=1e-3)
    # returned for interactive inspection; pytest ignores the value
    return pd.DataFrame({'Eb': Eb, 'Ebh': Ebh, 'Gh': Gh, 'Dh': Dh}, index=times)
| |
from __future__ import absolute_import
import datetime
import unittest
import numpy as np
import pandas as pd
from copy import copy
import pytest
from bokeh.core.properties import (field, value,
NumberSpec, ColorSpec, Bool, Int, Float, Complex, String,
Regex, Seq, List, Dict, Tuple, Instance, Any, Interval, Either,
Enum, Color, DashPattern, Size, Percent, Angle, AngleSpec, StringSpec,
DistanceSpec, FontSizeSpec, Override, Include, MinMaxBounds,
DataDistanceSpec, ScreenDistanceSpec)
from bokeh.core.has_props import HasProps
from bokeh.models import Plot
class Basictest(unittest.TestCase):
    """Exercise core HasProps behavior: defaults, assignment, Enum
    validation, inheritance, property introspection, serialization
    flags, Override, and Include."""

    def test_simple_class(self):
        """Defaults, properties(), and properties_with_values() round-trip."""
        class Foo(HasProps):
            x = Int(12)
            y = String("hello")
            z = List(Int, [1, 2, 3])
            zz = Dict(String, Int)
            s = String(None)
        f = Foo()
        self.assertEqual(f.x, 12)
        self.assertEqual(f.y, "hello")
        self.assert_(np.array_equal(np.array([1, 2, 3]), f.z))
        self.assertEqual(f.s, None)
        self.assertEqual(set(["x", "y", "z", "zz", "s"]), f.properties())
        with_defaults = f.properties_with_values(include_defaults=True)
        self.assertDictEqual(dict(x=12, y="hello", z=[1,2,3], zz={}, s=None), with_defaults)
        without_defaults = f.properties_with_values(include_defaults=False)
        self.assertDictEqual(dict(), without_defaults)
        f.x = 18
        self.assertEqual(f.x, 18)
        f.y = "bar"
        self.assertEqual(f.y, "bar")
        without_defaults = f.properties_with_values(include_defaults=False)
        self.assertDictEqual(dict(x=18, y="bar"), without_defaults)
        # in-place container mutation also counts as a non-default value
        f.z[0] = 100
        without_defaults = f.properties_with_values(include_defaults=False)
        self.assertDictEqual(dict(x=18, y="bar", z=[100,2,3]), without_defaults)
        f.zz = {'a': 10}
        without_defaults = f.properties_with_values(include_defaults=False)
        self.assertDictEqual(dict(x=18, y="bar", z=[100,2,3], zz={'a': 10}), without_defaults)

    def test_enum(self):
        """Enum defaults to its first item (or explicit default) and
        rejects values outside the declared set."""
        class Foo(HasProps):
            x = Enum("blue", "red", "green") # the first item is the default
            y = Enum("small", "medium", "large", default="large")
        f = Foo()
        self.assertEqual(f.x, "blue")
        self.assertEqual(f.y, "large")
        f.x = "red"
        self.assertEqual(f.x, "red")
        with self.assertRaises(ValueError):
            f.x = "yellow"
        f.y = "small"
        self.assertEqual(f.y, "small")
        with self.assertRaises(ValueError):
            f.y = "yellow"

    def test_inheritance(self):
        """Subclasses see base-class properties plus their own."""
        class Base(HasProps):
            x = Int(12)
            y = String("hello")
        class Child(Base):
            z = Float(3.14)
        c = Child()
        self.assertEqual(frozenset(['x', 'y', 'z']), frozenset(c.properties()))
        self.assertEqual(c.y, "hello")

    def test_set(self):
        """update() sets many properties at once, still validating each."""
        class Foo(HasProps):
            x = Int(12)
            y = Enum("red", "blue", "green")
            z = String("blah")
        f = Foo()
        self.assertEqual(f.x, 12)
        self.assertEqual(f.y, "red")
        self.assertEqual(f.z, "blah")
        f.update(**dict(x=20, y="green", z="hello"))
        self.assertEqual(f.x, 20)
        self.assertEqual(f.y, "green")
        self.assertEqual(f.z, "hello")
        with self.assertRaises(ValueError):
            f.update(y="orange")

    def test_no_parens(self):
        """A bare property class (no parentheses) is auto-instantiated."""
        class Foo(HasProps):
            x = Int
            y = Int()
        f = Foo()
        self.assertEqual(f.x, f.y)
        f.x = 13
        self.assertEqual(f.x, 13)

    def test_accurate_properties_sets(self):
        """properties(), properties_with_refs(), and
        properties_containers() report the right names across base,
        mixin, and subclass, and with_bases controls inheritance."""
        class Base(HasProps):
            num = Int(12)
            container = List(String)
            child = Instance(HasProps)
        class Mixin(HasProps):
            mixin_num = Int(12)
            mixin_container = List(String)
            mixin_child = Instance(HasProps)
        class Sub(Base, Mixin):
            sub_num = Int(12)
            sub_container = List(String)
            sub_child = Instance(HasProps)
        b = Base()
        self.assertEqual(set(["child"]),
                         b.properties_with_refs())
        self.assertEqual(set(["container"]),
                         b.properties_containers())
        self.assertEqual(set(["num", "container", "child"]),
                         b.properties())
        self.assertEqual(set(["num", "container", "child"]),
                         b.properties(with_bases=True))
        self.assertEqual(set(["num", "container", "child"]),
                         b.properties(with_bases=False))
        m = Mixin()
        self.assertEqual(set(["mixin_child"]),
                         m.properties_with_refs())
        self.assertEqual(set(["mixin_container"]),
                         m.properties_containers())
        self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
                         m.properties())
        self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
                         m.properties(with_bases=True))
        self.assertEqual(set(["mixin_num", "mixin_container", "mixin_child"]),
                         m.properties(with_bases=False))
        s = Sub()
        self.assertEqual(set(["child", "sub_child", "mixin_child"]),
                         s.properties_with_refs())
        self.assertEqual(set(["container", "sub_container", "mixin_container"]),
                         s.properties_containers())
        self.assertEqual(set(["num", "container", "child",
                              "mixin_num", "mixin_container", "mixin_child",
                              "sub_num", "sub_container", "sub_child"]),
                         s.properties())
        self.assertEqual(set(["num", "container", "child",
                              "mixin_num", "mixin_container", "mixin_child",
                              "sub_num", "sub_container", "sub_child"]),
                         s.properties(with_bases=True))
        self.assertEqual(set(["sub_num", "sub_container", "sub_child"]),
                         s.properties(with_bases=False))
        # verify caching
        self.assertIs(s.properties_with_refs(), s.properties_with_refs())
        self.assertIs(s.properties_containers(), s.properties_containers())
        self.assertIs(s.properties(), s.properties())
        self.assertIs(s.properties(with_bases=True), s.properties(with_bases=True))
        # this one isn't cached because we store it as a list __properties__ and wrap it
        # in a new set every time
        #self.assertIs(s.properties(with_bases=False), s.properties(with_bases=False))

    def test_accurate_dataspecs(self):
        """dataspecs() / dataspecs_with_props() collect only spec
        properties, across inheritance."""
        class Base(HasProps):
            num = NumberSpec(12)
            not_a_dataspec = Float(10)
        class Mixin(HasProps):
            mixin_num = NumberSpec(14)
        class Sub(Base, Mixin):
            sub_num = NumberSpec(16)
        base = Base()
        mixin = Mixin()
        sub = Sub()
        self.assertEqual(set(["num"]), base.dataspecs())
        self.assertEqual(set(["mixin_num"]), mixin.dataspecs())
        self.assertEqual(set(["num", "mixin_num", "sub_num"]), sub.dataspecs())
        self.assertDictEqual(dict(num=base.lookup("num")), base.dataspecs_with_props())
        self.assertDictEqual(dict(mixin_num=mixin.lookup("mixin_num")), mixin.dataspecs_with_props())
        self.assertDictEqual(dict(num=sub.lookup("num"),
                                  mixin_num=sub.lookup("mixin_num"),
                                  sub_num=sub.lookup("sub_num")),
                             sub.dataspecs_with_props())

    def test_not_serialized(self):
        """serialized=False properties stay out of properties_with_values."""
        class NotSerialized(HasProps):
            x = Int(12, serialized=False)
            y = String("hello")
        o = NotSerialized()
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, 'hello')
        # non-serialized props are still in the list of props
        self.assertTrue('x' in o.properties())
        self.assertTrue('y' in o.properties())
        # but they aren't in the dict of props with values, since their
        # values are not important (already included in other values,
        # as with the _units properties)
        self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        o.x = 42
        o.y = 'world'
        self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' in o.properties_with_values(include_defaults=False))

    def test_readonly(self):
        """readonly properties reject assignment and are not serialized."""
        class Readonly(HasProps):
            x = Int(12, readonly=True)    # with default
            y = Int(readonly=True)        # without default
            z = String("hello")
        o = Readonly()
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, None)
        self.assertEqual(o.z, 'hello')
        # readonly props are still in the list of props
        self.assertTrue('x' in o.properties())
        self.assertTrue('y' in o.properties())
        self.assertTrue('z' in o.properties())
        # but they aren't in the dict of props with values
        self.assertTrue('x' not in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=True))
        self.assertTrue('z' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('z' not in o.properties_with_values(include_defaults=False))
        with self.assertRaises(RuntimeError):
            o.x = 7
        with self.assertRaises(RuntimeError):
            o.y = 7
        o.z = "xyz"
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, None)
        self.assertEqual(o.z, 'xyz')

    def test_include_defaults(self):
        """include_defaults toggles whether default values are reported."""
        class IncludeDefaultsTest(HasProps):
            x = Int(12)
            y = String("hello")
        o = IncludeDefaultsTest()
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, 'hello')
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        o.x = 42
        o.y = 'world'
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' in o.properties_with_values(include_defaults=False))

    def test_include_defaults_with_kwargs(self):
        """Constructor kwargs count as explicitly-set (non-default) values."""
        class IncludeDefaultsKwargsTest(HasProps):
            x = Int(12)
            y = String("hello")
        o = IncludeDefaultsKwargsTest(x=14, y="world")
        self.assertEqual(o.x, 14)
        self.assertEqual(o.y, 'world')
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' in o.properties_with_values(include_defaults=False))

    def test_include_defaults_set_to_same(self):
        """Assigning the default value back is a no-op for serialization."""
        class IncludeDefaultsSetToSameTest(HasProps):
            x = Int(12)
            y = String("hello")
        o = IncludeDefaultsSetToSameTest()
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        # this should no-op
        o.x = 12
        o.y = "hello"
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))

    def test_override_defaults(self):
        """Override(default=...) replaces an inherited default; the
        overridden value still reads as a default, not a set value."""
        class FooBase(HasProps):
            x = Int(12)
        class FooSub(FooBase):
            x = Override(default=14)
        def func_default():
            return 16
        class FooSubSub(FooBase):
            x = Override(default=func_default)
        f_base = FooBase()
        f_sub = FooSub()
        f_sub_sub = FooSubSub()
        self.assertEqual(f_base.x, 12)
        self.assertEqual(f_sub.x, 14)
        self.assertEqual(f_sub_sub.x, 16)
        self.assertEqual(12, f_base.properties_with_values(include_defaults=True)['x'])
        self.assertEqual(14, f_sub.properties_with_values(include_defaults=True)['x'])
        self.assertEqual(16, f_sub_sub.properties_with_values(include_defaults=True)['x'])
        self.assertFalse('x' in f_base.properties_with_values(include_defaults=False))
        self.assertFalse('x' in f_sub.properties_with_values(include_defaults=False))
        self.assertFalse('x' in f_sub_sub.properties_with_values(include_defaults=False))

    def test_include_delegate(self):
        """Include() copies a delegate's properties, with or without a
        prefix, and local declarations/Overrides win over the Include."""
        class IsDelegate(HasProps):
            x = Int(12)
            y = String("hello")
        class IncludesDelegateWithPrefix(HasProps):
            z = Include(IsDelegate, use_prefix=True)
            z_y = Int(57) # override the Include
        class IncludesDelegateWithoutPrefix(HasProps):
            z = Include(IsDelegate, use_prefix=False)
            y = Int(42) # override the Include
        class IncludesDelegateWithoutPrefixUsingOverride(HasProps):
            z = Include(IsDelegate, use_prefix=False)
            y = Override(default="world") # override the Include changing just the default
        o = IncludesDelegateWithoutPrefix()
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, 42)
        self.assertFalse(hasattr(o, 'z'))
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        o = IncludesDelegateWithoutPrefixUsingOverride()
        self.assertEqual(o.x, 12)
        self.assertEqual(o.y, 'world')
        self.assertFalse(hasattr(o, 'z'))
        self.assertTrue('x' in o.properties_with_values(include_defaults=True))
        self.assertTrue('y' in o.properties_with_values(include_defaults=True))
        self.assertTrue('x' not in o.properties_with_values(include_defaults=False))
        self.assertTrue('y' not in o.properties_with_values(include_defaults=False))
        o2 = IncludesDelegateWithPrefix()
        self.assertEqual(o2.z_x, 12)
        self.assertEqual(o2.z_y, 57)
        self.assertFalse(hasattr(o2, 'z'))
        self.assertFalse(hasattr(o2, 'x'))
        self.assertFalse(hasattr(o2, 'y'))
        self.assertFalse('z' in o2.properties_with_values(include_defaults=True))
        self.assertFalse('x' in o2.properties_with_values(include_defaults=True))
        self.assertFalse('y' in o2.properties_with_values(include_defaults=True))
        self.assertTrue('z_x' in o2.properties_with_values(include_defaults=True))
        self.assertTrue('z_y' in o2.properties_with_values(include_defaults=True))
        self.assertTrue('z_x' not in o2.properties_with_values(include_defaults=False))
        self.assertTrue('z_y' not in o2.properties_with_values(include_defaults=False))

    # def test_kwargs_init(self):
    #     class Foo(HasProps):
    #         x = String
    #         y = Int
    #         z = Float
    #     f = Foo(x = "hello", y = 14)
    #     self.assertEqual(f.x, "hello")
    #     self.assertEqual(f.y, 14)
    #     with self.assertRaises(TypeError):
    #         # This should raise a TypeError: object.__init__() takes no parameters
    #         g = Foo(z = 3.14, q = "blah")
class TestNumberSpec(unittest.TestCase):
    """NumberSpec: string defaults act as field specs, numbers as value
    specs; serializable_value reflects whichever was set last."""

    def test_field(self):
        """A string default/assignment serializes as {"field": name}."""
        class Foo(HasProps):
            x = NumberSpec("xfield")
        f = Foo()
        self.assertEqual(f.x, "xfield")
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"field": "xfield"})
        f.x = "my_x"
        self.assertEqual(f.x, "my_x")
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"field": "my_x"})

    def test_value(self):
        """A numeric assignment serializes as {"value": n}; None stays None."""
        class Foo(HasProps):
            x = NumberSpec("xfield")
        f = Foo()
        self.assertEqual(f.x, "xfield")
        f.x = 12
        self.assertEqual(f.x, 12)
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 12})
        f.x = 15
        self.assertEqual(f.x, 15)
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 15})
        f.x = dict(value=32)
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(f), {"value": 32})
        f.x = None
        self.assertIs(Foo.__dict__["x"].serializable_value(f), None)

    def test_default(self):
        """A numeric default serializes as a value spec until reassigned."""
        class Foo(HasProps):
            y = NumberSpec(default=12)
        f = Foo()
        self.assertEqual(f.y, 12)
        self.assertDictEqual(Foo.__dict__["y"].serializable_value(f), {"value": 12})
        f.y = "y1"
        self.assertEqual(f.y, "y1")
        # Once we set a concrete value, the default is ignored, because it is unused
        f.y = 32
        self.assertEqual(f.y, 32)
        self.assertDictEqual(Foo.__dict__["y"].serializable_value(f), {"value": 32})

    def test_multiple_instances(self):
        """Instances hold independent spec values for the same property."""
        class Foo(HasProps):
            x = NumberSpec("xfield")
        a = Foo()
        b = Foo()
        a.x = 13
        b.x = 14
        self.assertEqual(a.x, 13)
        self.assertEqual(b.x, 14)
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(a), {"value": 13})
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(b), {"value": 14})
        b.x = {"field": "x3"}
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(a), {"value": 13})
        self.assertDictEqual(Foo.__dict__["x"].serializable_value(b), {"field": "x3"})

    def test_autocreate_no_parens(self):
        """A bare NumberSpec class is auto-instantiated with default None."""
        class Foo(HasProps):
            x = NumberSpec
        a = Foo()
        self.assertIs(a.x, None)
        a.x = 14
        self.assertEqual(a.x, 14)

    def test_set_from_json_keeps_mode(self):
        """set_from_json preserves the scalar/dict shape of the current
        value, while plain assignment replaces it."""
        class Foo(HasProps):
            x = NumberSpec(default=None)
        a = Foo()
        self.assertIs(a.x, None)
        # set as a value
        a.x = 14
        self.assertEqual(a.x, 14)
        # set_from_json keeps the previous dict-ness or lack thereof
        a.set_from_json('x', dict(value=16))
        self.assertEqual(a.x, 16)
        # but regular assignment overwrites the previous dict-ness
        a.x = dict(value=17)
        self.assertDictEqual(a.x, dict(value=17))
        # set as a field
        a.x = "bar"
        self.assertEqual(a.x, "bar")
        # set_from_json keeps the previous dict-ness or lack thereof
        a.set_from_json('x', dict(field="foo"))
        self.assertEqual(a.x, "foo")
        # but regular assignment overwrites the previous dict-ness
        a.x = dict(field="baz")
        self.assertDictEqual(a.x, dict(field="baz"))
class TestFontSizeSpec(unittest.TestCase):
    """FontSizeSpec: strings that parse as CSS sizes become value
    specs; anything else becomes a field spec or is rejected."""

    def test_font_size_from_string(self):
        """Valid CSS sizes (any case) are values; underscore-prefixed
        strings are fields."""
        class Foo(HasProps):
            x = FontSizeSpec(default=None)
        css_units = "%|em|ex|ch|ic|rem|vw|vh|vi|vb|vmin|vmax|cm|mm|q|in|pc|pt|px"
        a = Foo()
        self.assertIs(a.x, None)

        def check_value(candidate):
            # a parseable size becomes {"value": ...}
            a.x = candidate
            self.assertEqual(a.x, dict(value=candidate))
            self.assertEqual(a.lookup('x').serializable_value(a),
                             dict(value=candidate))

        def check_field(candidate):
            # a non-size string is treated as a column/field name
            a.x = candidate
            self.assertEqual(a.x, candidate)
            self.assertEqual(a.lookup('x').serializable_value(a),
                             dict(field=candidate))

        # lowercase units first, then uppercase, as in the CSS spec
        all_units = css_units.split("|") + css_units.upper().split("|")
        for unit in all_units:
            check_value('10%s' % unit)
            check_value('10.2%s' % unit)
            check_field('_10%s' % unit)
            check_field('_10.2%s' % unit)

    def test_bad_font_size_values(self):
        """Unitless strings, numbers, and empty strings are rejected."""
        class Foo(HasProps):
            x = FontSizeSpec(default=None)
        a = Foo()
        for bad in ("6", 6, ""):
            with self.assertRaises(ValueError):
                a.x = bad

    def test_fields(self):
        """Field-style strings round-trip whether given bare or as dicts."""
        class Foo(HasProps):
            x = FontSizeSpec(default=None)
        a = Foo()
        for name in ("_120", "foo"):
            a.x = name
            self.assertEqual(a.x, name)
        a.x = dict(field="_120")
        self.assertEqual(a.x, dict(field="_120"))
        a.x = "foo"
        self.assertEqual(a.x, "foo")
        a.x = dict(field="foo")
        self.assertEqual(a.x, dict(field="foo"))
class TestAngleSpec(unittest.TestCase):
    """AngleSpec: default units are radians; a dict with 'units' can
    switch to degrees without leaking into the stored value."""

    def test_default_none(self):
        """None default; units default to 'rad'."""
        class Foo(HasProps):
            x = AngleSpec(None)
        a = Foo()
        self.assertIs(a.x, None)
        self.assertEqual(a.x_units, 'rad')
        a.x = 14
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')

    def test_autocreate_no_parens(self):
        """A bare AngleSpec class is auto-instantiated."""
        class Foo(HasProps):
            x = AngleSpec
        a = Foo()
        self.assertIs(a.x, None)
        self.assertEqual(a.x_units, 'rad')
        a.x = 14
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')

    def test_default_value(self):
        """A numeric default is returned as-is with 'rad' units."""
        class Foo(HasProps):
            x = AngleSpec(default=14)
        a = Foo()
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')

    def test_setting_dict_sets_units(self):
        """A dict with a 'units' key updates x_units and strips the key."""
        class Foo(HasProps):
            x = AngleSpec(default=14)
        a = Foo()
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')
        a.x = { 'value' : 180, 'units' : 'deg' }
        self.assertDictEqual(a.x, { 'value' : 180 })
        self.assertEqual(a.x_units, 'deg')

    def test_setting_json_sets_units_keeps_dictness(self):
        """set_from_json applies the units but keeps the scalar shape."""
        class Foo(HasProps):
            x = AngleSpec(default=14)
        a = Foo()
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')
        a.set_from_json('x', { 'value' : 180, 'units' : 'deg' })
        self.assertEqual(a.x, 180)
        self.assertEqual(a.x_units, 'deg')

    def test_setting_dict_does_not_modify_original_dict(self):
        """Assignment must not mutate the caller's dict in place."""
        class Foo(HasProps):
            x = AngleSpec(default=14)
        a = Foo()
        self.assertEqual(a.x, 14)
        self.assertEqual(a.x_units, 'rad')
        new_value = { 'value' : 180, 'units' : 'deg' }
        new_value_copy = copy(new_value)
        self.assertDictEqual(new_value_copy, new_value)
        a.x = new_value
        self.assertDictEqual(a.x, { 'value' : 180 })
        self.assertEqual(a.x_units, 'deg')
        # the original dict passed in is untouched
        self.assertDictEqual(new_value_copy, new_value)
class TestDistanceSpec(unittest.TestCase):
    """DistanceSpec: like NumberSpec but with 'data' as default units."""

    def test_default_none(self):
        """None default; units default to 'data'."""
        class Foo(HasProps):
            x = DistanceSpec(None)
        obj = Foo()
        self.assertIs(obj.x, None)
        self.assertEqual(obj.x_units, 'data')
        obj.x = 14
        self.assertEqual(obj.x, 14)
        self.assertEqual(obj.x_units, 'data')

    def test_autocreate_no_parens(self):
        """A bare DistanceSpec class is auto-instantiated."""
        class Foo(HasProps):
            x = DistanceSpec
        obj = Foo()
        self.assertIs(obj.x, None)
        self.assertEqual(obj.x_units, 'data')
        obj.x = 14
        self.assertEqual(obj.x, 14)
        self.assertEqual(obj.x_units, 'data')

    def test_default_value(self):
        """A numeric default is returned as-is with 'data' units."""
        class Foo(HasProps):
            x = DistanceSpec(default=14)
        obj = Foo()
        self.assertEqual(obj.x, 14)
        self.assertEqual(obj.x_units, 'data')
class TestColorSpec(unittest.TestCase):
    """Tests for ColorSpec: field-vs-value disambiguation and serialization.

    ColorSpec treats recognized color strings (named colors, hex, tuples)
    as fixed values and all other strings as field names.
    """

    def test_field(self):
        """A non-color string is interpreted as a field name."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        self.assertEqual(f.col, "colorfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "colorfield"})
        f.col = "myfield"
        self.assertEqual(f.col, "myfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})

    def test_field_default(self):
        """A named-color default serializes as a value, not a field."""
        class Foo(HasProps):
            col = ColorSpec(default="red")
        desc = Foo.__dict__["col"]
        f = Foo()
        self.assertEqual(f.col, "red")
        self.assertDictEqual(desc.serializable_value(f), {"value": "red"})
        f.col = "myfield"
        self.assertEqual(f.col, "myfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})

    def test_default_tuple(self):
        """An RGB tuple default serializes to a CSS rgb() string."""
        class Foo(HasProps):
            col = ColorSpec(default=(128, 255, 124))
        desc = Foo.__dict__["col"]
        f = Foo()
        self.assertEqual(f.col, (128, 255, 124))
        self.assertDictEqual(desc.serializable_value(f), {"value": "rgb(128, 255, 124)"})

    def test_fixed_value(self):
        """A recognized color name given positionally is a fixed value."""
        class Foo(HasProps):
            col = ColorSpec("gray")
        desc = Foo.__dict__["col"]
        f = Foo()
        self.assertEqual(f.col, "gray")
        self.assertDictEqual(desc.serializable_value(f), {"value": "gray"})

    def test_named_value(self):
        """Assigning a named color switches the spec from field to value."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = "red"
        self.assertEqual(f.col, "red")
        self.assertDictEqual(desc.serializable_value(f), {"value": "red"})
        f.col = "forestgreen"
        self.assertEqual(f.col, "forestgreen")
        self.assertDictEqual(desc.serializable_value(f), {"value": "forestgreen"})

    def test_case_insensitive_named_value(self):
        """Color-name recognition is case-insensitive; the case is preserved."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = "RED"
        self.assertEqual(f.col, "RED")
        self.assertDictEqual(desc.serializable_value(f), {"value": "RED"})
        f.col = "ForestGreen"
        self.assertEqual(f.col, "ForestGreen")
        self.assertDictEqual(desc.serializable_value(f), {"value": "ForestGreen"})

    def test_named_value_set_none(self):
        """Assigning None serializes as an explicit null value."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = None
        self.assertDictEqual(desc.serializable_value(f), {"value": None})

    def test_named_value_unset(self):
        """An unset spec serializes using its original field name."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        self.assertDictEqual(desc.serializable_value(f), {"field": "colorfield"})

    def test_named_color_overriding_default(self):
        """Later assignments keep re-deciding between value and field."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = "forestgreen"
        self.assertEqual(f.col, "forestgreen")
        self.assertDictEqual(desc.serializable_value(f), {"value": "forestgreen"})
        f.col = "myfield"
        self.assertEqual(f.col, "myfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})

    def test_hex_value(self):
        """A hex color string is recognized as a fixed value."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = "#FF004A"
        self.assertEqual(f.col, "#FF004A")
        self.assertDictEqual(desc.serializable_value(f), {"value": "#FF004A"})
        f.col = "myfield"
        self.assertEqual(f.col, "myfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})

    def test_tuple_value(self):
        """RGB and RGBA tuples serialize to rgb()/rgba() CSS strings."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = (128, 200, 255)
        self.assertEqual(f.col, (128, 200, 255))
        self.assertDictEqual(desc.serializable_value(f), {"value": "rgb(128, 200, 255)"})
        f.col = "myfield"
        self.assertEqual(f.col, "myfield")
        self.assertDictEqual(desc.serializable_value(f), {"field": "myfield"})
        f.col = (100, 150, 200, 0.5)
        self.assertEqual(f.col, (100, 150, 200, 0.5))
        self.assertDictEqual(desc.serializable_value(f), {"value": "rgba(100, 150, 200, 0.5)"})

    def test_set_dict(self):
        """An explicit {'field': ...} dict assignment is accepted verbatim."""
        class Foo(HasProps):
            col = ColorSpec("colorfield")
        desc = Foo.__dict__["col"]
        f = Foo()
        f.col = {"field": "myfield"}
        self.assertDictEqual(f.col, {"field": "myfield"})
        f.col = "field2"
        self.assertEqual(f.col, "field2")
        self.assertDictEqual(desc.serializable_value(f), {"field": "field2"})
class TestDashPattern(unittest.TestCase):
    """Tests for DashPattern: named patterns, strings, sequences, rejects."""

    def test_named(self):
        """Named dash styles map to their canonical integer patterns."""
        class Foo(HasProps):
            pat = DashPattern
        f = Foo()
        self.assertEqual(f.pat, [])
        f.pat = "solid"
        self.assertEqual(f.pat, [])
        f.pat = "dashed"
        self.assertEqual(f.pat, [6])
        f.pat = "dotted"
        self.assertEqual(f.pat, [2, 4])
        f.pat = "dotdash"
        self.assertEqual(f.pat, [2, 4, 6, 4])
        f.pat = "dashdot"
        self.assertEqual(f.pat, [6, 4, 2, 4])

    def test_string(self):
        """Whitespace-separated integer strings parse into lists of ints."""
        class Foo(HasProps):
            pat = DashPattern
        f = Foo()
        f.pat = ""
        self.assertEqual(f.pat, [])
        f.pat = "2"
        self.assertEqual(f.pat, [2])
        f.pat = "2 4"
        self.assertEqual(f.pat, [2, 4])
        f.pat = "2 4 6"
        self.assertEqual(f.pat, [2, 4, 6])
        with self.assertRaises(ValueError):
            f.pat = "abc 6"  # non-numeric token is rejected

    def test_list(self):
        """Integer sequences are accepted as-is; non-ints are rejected."""
        class Foo(HasProps):
            pat = DashPattern
        f = Foo()
        f.pat = ()
        self.assertEqual(f.pat, ())
        f.pat = (2,)
        self.assertEqual(f.pat, (2,))
        f.pat = (2, 4)
        self.assertEqual(f.pat, (2, 4))
        f.pat = (2, 4, 6)
        self.assertEqual(f.pat, (2, 4, 6))
        with self.assertRaises(ValueError):
            f.pat = (2, 4.2)
        with self.assertRaises(ValueError):
            f.pat = (2, "a")

    def test_invalid(self):
        """Scalars and mappings are not valid dash patterns."""
        class Foo(HasProps):
            pat = DashPattern
        f = Foo()
        with self.assertRaises(ValueError):
            f.pat = 10
        with self.assertRaises(ValueError):
            f.pat = 10.1
        with self.assertRaises(ValueError):
            f.pat = {}
class Foo(HasProps):
    # Empty HasProps subclass used as a generic non-primitive sample value
    # (and as the Instance target) in the validation tests below.
    pass
class Bar(HasProps):
    # Empty HasProps subclass distinct from Foo, used to check that
    # Instance(Foo) rejects unrelated HasProps types.
    pass
class Baz(HasProps):
    # Second unrelated empty HasProps subclass for Instance rejection tests.
    pass
class TestProperties(unittest.TestCase):
    """Validation matrix for the core property types.

    Each test probes one property class's ``is_valid`` against the same
    palette of sample values (None, bools, numbers, strings, containers,
    a HasProps instance) plus NumPy scalar types where relevant.
    ``# TODO`` lines mark known-wrong acceptances (bools as numbers) that
    are documented but not yet enforced.
    """

    def test_Any(self):
        """Any accepts every value."""
        prop = Any()
        self.assertTrue(prop.is_valid(None))
        self.assertTrue(prop.is_valid(False))
        self.assertTrue(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertTrue(prop.is_valid(1.0+1.0j))
        self.assertTrue(prop.is_valid(""))
        self.assertTrue(prop.is_valid(()))
        self.assertTrue(prop.is_valid([]))
        self.assertTrue(prop.is_valid({}))
        self.assertTrue(prop.is_valid(Foo()))

    def test_Bool(self):
        """Bool accepts only Python and NumPy booleans."""
        prop = Bool()
        self.assertTrue(prop.is_valid(None))
        self.assertTrue(prop.is_valid(False))
        self.assertTrue(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # numpy bools count as booleans; numpy numerics do not
        self.assertTrue(prop.is_valid(np.bool8(False)))
        self.assertTrue(prop.is_valid(np.bool8(True)))
        self.assertFalse(prop.is_valid(np.int8(0)))
        self.assertFalse(prop.is_valid(np.int8(1)))
        self.assertFalse(prop.is_valid(np.int16(0)))
        self.assertFalse(prop.is_valid(np.int16(1)))
        self.assertFalse(prop.is_valid(np.int32(0)))
        self.assertFalse(prop.is_valid(np.int32(1)))
        self.assertFalse(prop.is_valid(np.int64(0)))
        self.assertFalse(prop.is_valid(np.int64(1)))
        self.assertFalse(prop.is_valid(np.uint8(0)))
        self.assertFalse(prop.is_valid(np.uint8(1)))
        self.assertFalse(prop.is_valid(np.uint16(0)))
        self.assertFalse(prop.is_valid(np.uint16(1)))
        self.assertFalse(prop.is_valid(np.uint32(0)))
        self.assertFalse(prop.is_valid(np.uint32(1)))
        self.assertFalse(prop.is_valid(np.uint64(0)))
        self.assertFalse(prop.is_valid(np.uint64(1)))
        self.assertFalse(prop.is_valid(np.float16(0)))
        self.assertFalse(prop.is_valid(np.float16(1)))
        self.assertFalse(prop.is_valid(np.float32(0)))
        self.assertFalse(prop.is_valid(np.float32(1)))
        self.assertFalse(prop.is_valid(np.float64(0)))
        self.assertFalse(prop.is_valid(np.float64(1)))
        self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
        self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
        if hasattr(np, "complex256"):  # not available on all platforms
            self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))

    def test_Int(self):
        """Int accepts Python and NumPy integers, rejects floats."""
        prop = Int()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
        self.assertTrue(prop.is_valid(np.int8(0)))
        self.assertTrue(prop.is_valid(np.int8(1)))
        self.assertTrue(prop.is_valid(np.int16(0)))
        self.assertTrue(prop.is_valid(np.int16(1)))
        self.assertTrue(prop.is_valid(np.int32(0)))
        self.assertTrue(prop.is_valid(np.int32(1)))
        self.assertTrue(prop.is_valid(np.int64(0)))
        self.assertTrue(prop.is_valid(np.int64(1)))
        self.assertTrue(prop.is_valid(np.uint8(0)))
        self.assertTrue(prop.is_valid(np.uint8(1)))
        self.assertTrue(prop.is_valid(np.uint16(0)))
        self.assertTrue(prop.is_valid(np.uint16(1)))
        self.assertTrue(prop.is_valid(np.uint32(0)))
        self.assertTrue(prop.is_valid(np.uint32(1)))
        self.assertTrue(prop.is_valid(np.uint64(0)))
        self.assertTrue(prop.is_valid(np.uint64(1)))
        self.assertFalse(prop.is_valid(np.float16(0)))
        self.assertFalse(prop.is_valid(np.float16(1)))
        self.assertFalse(prop.is_valid(np.float32(0)))
        self.assertFalse(prop.is_valid(np.float32(1)))
        self.assertFalse(prop.is_valid(np.float64(0)))
        self.assertFalse(prop.is_valid(np.float64(1)))
        self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
        self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
        if hasattr(np, "complex256"):
            self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))

    def test_Float(self):
        """Float accepts ints and floats (Python and NumPy), not complex."""
        prop = Float()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
        self.assertTrue(prop.is_valid(np.int8(0)))
        self.assertTrue(prop.is_valid(np.int8(1)))
        self.assertTrue(prop.is_valid(np.int16(0)))
        self.assertTrue(prop.is_valid(np.int16(1)))
        self.assertTrue(prop.is_valid(np.int32(0)))
        self.assertTrue(prop.is_valid(np.int32(1)))
        self.assertTrue(prop.is_valid(np.int64(0)))
        self.assertTrue(prop.is_valid(np.int64(1)))
        self.assertTrue(prop.is_valid(np.uint8(0)))
        self.assertTrue(prop.is_valid(np.uint8(1)))
        self.assertTrue(prop.is_valid(np.uint16(0)))
        self.assertTrue(prop.is_valid(np.uint16(1)))
        self.assertTrue(prop.is_valid(np.uint32(0)))
        self.assertTrue(prop.is_valid(np.uint32(1)))
        self.assertTrue(prop.is_valid(np.uint64(0)))
        self.assertTrue(prop.is_valid(np.uint64(1)))
        self.assertTrue(prop.is_valid(np.float16(0)))
        self.assertTrue(prop.is_valid(np.float16(1)))
        self.assertTrue(prop.is_valid(np.float32(0)))
        self.assertTrue(prop.is_valid(np.float32(1)))
        self.assertTrue(prop.is_valid(np.float64(0)))
        self.assertTrue(prop.is_valid(np.float64(1)))
        self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))
        self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))
        if hasattr(np, "complex256"):
            self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))

    def test_Complex(self):
        """Complex accepts all numeric values including complex numbers."""
        prop = Complex()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertTrue(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))
        # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))
        self.assertTrue(prop.is_valid(np.int8(0)))
        self.assertTrue(prop.is_valid(np.int8(1)))
        self.assertTrue(prop.is_valid(np.int16(0)))
        self.assertTrue(prop.is_valid(np.int16(1)))
        self.assertTrue(prop.is_valid(np.int32(0)))
        self.assertTrue(prop.is_valid(np.int32(1)))
        self.assertTrue(prop.is_valid(np.int64(0)))
        self.assertTrue(prop.is_valid(np.int64(1)))
        self.assertTrue(prop.is_valid(np.uint8(0)))
        self.assertTrue(prop.is_valid(np.uint8(1)))
        self.assertTrue(prop.is_valid(np.uint16(0)))
        self.assertTrue(prop.is_valid(np.uint16(1)))
        self.assertTrue(prop.is_valid(np.uint32(0)))
        self.assertTrue(prop.is_valid(np.uint32(1)))
        self.assertTrue(prop.is_valid(np.uint64(0)))
        self.assertTrue(prop.is_valid(np.uint64(1)))
        self.assertTrue(prop.is_valid(np.float16(0)))
        self.assertTrue(prop.is_valid(np.float16(1)))
        self.assertTrue(prop.is_valid(np.float32(0)))
        self.assertTrue(prop.is_valid(np.float32(1)))
        self.assertTrue(prop.is_valid(np.float64(0)))
        self.assertTrue(prop.is_valid(np.float64(1)))
        self.assertTrue(prop.is_valid(np.complex64(1.0+1.0j)))
        self.assertTrue(prop.is_valid(np.complex128(1.0+1.0j)))
        if hasattr(np, "complex256"):
            self.assertTrue(prop.is_valid(np.complex256(1.0+1.0j)))

    def test_String(self):
        """String accepts only str values (empty string included)."""
        prop = String()
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertTrue(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

    def test_Regex(self):
        """Regex requires a pattern and validates matching strings only."""
        with self.assertRaises(TypeError):
            prop = Regex()  # pattern argument is mandatory
        prop = Regex("^x*$")
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertTrue(prop.is_valid(""))  # "" matches ^x*$
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

    def test_Seq(self):
        """Seq accepts ordered non-mapping sequences incl. numpy/pandas."""
        with self.assertRaises(TypeError):
            prop = Seq()  # item type argument is mandatory
        prop = Seq(Int)
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertTrue(prop.is_valid(()))
        self.assertTrue(prop.is_valid([]))
        self.assertTrue(prop.is_valid(np.array([])))
        self.assertFalse(prop.is_valid(set([])))  # unordered: not a Seq
        self.assertFalse(prop.is_valid({}))
        self.assertTrue(prop.is_valid((1, 2)))
        self.assertTrue(prop.is_valid([1, 2]))
        self.assertTrue(prop.is_valid(np.array([1, 2])))
        self.assertFalse(prop.is_valid({1, 2}))
        self.assertFalse(prop.is_valid({1: 2}))
        self.assertFalse(prop.is_valid(Foo()))
        # pandas Index and Series also qualify as sequences
        df = pd.DataFrame([1, 2])
        self.assertTrue(prop.is_valid(df.index))
        self.assertTrue(prop.is_valid(df.iloc[0]))

    def test_List(self):
        """List accepts only Python lists (not tuples)."""
        with self.assertRaises(TypeError):
            prop = List()  # item type argument is mandatory
        prop = List(Int)
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertTrue(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

    def test_Dict(self):
        """Dict requires key and value types; accepts only dicts."""
        with self.assertRaises(TypeError):
            prop = Dict()  # key/value type arguments are mandatory
        prop = Dict(String, List(Int))
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertTrue(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

    def test_Tuple(self):
        """Tuple requires >= 2 element types and checks each position."""
        with self.assertRaises(TypeError):
            prop = Tuple()
        with self.assertRaises(TypeError):
            prop = Tuple(Int)  # a single element type is not allowed
        prop = Tuple(Int, String, List(Int))
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # per-position type checks
        self.assertTrue(prop.is_valid((1, "", [1, 2, 3])))
        self.assertFalse(prop.is_valid((1.0, "", [1, 2, 3])))
        self.assertFalse(prop.is_valid((1, True, [1, 2, 3])))
        self.assertFalse(prop.is_valid((1, "", (1, 2, 3))))
        self.assertFalse(prop.is_valid((1, "", [1, 2, "xyz"])))

    def test_Instance(self):
        """Instance accepts only instances of the exact declared class."""
        with self.assertRaises(TypeError):
            prop = Instance()  # target class argument is mandatory
        prop = Instance(Foo)
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertTrue(prop.is_valid(Foo()))
        self.assertFalse(prop.is_valid(Bar()))
        self.assertFalse(prop.is_valid(Baz()))

    def test_Instance_from_json(self):
        """Instance.from_json builds an equal object from a plain dict."""
        class MapOptions(HasProps):
            lat = Float
            lng = Float
            zoom = Int(12)
        v1 = Instance(MapOptions).from_json(dict(lat=1, lng=2))
        v2 = MapOptions(lat=1, lng=2)
        self.assertTrue(v1.equals(v2))

    def test_Interval(self):
        """Interval checks both the element type and the inclusive bounds."""
        with self.assertRaises(TypeError):
            prop = Interval()
        with self.assertRaises(ValueError):
            prop = Interval(Int, 0.0, 1.0)  # bounds must match the type
        prop = Interval(Int, 0, 255)
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid(127))
        self.assertFalse(prop.is_valid(-1))
        self.assertFalse(prop.is_valid(256))
        prop = Interval(Float, 0.0, 1.0)
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid(0.5))
        self.assertFalse(prop.is_valid(-0.001))
        self.assertFalse(prop.is_valid( 1.001))

    def test_Either(self):
        """Either accepts any value valid for at least one alternative."""
        with self.assertRaises(TypeError):
            prop = Either()  # at least one alternative is mandatory
        prop = Either(Interval(Int, 0, 100), Regex("^x*$"), List(Int))
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertTrue(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertTrue(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid(100))
        self.assertFalse(prop.is_valid(-100))
        self.assertTrue(prop.is_valid("xxx"))
        self.assertFalse(prop.is_valid("yyy"))
        self.assertTrue(prop.is_valid([1, 2, 3]))
        self.assertFalse(prop.is_valid([1, 2, ""]))

    def test_Enum(self):
        """Enum validates exact membership (and case rules per enum)."""
        with self.assertRaises(TypeError):
            prop = Enum()
        with self.assertRaises(TypeError):
            prop = Enum("red", "green", 1)      # non-string member
        with self.assertRaises(TypeError):
            prop = Enum("red", "green", "red")  # duplicate member
        prop = Enum("red", "green", "blue")
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid("red"))
        self.assertTrue(prop.is_valid("green"))
        self.assertTrue(prop.is_valid("blue"))
        # ad-hoc enums are case-sensitive and whitespace-sensitive
        self.assertFalse(prop.is_valid("RED"))
        self.assertFalse(prop.is_valid("GREEN"))
        self.assertFalse(prop.is_valid("BLUE"))
        self.assertFalse(prop.is_valid(" red"))
        self.assertFalse(prop.is_valid(" green"))
        self.assertFalse(prop.is_valid(" blue"))
        from bokeh.core.enums import LineJoin
        prop = Enum(LineJoin)
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid("miter"))
        self.assertTrue(prop.is_valid("round"))
        self.assertTrue(prop.is_valid("bevel"))
        self.assertFalse(prop.is_valid("MITER"))
        self.assertFalse(prop.is_valid("ROUND"))
        self.assertFalse(prop.is_valid("BEVEL"))
        self.assertFalse(prop.is_valid(" miter"))
        self.assertFalse(prop.is_valid(" round"))
        self.assertFalse(prop.is_valid(" bevel"))
        # NamedColor is defined as case-insensitive
        from bokeh.core.enums import NamedColor
        prop = Enum(NamedColor)
        self.assertTrue(prop.is_valid("red"))
        self.assertTrue(prop.is_valid("Red"))
        self.assertTrue(prop.is_valid("RED"))

    def test_Color(self):
        """Color accepts RGB(A) tuples, hex strings, and named colors."""
        prop = Color()
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        # tuples: three ints in range, optional float alpha
        self.assertTrue(prop.is_valid((0, 127, 255)))
        self.assertFalse(prop.is_valid((0, -127, 255)))
        self.assertFalse(prop.is_valid((0, 127)))
        self.assertFalse(prop.is_valid((0, 127, 1.0)))
        self.assertFalse(prop.is_valid((0, 127, 255, 255)))
        self.assertTrue(prop.is_valid((0, 127, 255, 1.0)))
        # hex strings must start with '#' and have exactly six hex digits
        self.assertTrue(prop.is_valid("#00aaff"))
        self.assertTrue(prop.is_valid("#00AAFF"))
        self.assertTrue(prop.is_valid("#00AaFf"))
        self.assertFalse(prop.is_valid("00aaff"))
        self.assertFalse(prop.is_valid("00AAFF"))
        self.assertFalse(prop.is_valid("00AaFf"))
        self.assertFalse(prop.is_valid("#00AaFg"))
        self.assertFalse(prop.is_valid("#00AaFff"))
        self.assertTrue(prop.is_valid("blue"))
        self.assertTrue(prop.is_valid("BLUE"))
        self.assertFalse(prop.is_valid("foobar"))
        # transform converts tuples to CSS color strings
        self.assertEqual(prop.transform((0, 127, 255)), "rgb(0, 127, 255)")
        self.assertEqual(prop.transform((0, 127, 255, 0.1)), "rgba(0, 127, 255, 0.1)")

    def test_DashPattern(self):
        """DashPattern accepts named styles, int strings, and int lists."""
        prop = DashPattern()
        self.assertTrue(prop.is_valid(None))
        self.assertFalse(prop.is_valid(False))
        self.assertFalse(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertTrue(prop.is_valid(""))
        self.assertTrue(prop.is_valid(()))
        self.assertTrue(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid("solid"))
        self.assertTrue(prop.is_valid("dashed"))
        self.assertTrue(prop.is_valid("dotted"))
        self.assertTrue(prop.is_valid("dotdash"))
        self.assertTrue(prop.is_valid("dashdot"))
        self.assertFalse(prop.is_valid("DASHDOT"))  # names are case-sensitive
        self.assertTrue(prop.is_valid([1, 2, 3]))
        self.assertFalse(prop.is_valid([1, 2, 3.0]))
        self.assertTrue(prop.is_valid("1 2 3"))
        self.assertFalse(prop.is_valid("1 2 x"))

    def test_Size(self):
        """Size accepts non-negative numbers only."""
        prop = Size()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid(100))
        self.assertTrue(prop.is_valid(100.1))
        self.assertFalse(prop.is_valid(-100))
        self.assertFalse(prop.is_valid(-0.001))

    def test_Percent(self):
        """Percent accepts numbers in the inclusive range [0, 1]."""
        prop = Percent()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))
        self.assertTrue(prop.is_valid(0.5))
        self.assertFalse(prop.is_valid(-0.001))
        self.assertFalse(prop.is_valid( 1.001))

    def test_Angle(self):
        """Angle accepts any real number."""
        prop = Angle()
        self.assertTrue(prop.is_valid(None))
        # TODO: self.assertFalse(prop.is_valid(False))
        # TODO: self.assertFalse(prop.is_valid(True))
        self.assertTrue(prop.is_valid(0))
        self.assertTrue(prop.is_valid(1))
        self.assertTrue(prop.is_valid(0.0))
        self.assertTrue(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0+1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

    def test_MinMaxBounds_with_no_datetime(self):
        """Without datetime support, bounds must be ordered numeric pairs."""
        prop = MinMaxBounds(accept_datetime=False)
        # Valid values
        self.assertTrue(prop.is_valid('auto'))
        self.assertTrue(prop.is_valid(None))
        self.assertTrue(prop.is_valid((12, 13)))
        self.assertTrue(prop.is_valid((-32, -13)))
        self.assertTrue(prop.is_valid((12.1, 13.1)))
        self.assertTrue(prop.is_valid((None, 13.1)))  # half-open is allowed
        self.assertTrue(prop.is_valid((-22, None)))
        # Invalid values
        self.assertFalse(prop.is_valid('string'))
        self.assertFalse(prop.is_valid(12))
        self.assertFalse(prop.is_valid(('a', 'b')))
        self.assertFalse(prop.is_valid((13, 12)))     # min must be < max
        self.assertFalse(prop.is_valid((13.1, 12.2)))
        self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))

    def test_MinMaxBounds_with_datetime(self):
        """With datetime support, date pairs are valid; mixed pairs are not."""
        prop = MinMaxBounds(accept_datetime=True)
        # Valid values
        self.assertTrue(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))
        # Invalid values
        self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), 22)))
def test_HasProps_equals():
    """HasProps.equals compares property values and requires the same class."""
    class Foo(HasProps):
        x = Int(12)
        y = String("hello")
        z = List(Int, [1,2,3])
    class FooUnrelated(HasProps):
        x = Int(12)
        y = String("hello")
        z = List(Int, [1,2,3])

    # identical defaults and identical explicit values compare equal
    assert Foo().equals(Foo()) is True
    assert Foo(x=1).equals(Foo(x=1)) is True
    # a differing property value breaks equality
    assert Foo(x=1).equals(Foo(x=2)) is False
    # comparing against a non-HasProps object is never equal
    assert Foo(x=1).equals(1) is False
    # same property values but a different class is not equal
    assert Foo().equals(FooUnrelated()) is False
def test_HasProps_clone():
    """_clone must copy all explicitly-set (non-default) property values."""
    p1 = Plot(plot_width=1000)
    c1 = p1.properties_with_values(include_defaults=False)
    p2 = p1._clone()
    c2 = p2.properties_with_values(include_defaults=False)
    # the clone carries exactly the same explicitly-set values
    assert c1 == c2
def test_HasProps_pretty():
    """pretty() renders one line for short objects, nested lines otherwise,
    and elides cycles with '...'."""
    class Foo1(HasProps):
        a = Int(12)
        b = String("hello")
    assert Foo1().pretty() == "bokeh.core.tests.test_properties.Foo1(a=12, b='hello')"

    class Foo2(HasProps):
        a = Int(12)
        b = String("hello")
        c = List(Int, [1, 2, 3])
    assert Foo2().pretty() == "bokeh.core.tests.test_properties.Foo2(a=12, b='hello', c=[1, 2, 3])"

    class Foo3(HasProps):
        a = Int(12)
        b = String("hello")
        c = List(Int, [1, 2, 3])
        d = Float(None)
    assert Foo3().pretty() == "bokeh.core.tests.test_properties.Foo3(a=12, b='hello', c=[1, 2, 3], d=None)"

    class Foo4(HasProps):
        a = Int(12)
        b = String("hello")
        c = List(Int, [1, 2, 3])
        d = Float(None)
        e = Instance(Foo1, lambda: Foo1())
    # long/nested objects wrap onto multiple indented lines
    assert Foo4().pretty() == """\
bokeh.core.tests.test_properties.Foo4(
    a=12,
    b='hello',
    c=[1, 2, 3],
    d=None,
    e=bokeh.core.tests.test_properties.Foo1(a=12, b='hello'))"""

    class Foo5(HasProps):
        foo6 = Any            # can't use Instance(".core.tests.test_properties.Foo6")
    class Foo6(HasProps):
        foo5 = Instance(Foo5)
    f5 = Foo5()
    f6 = Foo6(foo5=f5)
    f5.foo6 = f6              # create a reference cycle
    # cyclic references are rendered as '...' instead of recursing forever
    assert f5.pretty() == """\
bokeh.core.tests.test_properties.Foo5(
    foo6=bokeh.core.tests.test_properties.Foo6(
        foo5=bokeh.core.tests.test_properties.Foo5(...)))"""
def test_field_function():
    """The field() helper wraps a column name in a {'field': ...} dataspec."""
    assert field("foo") == {"field": "foo"}
    # TODO (bev) would like this to work I think
    #assert field("foo", transform="junk") == dict(field="foo", transform="junk")
def test_value_function():
    """The value() helper wraps a literal in a {'value': ...} dataspec."""
    assert value("foo") == {"value": "foo"}
    # TODO (bev) would like this to work I think
    #assert value("foo", transform="junk") == dict(value="foo", transform="junk")
def test_strict_dataspec_key_values():
    """Every DataSpec subclass must reject dicts with unrecognized keys."""
    for typ in (NumberSpec, StringSpec, FontSizeSpec, ColorSpec, DataDistanceSpec, ScreenDistanceSpec):
        class Foo(HasProps):
            x = typ("x")
        f = Foo()
        with pytest.raises(ValueError):
            # 'units' is not a valid key for a plain DataSpec dict
            f.x = dict(field="foo", units="junk")
def test_strict_unitspec_key_values():
    """A UnitsSpec dict may carry a valid 'units' key but must reject any
    unrecognized extra keys.

    Fix: the two local helper classes were both named ``FooUnits``, so the
    second definition shadowed the first and any failure repr was ambiguous
    about which spec type was under test; they now have distinct names.
    The function's behavior and interface are unchanged.
    """
    class FooDistanceUnits(HasProps):
        x = DistanceSpec("x")
    f = FooDistanceUnits()
    # a recognized units value for DistanceSpec is accepted
    f.x = dict(field="foo", units="screen")
    with pytest.raises(ValueError):
        # unknown units plus an extra key must be rejected
        f.x = dict(field="foo", units="junk", foo="crap")

    class FooAngleUnits(HasProps):
        x = AngleSpec("x")
    f = FooAngleUnits()
    # a recognized units value for AngleSpec is accepted
    f.x = dict(field="foo", units="deg")
    with pytest.raises(ValueError):
        f.x = dict(field="foo", units="junk", foo="crap")
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from email.mime.text import MIMEText
import smtplib
import socket
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from atomic_reactor.plugin import ExitPlugin, PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.plugins.exit_koji_import import KojiImportPlugin
from atomic_reactor.plugins.exit_koji_promote import KojiPromotePlugin
from atomic_reactor.koji_util import create_koji_session
from atomic_reactor.util import get_build_json
class SendMailPlugin(ExitPlugin):
"""This plugins sends notifications about build results.
Example configuration (see arguments for init for detailed explanation):
"exit_plugins": [{
"name": "sendmail",
"args": {
"send_on": ["auto_canceled", "auto_fail"],
"url": "https://openshift-instance.com",
"smtp_host": "smtp-server.com",
"from_address": "osbs@mycompany.com",
"error_addresses": ["admin@mycompany.com", "manager@mycompany.com"],
"additional_addresses": ["jsmith@mycompany.com", "user@mycompany.com"],
"email_domain": "example.com",
"to_koji_submitter": True,
"to_koji_pkgowner": True,
}
}]
"""
key = "sendmail"
# symbolic constants for states
MANUAL_SUCCESS = 'manual_success'
MANUAL_FAIL = 'manual_fail'
MANUAL_CANCELED = 'manual_canceled'
AUTO_SUCCESS = 'auto_success'
AUTO_FAIL = 'auto_fail'
AUTO_CANCELED = 'auto_canceled'
DEFAULT_SUBMITTER = 'Unknown'
allowed_states = set([MANUAL_SUCCESS, MANUAL_FAIL, MANUAL_CANCELED,
AUTO_SUCCESS, AUTO_FAIL, AUTO_CANCELED])
    def __init__(self, tasker, workflow,
                 smtp_host, from_address,
                 send_on=(AUTO_CANCELED, AUTO_FAIL, MANUAL_SUCCESS, MANUAL_FAIL),
                 url=None,
                 error_addresses=(),
                 additional_addresses=(),
                 email_domain=None,
                 koji_hub=None,
                 koji_root=None,
                 koji_proxyuser=None,
                 koji_ssl_certs_dir=None,
                 koji_krb_principal=None,
                 koji_krb_keytab=None,
                 to_koji_submitter=False,
                 to_koji_pkgowner=False):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param send_on: list of str, list of build states when a notification should be sent
            see 'allowed_states' constant and rules in '_should_send' function
        :param url: str, URL to OSv3 instance where the build logs are stored
        :param smtp_host: str, URL of SMTP server to use to send the message (e.g. "foo.com:25")
        :param from_address: str, the "From" of the notification email
        :param error_addresses: list of str, list of email addresses where to send an email
            if an error occurred (e.g. if we can't find out who to notify about the failed build)
        :param additional_addresses: list of str, always send a message to these email addresses
        :param email_domain: str, email domain used when email addresses cannot be fetched via
            kerberos principal
        :param koji_hub: str, koji hub (xmlrpc)
        :param koji_root: str, koji root (storage)
        :param koji_proxyuser: str, proxy user
        :param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
        :param koji_krb_principal: str, name of Kerberos principal
        :param koji_krb_keytab: str, Kerberos keytab
        :param to_koji_submitter: bool, send a message to the koji submitter
        :param to_koji_pkgowner: bool, send messages to koji package owners
        """
        super(SendMailPlugin, self).__init__(tasker, workflow)
        self.send_on = set(send_on)
        self.url = url
        self.additional_addresses = list(additional_addresses)
        self.smtp_host = smtp_host
        self.from_address = from_address
        self.error_addresses = list(error_addresses)
        self.email_domain = email_domain
        self.koji_hub = koji_hub
        # Make sure koji_root doesn't end with a slash for a prettier link
        self.koji_root = koji_root[:-1] if koji_root and koji_root[-1] == '/' else koji_root
        self.koji_auth_info = {
            'proxyuser': koji_proxyuser,
            'ssl_certs_dir': koji_ssl_certs_dir,
            'krb_principal': koji_krb_principal,
            'krb_keytab': koji_krb_keytab,
        }
        self.to_koji_submitter = to_koji_submitter
        self.to_koji_pkgowner = to_koji_pkgowner
        self.submitter = self.DEFAULT_SUBMITTER

        # Resolve the koji task that triggered this build, if any; failure
        # is logged but never fatal (a notification must still go out).
        try:
            metadata = get_build_json().get("metadata", {})
            self.koji_task_id = int(metadata['labels']['koji-task-id'])
        except Exception:
            self.log.exception("Failed to fetch koji task ID")
            self.koji_task_id = None
        else:
            self.log.info("Koji task ID: %s", self.koji_task_id)

        # Prefer the koji-import result, fall back to koji-promote.
        self.koji_build_id = self.workflow.exit_results.get(KojiImportPlugin.key)
        if not self.koji_build_id:
            self.koji_build_id = self.workflow.exit_results.get(KojiPromotePlugin.key)
            if not self.koji_build_id:
                self.log.info("Failed to fetch koji build ID")
            else:
                self.log.info("Koji build ID: %s", self.koji_build_id)
        else:
            self.log.info("Koji build ID: %s", self.koji_build_id)

        # The koji session is optional; without it only additional_addresses
        # (and error_addresses) can be notified.
        try:
            self.session = create_koji_session(self.koji_hub, self.koji_auth_info)
        except Exception:
            self.log.exception("Failed to connect to koji")
            self.session = None
        else:
            self.log.info("Koji connection established")
def _should_send(self, rebuild, success, auto_canceled, manual_canceled):
"""Return True if any state in `self.send_on` meets given conditions, thus meaning
that a notification mail should be sent.
"""
should_send = False
should_send_mapping = {
self.MANUAL_SUCCESS: not rebuild and success,
self.MANUAL_FAIL: not rebuild and not success,
self.MANUAL_CANCELED: not rebuild and manual_canceled,
self.AUTO_SUCCESS: rebuild and success,
self.AUTO_FAIL: rebuild and not success,
self.AUTO_CANCELED: rebuild and auto_canceled
}
for state in self.send_on:
should_send |= should_send_mapping[state]
return should_send
def _render_mail(self, rebuild, success, auto_canceled, manual_canceled):
"""Render and return subject and body of the mail to send."""
subject_template = '%(endstate)s building image %(image)s'
body_template = '\n'.join([
'Image: %(image)s',
'Status: %(endstate)s',
'Submitted by: %(user)s',
'Logs: %(logs)s',
])
endstate = None
if auto_canceled or manual_canceled:
endstate = 'Canceled'
else:
endstate = 'Succeeded' if success else 'Failed'
url = self._get_logs_url()
formatting_dict = {
'image': self.workflow.image,
'endstate': endstate,
'user': '<autorebuild>' if rebuild else self.submitter,
'logs': url
}
return (subject_template % formatting_dict, body_template % formatting_dict)
def _send_mail(self, receivers_list, subject, body):
"""Actually sends the mail with `subject` and `body` to all members of `receivers_list`."""
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.from_address
msg['To'] = ', '.join([x.strip() for x in receivers_list])
s = None
try:
s = smtplib.SMTP(self.smtp_host)
s.sendmail(self.from_address, receivers_list, msg.as_string())
except (socket.gaierror, smtplib.SMTPException):
self.log.error('Error communicating with SMTP server')
raise
finally:
if s is not None:
s.quit()
def _get_email_from_koji_obj(self, obj):
if obj.get('krb_principal'):
return obj['krb_principal'].lower()
else:
if not self.email_domain:
raise RuntimeError("Empty email_domain specified")
return '@'.join([obj['name'], self.email_domain])
def _get_koji_submitter(self):
if not self.koji_task_id:
return ""
koji_task_info = self.session.getTaskInfo(self.koji_task_id)
koji_task_owner = self.session.getUser(koji_task_info['owner'])
koji_task_owner_email = self._get_email_from_koji_obj(koji_task_owner)
self.submitter = koji_task_owner_email
return koji_task_owner_email
def _get_koji_owners(self):
result = []
if not self.koji_build_id:
return result
koji_build_info = self.session.getBuild(self.koji_build_id)
koji_tags = self.session.listTags(self.koji_build_id)
for koji_tag in koji_tags:
koji_tag_id = koji_tag['id']
koji_package_id = koji_build_info['package_id']
koji_pkg_tag_config = self.session.getPackageConfig(koji_tag_id, koji_package_id)
koji_pkg_tag_owner = self.session.getUser(koji_pkg_tag_config['owner_id'])
result.append(self._get_email_from_koji_obj(koji_pkg_tag_owner))
return result
    def _get_logs_url(self):
        """Return a URL pointing at the build logs, or None.

        Tries the koji task work directory first; on failure falls back
        to the OpenShift build log link when `url` is configured.
        """
        url = None
        try:
            # We're importing this here in order to trap ImportError
            from koji import PathInfo
            pathinfo = PathInfo(topdir=self.koji_root)
            url = '/'.join([pathinfo.work(), pathinfo.taskrelpath(self.koji_task_id)])
        except Exception:
            self.log.exception("Failed to fetch logs from koji")
            # NOTE(review): the OpenShift fallback is assumed to live inside
            # this except branch (used only when the koji lookup fails) —
            # the original indentation was ambiguous; confirm against VCS.
            if self.url and self.workflow.openshift_build_selflink:
                url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')
        return url
def _get_receivers_list(self):
receivers_list = []
if self.additional_addresses:
receivers_list += self.additional_addresses
if self.session and (self.to_koji_submitter or self.to_koji_pkgowner):
if self.to_koji_submitter:
try:
koji_task_owner_email = self._get_koji_submitter()
except Exception:
self.log.exception("Failed to include a task submitter")
else:
receivers_list.append(koji_task_owner_email)
if self.to_koji_pkgowner:
try:
koji_task_owner_emails = self._get_koji_owners()
except Exception:
self.log.exception("Failed to include a package owner")
else:
receivers_list += koji_task_owner_emails
# Remove duplicates
receivers_list = list(set(receivers_list))
# Remove empty and None items
receivers_list = [x for x in receivers_list if x]
if not receivers_list:
raise RuntimeError("No recepients found")
return receivers_list
    def run(self):
        """Plugin entry point: decide whether to notify and send the email."""
        # verify that given states are subset of allowed states
        unknown_states = self.send_on - self.allowed_states
        if len(unknown_states) > 0:
            raise PluginFailedException('Unknown state(s) "%s" for sendmail plugin' %
                                        '", "'.join(sorted(unknown_states)))

        rebuild = is_rebuild(self.workflow)
        success = not self.workflow.build_process_failed
        auto_canceled = self.workflow.autorebuild_canceled
        manual_canceled = self.workflow.build_canceled

        self.log.info('checking conditions for sending notification ...')

        if self._should_send(rebuild, success, auto_canceled, manual_canceled):
            self.log.info('notification about build result will be sent')
            try:
                self.log.debug('getting list of receivers for this component ...')
                receivers = self._get_receivers_list()
            except RuntimeError as e:
                self.log.error('couldn\'t get list of receivers, sending error message ...')
                # Render the body although the receivers cannot be fetched for error message
                _, expected_body = self._render_mail(
                    rebuild, success, auto_canceled, manual_canceled)
                body = '\n'.join([
                    'Failed to get contact for %s, error: %s' % (str(self.workflow.image), str(e)),
                    'Since your address is in "error_addresses", this email was sent to you to '
                    'take action on this.',
                    'Wanted to send following mail:',
                    '',
                    expected_body
                ])
                receivers = self.error_addresses
            self.log.info('sending notification to %s ...', receivers)
            # NOTE(review): this unconditional re-render overwrites the error
            # `body` built in the except branch above, so error recipients get
            # the plain notification body — looks unintended; confirm.
            subject, body = self._render_mail(rebuild, success, auto_canceled, manual_canceled)
            self._send_mail(receivers, subject, body)
        else:
            self.log.info('conditions for sending notification not met, doing nothing')
| |
# -*- coding: utf-8 -*-
import sys
from heapq import heappush, heappop
import itertools
from functools import total_ordering
def is_scalar(obj):
    """Return True for non-iterable values and strings, False otherwise.

    Strings are iterable but are deliberately treated as scalars here.
    """
    # BUG FIX: `basestring` does not exist on Python 3 and raised NameError
    # for any iterable argument there. `(str, bytes)` covers Python 3 text
    # and bytes; on Python 2, `str` is matched directly while `unicode` has
    # no __iter__ and is already caught by the hasattr check, so behavior
    # is preserved.
    return not hasattr(obj, '__iter__') or isinstance(obj, (str, bytes))
def prefixed(arg, prefix=None):
    """Return `arg` guaranteed to start with `prefix` (prepended if absent)."""
    needs_prefix = prefix and not arg.startswith(prefix)
    return prefix + arg if needs_prefix else arg
@total_ordering
class MaxInt(long):
    """
    A quite-large integer type that tries to be like float('inf')
    (Infinity), but can be used for slicing and other integer
    operations. float('inf') is generally more correct, except that
    mixing a float and integer in arithmetic operations will result in
    a float, which will raise an error on slicing.
    """
    def __new__(cls, *a, **kw):
        # The numeric value is fixed at sys.maxint + 1 regardless of the
        # constructor arguments. NOTE(review): `long` and `sys.maxint` are
        # Python 2 only — this class cannot be imported on Python 3.
        return super(MaxInt, cls).__new__(cls, sys.maxint + 1)

    def __init__(self, name='MAX'):
        # `name` is used only for repr/str display.
        self._name = str(name)

    def __repr__(self):
        return self._name

    def __str__(self):
        return repr(self)

    # TODO: better math
    # MaxInt acts as an absorbing element: any arithmetic involving it
    # yields the same MaxInt. The dunders are injected into the class
    # namespace via locals() inside the class body.
    for func in ('__add__', '__sub__', '__mul__', '__floordiv__', '__div__',
                 '__mod__', '__divmod__', '__pow__', '__lshift__',
                 '__rshift__'):
        locals()[func] = lambda self, other: self

    def __gt__(self, other):
        # Greater than everything except another MaxInt (total_ordering
        # derives the remaining comparisons from __eq__ and __gt__).
        return not self == other

    def __eq__(self, other):
        # All MaxInt instances compare equal to each other only.
        return isinstance(other, MaxInt)

    def __int__(self):
        return self
class OperationExample(object):
    """
    Sort of like a partial, but specialer.

    Bundles an Operation type with an example parameter and limit, used
    to build concrete operations for tests.

    # other types of tests?
    """
    def __init__(self,
                 param=None,
                 limit=None,
                 op_type=None,
                 **kw):
        self.op_type = op_type
        self.param = param
        self.limit = limit
        self.doc = kw.pop('doc', '')
        self.test = kw.pop('test', None)
        # test defaults to limit_equal_or_depleted in test_ops.py
        if kw:
            raise TypeError('got unexpected keyword arguments: %r' % kw)

    @property
    def op_name(self):
        """Name of the bound Operation type, or None when unbound."""
        if self.op_type is None:
            return None
        return self.op_type.__name__

    @property
    def disp_name(self):
        """Human-readable display name for debugging output."""
        if not self.op_type:
            return '(unbound OperationExample)'
        tmpl = '%(type)s(%(param)r, limit=%(limit)s)'
        if self.op_type.input_field is None:
            tmpl = '%(type)s(limit=%(limit)s)'
        return tmpl % {'type': self.op_type.__name__,
                       'param': self.param,
                       'limit': self.limit}

    def bind_op_type(self, op_type):
        """Attach an Operation type (if unbound) and derive a default limit
        from its per-query limit (or that of its first sub-operation)."""
        if self.op_type is None:
            self.op_type = op_type
        if self.limit is None:
            try:
                pql = op_type.per_query_limit
            except AttributeError:
                pql = op_type.subop_chain[0].per_query_limit
            self.limit = pql.get_limit()
        return

    def make_op(self, mag=None):
        """Instantiate the bound Operation type, scaling the limit by `mag`."""
        if not self.op_type:
            raise TypeError('no Operation type assigned')
        mag = int(mag or 1)
        limit = self.limit * mag
        if self.op_type.input_field is None:
            return self.op_type(limit=limit)
        return self.op_type(self.param, limit=limit)

    def __repr__(self):
        cn = self.__class__.__name__
        # BUG FIX: the original zipped the full attribute-name list against
        # a *filtered* value list, mislabeling values whenever an earlier
        # attribute was falsy (e.g. param=None, limit=3 rendered as
        # "param=3"). Pair each name with its own value before filtering.
        attrs = ['param', 'limit', 'test', 'doc']
        kw_parts = ['op_type=%s' % self.op_name]
        kw_parts.extend(['%s=%r' % (a, getattr(self, a))
                         for a in attrs if getattr(self, a)])
        kwarg_str = ', '.join(kw_parts)
        return '%s(%s)' % (cn, kwarg_str)

    __str__ = __repr__
"""
TypeWrapper and MetaTypeWrapper are a pair of what are technically
metaclasses, but really just a very overwrought way of enabling
customized versions of types floating around in some
locations. Because Wapiti is a DSL, but also just a bunch of Python,
we have to deal with the fact that if you modify a type/class, it will
be modified everywhere that references it.
TL;DR: This overblown thing lets Operations use something like
Prioritized(GetCategory, key='total_count'), which sets a priority for
better queueing, without modifying the GetCategory Operation
itself. (Different operations will want to prioritize different
things.)
(There is almost certainly a better way, but this was a bit of
fun. Ever made an object that is an instance and a subclass of
itself?)
"""
def make_type_wrapper(name, init_args=None):
    """Build a Wrapper subclass named `name` accepting `init_args`.

    Each entry of `init_args` is either a plain arg name or an
    (arg name, default value) pair.
    """
    arg_names = []
    arg_defaults = {}
    for spec in (init_args or []):
        try:
            arg_name, default_val = spec
        except ValueError:
            arg_name = spec
        else:
            arg_defaults[arg_name] = default_val
        if not isinstance(arg_name, basestring):
            raise TypeError('expected string arg name, not %r' % arg_name)
        arg_names.append(arg_name)
    namespace = {'_args': arg_names, '_defaults': arg_defaults}
    return WrapperType(str(name), (Wrapper,), namespace)
class WrapperType(type):
    """Metaclass for Wrapper classes; provides a descriptive class repr
    built from the declared `_args` and their `_defaults`."""
    @property
    def _repr_args(self):
        # Render each declared arg, paired with its default when one exists.
        rendered = []
        for arg_name in self._args:
            if arg_name in self._defaults:
                rendered.append((arg_name, self._defaults[arg_name]))
            else:
                rendered.append(arg_name)
        return rendered

    def __repr__(cls):
        name, cname = cls.__name__, cls.__class__.__name__
        if cls._repr_args:
            return '%s(%r, %r)' % (cname, name, cls._repr_args)
        return '%s(%r)' % (cname, name)
class Wrapper(object):
    """Instance-level wrapper that records attribute overrides.

    Attribute reads fall through to the wrapped object unless explicitly
    overridden; overrides are tracked in `_wrapped_dict` so the wrapped
    object itself is never mutated.
    """
    __metaclass__ = WrapperType  # Python 2 metaclass hook
    _args, _defaults = [], {}

    def __init__(self, to_wrap, *args, **kwargs):
        wrapped_dict = {}
        if isinstance(to_wrap, Wrapper):
            # Re-wrapping a wrapper: copy its overrides and unwrap to the
            # innermost target.
            wrapped_dict = dict(to_wrap._wrapped_dict)
            to_wrap = to_wrap._wrapped
        # Write through __dict__ so these bookkeeping slots are not
        # themselves recorded as overrides by __setattr__.
        self.__dict__['_wrapped'] = to_wrap
        self.__dict__['_wrapped_dict'] = wrapped_dict

        cn = self.__name__
        # Resolve each declared arg from positionals, keywords, then defaults.
        for arg_i, arg_name in enumerate(self._args):
            try:
                val = args[arg_i]
                if arg_name in kwargs:
                    raise TypeError('%s got multiple values for arg %r'
                                    % (cn, arg_name))
            except IndexError:
                try:
                    val = kwargs.pop(arg_name)
                except KeyError:
                    try:
                        val = self._defaults[arg_name]
                    except KeyError:
                        raise TypeError('%s expected required arg %r'
                                        % (cn, arg_name))
            setattr(self, arg_name, val)
        return

    def __repr__(self):
        kv = ', '.join(['%s=%r' % (k, v) for k, v
                        in self._wrapped_dict.items()])
        tmpl = "<wrapped %r (%s)>"
        return tmpl % (self._wrapped, kv)

    def __getattr__(self, name):
        # Only called when normal lookup fails: delegate to the target.
        return getattr(self._wrapped, name)

    def __setattr__(self, name, val):
        super(Wrapper, self).__setattr__(name, val)
        self._wrapped_dict[name] = val

    def __delattr__(self, name):
        # BUG FIX: the original signature was __delattr__(self, name, val)
        # and it forwarded both arguments to object.__delattr__. Python
        # invokes __delattr__ with only (self, name) and object.__delattr__
        # accepts a single name, so every `del obj.attr` raised TypeError.
        super(Wrapper, self).__delattr__(name)
        self._wrapped_dict.pop(name, None)

    def __call__(self, *a, **kw):
        return self._wrapped(*a, **kw)
REMOVED = '<removed-task>'  # sentinel marking lazily-cancelled heap entries


class PriorityQueue(object):
    """
    Real quick type based on the heapq docs.

    Entries are [negated_priority, insertion_count, task] lists; the
    monotonically increasing counter breaks priority ties FIFO and keeps
    heap comparisons away from arbitrary task objects. Removal is lazy:
    dead entries are marked REMOVED and culled on peek/pop.
    """
    def __init__(self):
        self._pq = []
        self._entry_map = {}
        self.counter = itertools.count()

    def add(self, task, priority=None):
        """Add `task`; larger `priority` numbers are served first.

        Re-adding an existing task updates its priority.
        """
        # larger numbers = higher priority; heapq is a min-heap, so negate
        priority = -int(priority or 0)
        if task in self._entry_map:
            # BUG FIX: this previously called the non-existent
            # self.remove_task(), so re-adding a queued task raised
            # AttributeError. The method is named remove().
            self.remove(task)
        count = next(self.counter)
        entry = [priority, count, task]
        self._entry_map[task] = entry
        heappush(self._pq, entry)

    def remove(self, task):
        """Lazily remove `task` by marking its heap entry REMOVED.

        Raises KeyError if the task is not queued.
        """
        entry = self._entry_map.pop(task)
        entry[-1] = REMOVED

    def _cull(self):
        """Pop dead entries off the heap top; raise IndexError when empty."""
        while self._pq:
            priority, count, task = self._pq[0]
            if task is REMOVED:
                heappop(self._pq)
                continue
            return
        raise IndexError('empty priority queue')

    def peek(self, default=REMOVED):
        """Return the highest-priority task without removing it."""
        try:
            self._cull()
            _, _, task = self._pq[0]
        except IndexError:
            if default is not REMOVED:
                return default
            raise IndexError('peek on empty queue')
        return task

    def pop(self, default=REMOVED):
        """Remove and return the highest-priority task."""
        try:
            self._cull()
            _, _, task = heappop(self._pq)
            del self._entry_map[task]
        except IndexError:
            if default is not REMOVED:
                return default
            raise IndexError('pop on empty queue')
        return task

    def __len__(self):
        return len(self._entry_map)
def chunked_iter(src, size, **kw):
    """
    Generates 'size'-sized chunks from 'src' iterable. Unless
    the optional 'fill' keyword argument is provided, iterables
    not even divisible by 'size' will have a final chunk that is
    smaller than 'size'.

    Note that fill=None will in fact use None as the fill value.

    >>> list(chunked_iter(range(10), 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> list(chunked_iter(range(10), 3, fill=None))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
    """
    size = int(size)
    if size <= 0:
        raise ValueError('expected a positive integer chunk size')
    try:
        fill_val = kw.pop('fill')
        do_fill = True
    except KeyError:
        fill_val = None
        do_fill = False
    if kw:
        raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
    if not src:
        return
    chunk = []
    for count, item in enumerate(src, start=1):
        chunk.append(item)
        if count % size == 0:
            yield chunk
            chunk = []
    if chunk:
        if do_fill:
            # Pad the final partial chunk up to the full size.
            chunk.extend([fill_val] * (size - len(chunk)))
        yield chunk
    return
# From http://en.wikipedia.org/wiki/Wikipedia:Namespace
# Maps canonical MediaWiki namespace names to their numeric ids: subject
# namespaces are even, their "talk" counterparts odd (+1), and virtual
# namespaces (Special/Media) are negative.
NAMESPACES = {
    'Main': 0,
    'Talk': 1,
    'User': 2,
    'User talk': 3,
    'Wikipedia': 4,
    'Wikipedia talk': 5,
    'File': 6,
    'File talk': 7,
    'MediaWiki': 8,
    'MediaWiki talk': 9,
    'Template': 10,
    'Template talk': 11,
    'Help': 12,
    'Help talk': 13,
    'Category': 14,
    'Category talk': 15,
    'Portal': 100,
    'Portal talk': 101,
    'Book': 108,
    'Book talk': 109,
    'Special': -1,
    'Media': -2}
def bucketize(src, keyfunc=None):
    """
    Group values in 'src' iterable by value returned by 'keyfunc'.

    keyfunc defaults to bool, which will group the values by
    truthiness; at most there will be two keys, True and False, and
    each key will have a list with at least one item.

    >>> bucketize(range(5))
    {False: [0], True: [1, 2, 3, 4]}
    >>> is_odd = lambda x: x % 2 == 1
    >>> bucketize(range(5), is_odd)
    {False: [0, 2, 4], True: [1, 3]}

    Value lists are not deduplicated:

    >>> bucketize([None, None, None, 'hello'])
    {False: [None, None, None], True: ['hello']}
    """
    if not is_iterable(src):
        raise TypeError('expected an iterable')
    if keyfunc is None:
        keyfunc = bool
    if not callable(keyfunc):
        raise TypeError('expected callable key function')

    buckets = {}
    for item in src:
        buckets.setdefault(keyfunc(item), []).append(item)
    return buckets
def bucketize_bool(src, keyfunc=None):
    """
    Like bucketize, but for added convenience returns a tuple of
    (truthy_values, falsy_values).

    >>> nonempty, empty = bucketize_bool(['', '', 'hi', '', 'bye'])
    >>> nonempty
    ['hi', 'bye']

    keyfunc defaults to bool, but can be carefully overridden to
    use any function that returns either True or False.

    >>> import string
    >>> is_digit = lambda x: x in string.digits
    >>> decimal_digits, hexletters = bucketize_bool(string.hexdigits, is_digit)
    >>> ''.join(decimal_digits), ''.join(hexletters)
    ('0123456789', 'abcdefABCDEF')
    """
    grouped = bucketize(src, keyfunc)
    return grouped.get(True, []), grouped.get(False, [])
def coerce_namespace(ns_arg):
    """Map a namespace name to its numeric id, passing unknown names through
    in canonical (capitalized) form."""
    canonical = str(ns_arg).capitalize()
    return NAMESPACES.get(canonical, canonical)
| |
# -*- test-case-name: txweb2.dav.test.test_report_expand -*-
##
# Copyright (c) 2006-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV principal-match report
"""
__all__ = ["report_DAV__principal_match"]
from twisted.internet.defer import deferredGenerator, waitForDeferred
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.http import StatusResponse, HTTPError
from txdav.xml import element
from txdav.xml.element import dav_namespace
from txweb2.dav.http import ErrorResponse, MultiStatusResponse
from txweb2.dav.method import prop_common
from txweb2.dav.method.report import NumberOfMatchesWithinLimits
from txweb2.dav.method.report import max_number_of_matches
from txweb2.dav.resource import isPrincipalResource
log = Logger()
def report_DAV__principal_match(self, request, principal_match):
    """
    Generate a principal-match REPORT. (RFC 3744, section 9.3)

    Finds principal resources matching the currently authenticated
    principal: either among "self" plus its group memberships, or among
    child resources whose requested property points at a principal.
    Written in twisted deferredGenerator style — every deferred is
    yielded via waitForDeferred and unwrapped with getResult().
    """
    # Verify root element
    if not isinstance(principal_match, element.PrincipalMatch):
        raise ValueError("%s expected as root element, not %s."
                         % (element.PrincipalMatch.sname(), principal_match.sname()))

    # Only handle Depth: 0
    depth = request.headers.getHeader("depth", "0")
    if depth != "0":
        log.error("Non-zero depth is not allowed: %s" % (depth,))
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Depth %s not allowed" % (depth,)))

    # Get a single DAV:prop element from the REPORT request body
    propertiesForResource = None
    propElement = None
    principalPropElement = None
    lookForPrincipals = True

    for child in principal_match.children:
        if child.qname() == (dav_namespace, "prop"):
            propertiesForResource = prop_common.propertyListForResource
            propElement = child

        elif child.qname() == (dav_namespace, "self"):
            lookForPrincipals = True

        elif child.qname() == (dav_namespace, "principal-property"):
            # Must have one and only one property in this element
            if len(child.children) != 1:
                log.error("Wrong number of properties in DAV:principal-property: %s"
                          % (len(child.children),))
                raise HTTPError(StatusResponse(
                    responsecode.BAD_REQUEST,
                    "DAV:principal-property must contain exactly one property"
                ))

            lookForPrincipals = False
            principalPropElement = child.children[0]

    # Run report for each referenced principal
    try:
        responses = []
        matchcount = 0

        myPrincipalURL = self.currentPrincipal(request).children[0]

        if lookForPrincipals:
            # Find the set of principals that represent "self".

            # First add "self"
            principal = waitForDeferred(request.locateResource(str(myPrincipalURL)))
            yield principal
            principal = principal.getResult()
            selfItems = [principal, ]

            # Get group memberships for "self" and add each of those
            d = waitForDeferred(principal.groupMemberships())
            yield d
            memberships = d.getResult()
            selfItems.extend(memberships)

            # Now add each principal found to the response provided the principal resource is a child of
            # the current resource.
            for principal in selfItems:
                # Get all the URIs that point to the principal resource
                # FIXME: making the assumption that the principalURL() is the URL of the resource we found
                principal_uris = [principal.principalURL()]
                principal_uris.extend(principal.alternateURIs())

                # Compare each one to the request URI and return at most one that matches
                for uri in principal_uris:
                    if uri.startswith(request.uri):
                        # Check size of results is within limit
                        matchcount += 1
                        if matchcount > max_number_of_matches:
                            raise NumberOfMatchesWithinLimits(max_number_of_matches)

                        d = waitForDeferred(prop_common.responseForHref(
                            request,
                            responses,
                            element.HRef.fromString(uri),
                            principal,
                            propertiesForResource,
                            propElement
                        ))
                        yield d
                        d.getResult()
                        # At most one matching URI per principal.
                        break
        else:
            # Do some optimisation of access control calculation by determining any inherited ACLs outside of
            # the child resource loop and supply those to the checkPrivileges on each child.
            filteredaces = waitForDeferred(self.inheritedACEsforChildren(request))
            yield filteredaces
            filteredaces = filteredaces.getResult()

            children = []
            d = waitForDeferred(self.findChildren("infinity", request, lambda x, y: children.append((x, y)),
                                                  privileges=(element.Read(),), inherited_aces=filteredaces))
            yield d
            d.getResult()

            for child, uri in children:
                # Try to read the requested property from this resource
                try:
                    prop = waitForDeferred(child.readProperty(principalPropElement.qname(), request))
                    yield prop
                    prop = prop.getResult()
                    if prop:
                        prop.removeWhitespaceNodes()

                    # Only a single DAV:href child can identify a principal.
                    if prop and len(prop.children) == 1 and isinstance(prop.children[0], element.HRef):
                        # Find principal associated with this property and test it
                        principal = waitForDeferred(request.locateResource(str(prop.children[0])))
                        yield principal
                        principal = principal.getResult()

                        if principal and isPrincipalResource(principal):
                            d = waitForDeferred(principal.principalMatch(myPrincipalURL))
                            yield d
                            matched = d.getResult()
                            if matched:
                                # Check size of results is within limit
                                matchcount += 1
                                if matchcount > max_number_of_matches:
                                    raise NumberOfMatchesWithinLimits(max_number_of_matches)

                                d = waitForDeferred(prop_common.responseForHref(
                                    request,
                                    responses,
                                    element.HRef.fromString(uri),
                                    child,
                                    propertiesForResource,
                                    propElement
                                ))
                                yield d
                                d.getResult()
                except HTTPError:
                    # Just ignore a failure to access the property. We treat this like a property that does not exist
                    # or does not match the principal.
                    pass

    except NumberOfMatchesWithinLimits:
        log.error("Too many matching components in principal-match report")
        raise HTTPError(ErrorResponse(
            responsecode.FORBIDDEN,
            element.NumberOfMatchesWithinLimits()
        ))

    yield MultiStatusResponse(responses)

# deferredGenerator turns the generator above into a Deferred-returning
# callable (legacy twisted idiom predating inlineCallbacks).
report_DAV__principal_match = deferredGenerator(report_DAV__principal_match)
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python API for executing a tf.data.Dataset using a tf.data service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import compression_ops
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.experimental.ops.distribute_options import ExternalStatePolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
class ProcessingMode(object):
  """tf.data service processing modes."""

  PARALLEL_EPOCHS = "parallel_epochs"
  DISTRIBUTED_EPOCH = "distributed_epoch"

  @staticmethod
  def validate(mode):
    """Raises a ValueError if the given object is not a valid processing mode."""
    valid_modes = [
        ProcessingMode.PARALLEL_EPOCHS, ProcessingMode.DISTRIBUTED_EPOCH
    ]
    if mode in valid_modes:
      return
    raise ValueError(
        "{0} is not a valid processing mode. Valid modes: {1}".format(
            mode, valid_modes))
class _DataServiceDatasetV2(dataset_ops.DatasetSource):
  """A `Dataset` that reads elements from the tf.data service."""

  def __init__(self,
               dataset_id,
               processing_mode,
               address,
               protocol,
               job_name=None,
               max_outstanding_requests=None,
               task_refresh_interval_hint_ms=None):
    """Constructs a _DataServiceDatasetV2.

    Args:
      dataset_id: The dataset id for the dataset to read from.
      processing_mode: A string specifying the policy for how data should be
        processed by tf.data workers. Can be either "parallel_epochs" to have
        each tf.data worker process a copy of the dataset, or
        "distributed_epoch" to split a single iteration of the dataset across
        all the workers.
      address: The tf.data service address, e.g. "localhost:5000".
      protocol: The protocol to use for communicating with the tf.data service,
        e.g. "grpc".
      job_name: (Optional.) The name of the job. This argument makes it
        possible for multiple datasets to share the same job. The default
        behavior is that the dataset creates anonymous, exclusively owned jobs.
      max_outstanding_requests: (Optional.) A limit on how many elements may be
        requested at the same time. You can use this option to control the
        amount of memory used, since `distribute` won't use more than
        `element_size` * `max_outstanding_requests` of memory.
      task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
        the dispatcher for task changes.
    """
    # Normalize optional arguments: the empty string means "anonymous job"
    # on the op level, and AUTOTUNE lets the runtime pick the values.
    if job_name is None:
      job_name = ""
    if max_outstanding_requests is None:
      max_outstanding_requests = dataset_ops.AUTOTUNE
    if task_refresh_interval_hint_ms is None:
      task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE

    # Convert all op inputs to tensors of the dtypes the kernel expects.
    self._dataset_id = ops.convert_to_tensor(
        dataset_id, dtype=dtypes.int64, name="dataset_id")
    self._processing_mode = ops.convert_to_tensor(
        processing_mode, dtype=dtypes.string, name="processing_mode")
    self._address = ops.convert_to_tensor(
        address, dtype=dtypes.string, name="address")
    self._protocol = ops.convert_to_tensor(
        protocol, dtype=dtypes.string, name="protocol")
    self._job_name = ops.convert_to_tensor(
        job_name, dtype=dtypes.string, name="job_name")
    self._max_outstanding_requests = ops.convert_to_tensor(
        max_outstanding_requests,
        dtype=dtypes.int64,
        name="max_outstanding_requests")
    # Datasets executed by the tf.data service produce compressed elements
    # represented by scalar DT_VARIANTs.
    self._element_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)

    variant_tensor = gen_experimental_dataset_ops.data_service_dataset(
        dataset_id=self._dataset_id,
        processing_mode=self._processing_mode,
        address=self._address,
        protocol=self._protocol,
        job_name=self._job_name,
        max_outstanding_requests=self._max_outstanding_requests,
        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
        iteration_counter=gen_experimental_dataset_ops.dummy_iteration_counter(
        ),
        **self._flat_structure)
    super(_DataServiceDatasetV2, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    # Always a scalar variant; see the comment on _element_spec above.
    return self._element_spec
class _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` that executes its input through the tf.data service."""

  @functools.wraps(_DataServiceDatasetV2.__init__)
  def __init__(self, dataset_id, processing_mode, address, protocol, job_name,
               max_outstanding_requests, task_refresh_interval_hint_ms):
    # Thin TF1 (graph-mode) adapter: all real work happens in the wrapped
    # V2 dataset; this class only provides the DatasetV1 interface.
    self._wrapped = _DataServiceDatasetV2(
        dataset_id=dataset_id,
        processing_mode=processing_mode,
        address=address,
        protocol=protocol,
        job_name=job_name,
        max_outstanding_requests=max_outstanding_requests,
        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms)
    super(_DataServiceDatasetV1, self).__init__(self._wrapped)
# Select the eager (V2) or graph-compatible (V1) implementation depending on
# whether TF 2.x behavior is enabled in this process.
if tf2.enabled():
  _DataServiceDataset = _DataServiceDatasetV2
else:
  _DataServiceDataset = _DataServiceDatasetV1
def _parse_service(service):
  """Parses a tf.data service string into a (protocol, address) tuple.

  Args:
    service: A string in the format "protocol://address".

  Returns:
    The parsed (protocol, address) tuple

  Raises:
    ValueError: If `service` is not a non-empty string of the expected form.
  """
  if not isinstance(service, six.string_types):
    raise ValueError(
        "service must be a string, but service was of type {0}. service={1}"
        .format(type(service), service))
  if not service:
    raise ValueError("service must not be empty")
  tokens = service.split("://")
  if len(tokens) > 2:
    raise ValueError("malformed service string has multiple '://': %s" %
                     service)
  if len(tokens) < 2:
    raise ValueError("service string %s does not begin with a protocol. "
                     "The service should be in the format "
                     "<protocol>://<address>, e.g. grpc://localhost:5000" %
                     service)
  return tokens
def _from_dataset_id(processing_mode,
                     service,
                     dataset_id,
                     element_spec,
                     job_name=None,
                     max_outstanding_requests=None,
                     task_refresh_interval_hint_ms=None):
  """Creates a dataset which reads data from the tf.data service.

  This transformation is similar to `from_dataset_id`, but supports additional
  parameters which we do not yet want to add to the public Python API.

  Args:
    processing_mode: A string specifying the policy for how data should be
      processed by tf.data workers. Can be either "parallel_epochs" to have
      each tf.data worker process a copy of the dataset, or
      "distributed_epoch" to split a single iteration of the dataset across
      all the workers.
    service: A string indicating how to connect to the tf.data service. The
      string should be in the format "<protocol>://<address>", e.g.
      "grpc://localhost:5000".
    dataset_id: The id of the dataset to read from. This id is returned by
      `register_dataset` when the dataset is registered with the tf.data
      service.
    element_spec: A nested structure of `tf.TypeSpec`s representing the type of
      elements produced by the dataset. Use `tf.data.Dataset.element_spec` to
      see the element spec for a given dataset.
    job_name: (Optional.) The name of the job. This argument makes it possible
      for multiple datasets to share the same job. The default behavior is that
      the dataset creates anonymous, exclusively owned jobs.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
    task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
      dispatcher for task changes.

  Returns:
    A `tf.data.Dataset` which reads from the tf.data service.
  """
  # Validate arguments in the same order the public API documents them.
  ProcessingMode.validate(processing_mode)
  if job_name is not None:
    if not isinstance(job_name, six.string_types):
      raise ValueError("job_name must be a string, but job_name was of type "
                       "{0}. job_name={1}".format(type(job_name), job_name))
    if not job_name:
      raise ValueError("job_name must not be empty")
  if element_spec is None:
    raise ValueError("element_spec must not be None")
  protocol, address = _parse_service(service)

  ds = _DataServiceDataset(
      dataset_id=dataset_id,
      processing_mode=processing_mode,
      address=address,
      protocol=protocol,
      job_name=job_name,
      max_outstanding_requests=max_outstanding_requests,
      task_refresh_interval_hint_ms=task_refresh_interval_hint_ms)

  def _uncompress(element):
    # Elements arrive compressed from the service (see `register_dataset`);
    # restore their original structure here.
    return compression_ops.uncompress(element, output_spec=element_spec)

  ds = ds.map(_uncompress, num_parallel_calls=dataset_ops.AUTOTUNE)

  if job_name:
    # A shared job already splits data across its consumers, so autosharding
    # on top of it would double-shard; disable it.
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
    ds = ds.with_options(options)
  return ds
def _distribute(processing_mode,
                service,
                job_name=None,
                max_outstanding_requests=None,
                task_refresh_interval_hint_ms=None):
  """A transformation that moves dataset processing to the tf.data service.

  This transformation is similar to `distribute`, but supports additional
  parameters which we do not yet want to add to the public Python API.

  Args:
    processing_mode: A string specifying the policy for how data should be
      processed by tf.data workers. Can be either "parallel_epochs" to have
      each tf.data worker process a copy of the dataset, or
      "distributed_epoch" to split a single iteration of the dataset across
      all the workers.
    service: A string indicating how to connect to the tf.data service. The
      string should be in the format "<protocol>://<address>", e.g.
      "grpc://localhost:5000".
    job_name: (Optional.) The name of the job. This argument makes it possible
      for multiple datasets to share the same job. The default behavior is that
      the dataset creates anonymous, exclusively owned jobs.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
    task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
      dispatcher for task changes.

  Returns:
    Dataset: A `Dataset` of the elements produced by the data service.
  """
  # Fail fast on an invalid mode before returning the transformation.
  ProcessingMode.validate(processing_mode)

  def _apply_fn(dataset):  # pylint: disable=missing-docstring
    # Register the input pipeline with the service, then read it back through
    # the private reader, which accepts internal-only parameters.
    return _from_dataset_id(
        processing_mode,
        service,
        register_dataset(service, dataset),
        dataset.element_spec,
        job_name=job_name,
        max_outstanding_requests=max_outstanding_requests,
        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms)

  return _apply_fn
@tf_export("data.experimental.service.distribute")
def distribute(processing_mode,
               service,
               job_name=None,
               max_outstanding_requests=None):
  """A transformation that moves dataset processing to the tf.data service.
  When you iterate over a dataset containing the `distribute` transformation,
  the tf.data service creates a "job" which produces data for the dataset
  iteration.
  The tf.data service uses a cluster of workers to prepare data for training
  your model.
  The `processing_mode` argument to `tf.data.experimental.service.distribute`
  describes how to leverage multiple workers to process the input dataset.
  Currently, there are two processing modes to choose from: "distributed_epoch"
  and "parallel_epochs".
  "distributed_epoch" means that the dataset will be split across all tf.data
  service workers.
  The dispatcher produces "splits" for the dataset and sends them to workers for
  further processing. For example, if a dataset begins with a list of filenames,
  the dispatcher will iterate through the filenames and send the filenames to
  tf.data workers, which will perform the rest of the dataset transformations on
  those files. "distributed_epoch" is useful when your model needs to see each
  element of the dataset exactly once, or if it needs to see the data in a
  generally-sequential order. "distributed_epoch" only works for datasets with
  splittable sources, such as `Dataset.from_tensor_slices`,
  `Dataset.list_files`, or `Dataset.range`.
  "parallel_epochs" means that the entire input dataset will be processed
  independently by each of the tf.data service workers.
  For this reason, it is important to shuffle data (e.g. filenames)
  non-deterministically, so that each worker will process the elements of the
  dataset in a different order. "parallel_epochs" can be used to distribute
  datasets that aren't splittable.
  With two workers, "parallel_epochs" will produce every element of the dataset
  twice:
  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> # Start two workers
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
  ...     processing_mode="parallel_epochs", service=dispatcher.target))
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]
  "distributed_epoch", on the other hand, will still produce each element once:
  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
  ...     processing_mode="distributed_epoch", service=dispatcher.target))
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
  When using `apply(tf.data.experimental.service.distribute(...))`, the dataset
  before the `apply` transformation executes within the tf.data service, while
  the operations after `apply` happen within the local process.
  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(5)
  >>> dataset = dataset.map(lambda x: x*x)
  >>> dataset = dataset.apply(
  ...    tf.data.experimental.service.distribute("parallel_epochs",
  ...                                            dispatcher.target))
  >>> dataset = dataset.map(lambda x: x+1)
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [1, 1, 2, 2, 5, 5, 10, 10, 17, 17]
  In the above example, the dataset operations (before applying the `distribute`
  function on the elements) will be executed on the tf.data workers,
  and the elements are provided over RPC. The remaining transformations
  (after the call to `distribute`) will be executed locally. The dispatcher
  and the workers will bind to unused free ports (which are chosen at random),
  in order to communicate with each other. However, to bind them to specific
  ports, the `port` parameter can be passed.
  The `job_name` argument allows jobs to be shared across multiple
  datasets. Instead of each dataset creating its own job, all
  datasets with the same `job_name` will consume from the same job. A new job
  will be created for each iteration of the dataset (with each repetition of
  `Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`
  is serving on `localhost:5000` and two training workers (in either a single
  client or multi-client setup) iterate over the below dataset, and there is a
  single tf.data worker:
  ```
  range5_dataset = tf.data.Dataset.range(5)
  dataset = range5_dataset.apply(tf.data.experimental.service.distribute(
      "parallel_epochs", "grpc://localhost:5000", job_name="my_job_name"))
  for iteration in range(3):
    print(list(dataset))
  ```
  The elements of each job will be split between the two processes, with
  elements being consumed by the processes on a first-come first-served basis.
  One possible result is that process 1 prints
  ```
  [0, 2, 4]
  [0, 1, 3]
  [1]
  ```
  and process 2 prints
  ```
  [1, 3]
  [2, 4]
  [0, 2, 3, 4]
  ```
  Job names must not be re-used across different training jobs within the
  lifetime of the tf.data service. In general, the tf.data service is expected
  to live for the duration of a single training job.
  To use the tf.data service with multiple training jobs, make sure to use
  different job names to avoid conflicts. For example, suppose a training job
  calls `distribute` with `job_name="job"` and reads until end of input. If
  another independent job connects to the same tf.data service and tries to read
  from `job_name="job"`, it will immediately receive end of input, without
  getting any data.
  **Keras and Distribution Strategies**
  The dataset produced by the `distribute` transformation can be passed to
  Keras' `Model.fit` or Distribution Strategy's
  `tf.distribute.Strategy.experimental_distribute_dataset` like any other
  `tf.data.Dataset`. We recommend setting a `job_name` on the call to
  `distribute` so that if there are multiple workers, they read data from the
  same job. Note that the autosharding normally performed by
  `experimental_distribute_dataset` will be disabled when setting a `job_name`,
  since sharing the job already results in splitting data across the workers.
  When using a shared job, data will be dynamically balanced across workers, so
  that they reach end of input about the same time. This results in better
  worker utilization than with autosharding, where each worker processes an
  independent set of files, and some workers may run out of data earlier than
  others.
  Args:
    processing_mode: A string specifying the policy for how data should be
      processed by tf.data workers. Can be either "parallel_epochs" to have
      each tf.data worker process a copy of the dataset, or
      "distributed_epoch" to split a single iteration of the dataset across
      all the workers.
    service: A string indicating how to connect to the tf.data service. The
      string should be in the format "protocol://address", e.g.
      "grpc://localhost:5000".
    job_name: (Optional.) The name of the job. This argument makes it possible
      for multiple datasets to share the same job. The default behavior is that
      the dataset creates anonymous, exclusively owned jobs.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
  Returns:
    Dataset: A `Dataset` of the elements produced by the data service.
  """
  # Delegate to the private implementation, which additionally supports
  # internal-only parameters not exposed in the public API.
  return _distribute(
      processing_mode=processing_mode,
      service=service,
      job_name=job_name,
      max_outstanding_requests=max_outstanding_requests)
@tf_export("data.experimental.service.register_dataset")
def register_dataset(service, dataset):
  """Registers a dataset with the tf.data service.
  `register_dataset` registers a dataset with the tf.data service so that
  datasets can be created later with
  `tf.data.experimental.service.from_dataset_id`. This is useful when the
  dataset
  is registered by one process, then used in another process. When the same
  process is both registering and reading from the dataset, it is simpler to use
  `tf.data.experimental.service.distribute` instead.
  If the dataset is already registered with the tf.data service,
  `register_dataset` returns the already-registered dataset's id.
  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> worker = tf.data.experimental.service.WorkerServer(
  ...     tf.data.experimental.service.WorkerConfig(
  ...         dispatcher_address=dispatcher_address))
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset_id = tf.data.experimental.service.register_dataset(
  ...     dispatcher.target, dataset)
  >>> dataset = tf.data.experimental.service.from_dataset_id(
  ...     processing_mode="parallel_epochs",
  ...     service=dispatcher.target,
  ...     dataset_id=dataset_id,
  ...     element_spec=dataset.element_spec)
  >>> print(list(dataset.as_numpy_iterator()))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
  Args:
    service: A string indicating how to connect to the tf.data service. The
      string should be in the format "protocol://address", e.g.
      "grpc://localhost:5000".
    dataset: A `tf.data.Dataset` to register with the tf.data service.
  Returns:
    A scalar int64 tensor of the registered dataset's id.
  """
  protocol, address = _parse_service(service)
  policy = dataset.options().experimental_external_state_policy
  if policy is None:
    policy = ExternalStatePolicy.WARN

  def _compress(*element):
    # Compress each element to reduce the amount of data sent over the
    # network; readers uncompress on receipt.
    return compression_ops.compress(element)

  dataset = dataset.map(_compress, num_parallel_calls=dataset_ops.AUTOTUNE)
  dataset = dataset.prefetch(dataset_ops.AUTOTUNE)
  # Apply options so that the dataset executed in the tf.data service will
  # be optimized and support autotuning.
  dataset = dataset._apply_options()  # pylint: disable=protected-access
  return gen_experimental_dataset_ops.register_dataset(
      dataset._variant_tensor,  # pylint: disable=protected-access
      address=address,
      protocol=protocol,
      external_state_policy=policy.value)
@tf_export("data.experimental.service.from_dataset_id")
def from_dataset_id(processing_mode,
                    service,
                    dataset_id,
                    element_spec=None,
                    job_name=None,
                    max_outstanding_requests=None):
  """Creates a dataset which reads data from the tf.data service.
  This is useful when the dataset is registered by one process, then used in
  another process. When the same process is both registering and reading from
  the dataset, it is simpler to use `tf.data.experimental.service.distribute`
  instead.
  Before using `from_dataset_id`, the dataset must have been registered with the
  tf.data service using `tf.data.experimental.service.register_dataset`.
  `register_dataset` returns a dataset id for the registered dataset. That is
  the `dataset_id` which should be passed to `from_dataset_id`.
  The `element_spec` argument indicates the `tf.TypeSpec`s for the elements
  produced by the dataset. Currently `element_spec` must be explicitly
  specified, and match the dataset registered under `dataset_id`. `element_spec`
  defaults to `None` so that in the future we can support automatically
  discovering the `element_spec` by querying the tf.data service.
  `tf.data.experimental.service.distribute` is a convenience method which
  combines `register_dataset` and `from_dataset_id` into a dataset
  transformation.
  See the documentation for `tf.data.experimental.service.distribute` for more
  detail about how `from_dataset_id` works.
  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> worker = tf.data.experimental.service.WorkerServer(
  ...     tf.data.experimental.service.WorkerConfig(
  ...         dispatcher_address=dispatcher_address))
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset_id = tf.data.experimental.service.register_dataset(
  ...     dispatcher.target, dataset)
  >>> dataset = tf.data.experimental.service.from_dataset_id(
  ...     processing_mode="parallel_epochs",
  ...     service=dispatcher.target,
  ...     dataset_id=dataset_id,
  ...     element_spec=dataset.element_spec)
  >>> print(list(dataset.as_numpy_iterator()))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
  Args:
    processing_mode: A string specifying the policy for how data should be
      processed by tf.data workers. Can be either "parallel_epochs" to have
      each tf.data worker process a copy of the dataset, or
      "distributed_epoch" to split a single iteration of the dataset across
      all the workers.
    service: A string indicating how to connect to the tf.data service. The
      string should be in the format "protocol://address", e.g.
      "grpc://localhost:5000".
    dataset_id: The id of the dataset to read from. This id is returned by
      `register_dataset` when the dataset is registered with the tf.data
      service.
    element_spec: A nested structure of `tf.TypeSpec`s representing the type of
      elements produced by the dataset. Use `tf.data.Dataset.element_spec` to
      see the element spec for a given dataset.
    job_name: (Optional.) The name of the job. This argument makes it possible
      for multiple datasets to share the same job. The default behavior is that
      the dataset creates anonymous, exclusively owned jobs.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
  Returns:
    A `tf.data.Dataset` which reads from the tf.data service.
  """
  # All validation and construction happens in the private helper, which also
  # understands internal-only parameters that are not exposed here.
  return _from_dataset_id(
      processing_mode,
      service,
      dataset_id,
      element_spec,
      job_name=job_name,
      max_outstanding_requests=max_outstanding_requests)
| |
# -*- coding: utf-8 -*-
#
# PR Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/tests/unit_tests/modules/eden/pr.py
#
import unittest
import datetime
from gluon import *
from gluon.storage import Storage
# =============================================================================
class PRTests(unittest.TestCase):
    """ PR Tests """
    def setUp(self):
        """ Set up organisation records """
        auth = current.auth
        s3db = current.s3db
        # Bypass authorization checks while creating the test fixtures
        auth.override = True
        otable = s3db.org_organisation
        # First test organisation
        org1 = Storage(name="Test PR Organisation 1",
                       acronym="TPO",
                       country="UK",
                       website="http://tpo.example.org")
        org1_id = otable.insert(**org1)
        org1.update(id=org1_id)
        s3db.update_super(otable, org1)
        # Second test organisation
        org2 = Storage(name="Test PR Organisation 2",
                       acronym="PTO",
                       country="US",
                       website="http://pto.example.com")
        org2_id = otable.insert(**org2)
        org2.update(id=org2_id)
        s3db.update_super(otable, org2)
        # Remember the organisations' person-entity IDs for the tests
        self.org1 = s3db.pr_get_pe_id("org_organisation", org1_id)
        self.org2 = s3db.pr_get_pe_id("org_organisation", org2_id)
    def testGetRealmUsers(self):
        """ Test pr_realm_users with realms, roles and role types """
        auth = current.auth
        s3db = current.s3db
        # Look up the pe_ids of the admin and the normal test user
        auth.s3_impersonate("admin@example.com")
        admin_id = auth.user.id
        admin_pe_id = auth.s3_user_pe_id(admin_id)
        auth.s3_impersonate("normaluser@example.com")
        user_id = auth.user.id
        user_pe_id = auth.s3_user_pe_id(user_id)
        auth.s3_impersonate(None)
        org1 = self.org1
        org2 = self.org2
        # Without any affiliations, both realms have no users
        users = s3db.pr_realm_users(org1)
        self.assertEqual(users, Storage())
        users = s3db.pr_realm_users(org2)
        self.assertEqual(users, Storage())
        # Affiliate: admin as Volunteer (role_type 9) with org1,
        #            user as Staff (default role_type) with org2
        s3db.pr_add_affiliation(org1, admin_pe_id, role="Volunteer", role_type=9)
        s3db.pr_add_affiliation(org2, user_pe_id, role="Staff")
        # NOTE(review): role_type=9 appears not to count for realm membership
        # of org1 by default — neither user shows up; confirm semantics
        users = s3db.pr_realm_users(org1)
        self.assertFalse(user_id in users)
        self.assertFalse(admin_id in users)
        users = s3db.pr_realm_users(org2)
        self.assertTrue(user_id in users)
        self.assertFalse(admin_id in users)
        # Multiple realms can be queried as a list
        users = s3db.pr_realm_users([org1, org2])
        self.assertTrue(user_id in users)
        self.assertFalse(admin_id in users)
        # Filter by role name
        users = s3db.pr_realm_users(org1, roles="Volunteer")
        self.assertFalse(user_id in users)
        self.assertTrue(admin_id in users)
        users = s3db.pr_realm_users(org2, roles="Volunteer")
        self.assertFalse(user_id in users)
        self.assertFalse(admin_id in users)
        users = s3db.pr_realm_users([org1, org2], roles="Volunteer")
        self.assertFalse(user_id in users)
        self.assertTrue(admin_id in users)
        users = s3db.pr_realm_users(org1, roles="Staff")
        self.assertFalse(user_id in users)
        self.assertFalse(admin_id in users)
        users = s3db.pr_realm_users(org2, roles="Staff")
        self.assertTrue(user_id in users)
        self.assertFalse(admin_id in users)
        users = s3db.pr_realm_users([org1, org2], roles="Staff")
        self.assertTrue(user_id in users)
        self.assertFalse(admin_id in users)
        # Multiple roles can be given as a list
        users = s3db.pr_realm_users([org1, org2], roles=["Staff", "Volunteer"])
        self.assertTrue(user_id in users)
        self.assertTrue(admin_id in users)
        # Filter by role type (1=Staff default, 9=Volunteer as set up above)
        users = s3db.pr_realm_users([org1, org2], role_types=1)
        self.assertTrue(user_id in users)
        self.assertFalse(admin_id in users)
        users = s3db.pr_realm_users([org1, org2], role_types=9)
        self.assertFalse(user_id in users)
        self.assertTrue(admin_id in users)
        # role_types=None means any role type
        users = s3db.pr_realm_users([org1, org2], role_types=None)
        self.assertTrue(user_id in users)
        self.assertTrue(admin_id in users)
        # Removing the affiliation removes the user from the realm
        s3db.pr_remove_affiliation(org2, user_pe_id, role="Staff")
        users = s3db.pr_realm_users([org1, org2], role_types=None)
        self.assertFalse(user_id in users)
        self.assertTrue(admin_id in users)
        # None as realm should give a list of all current users
        table = auth.settings.table_user
        query = (table.deleted != True)
        rows = current.db(query).select(table.id)
        all_users = [row.id for row in rows]
        users = s3db.pr_realm_users(None)
        self.assertTrue(all([u in users for u in all_users]))
    def tearDown(self):
        """ Roll back all database changes and restore authorization """
        current.db.rollback()
        current.auth.override = False
# =============================================================================
class PersonDeduplicateTests(unittest.TestCase):
    """ Tests for the pr_person deduplicator """
    def setUp(self):
        """ Create two similar person records to deduplicate against """
        s3db = current.s3db
        ptable = s3db.pr_person
        ctable = s3db.pr_contact
        # Person 1: initials "TU"
        person1 = Storage(first_name = "Test",
                          last_name = "UserDEDUP",
                          initials = "TU",
                          date_of_birth = datetime.date(1974, 4, 13))
        person1_id = ptable.insert(**person1)
        person1.update(id=person1_id)
        s3db.update_super(ptable, person1)
        self.person1_id = person1_id
        self.pe1_id = s3db.pr_get_pe_id(ptable, person1_id)
        # Person 2: same names, but initials "OU" and different DOB
        person2 = Storage(first_name = "Test",
                          last_name = "UserDEDUP",
                          initials = "OU",
                          date_of_birth = datetime.date(1974, 4, 23))
        person2_id = ptable.insert(**person2)
        person2.update(id=person2_id)
        s3db.update_super(ptable, person2)
        self.person2_id = person2_id
        self.pe2_id = s3db.pr_get_pe_id(ptable, person2_id)
    def testHook(self):
        """ Verify that a deduplicator is configured for pr_person """
        s3db = current.s3db
        deduplicate = s3db.get_config("pr_person", "deduplicate")
        self.assertNotEqual(deduplicate, None)
        self.assertTrue(callable(deduplicate))
    def testMatchNames(self):
        """ Test deduplication by name matching """
        s3db = current.s3db
        from s3.s3import import S3ImportItem
        deduplicate = s3db.get_config("pr_person", "deduplicate")
        # Test Match
        person = Storage(first_name = "Test",
                         last_name = "UserDEDUP")
        item = self.import_item(person)
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Mismatch
        person = Storage(first_name = "Other",
                         last_name = "UserDEDUP")
        item = self.import_item(person)
        deduplicate(item)
        self.assertNotEqual(item.id, self.person1_id)
        self.assertNotEqual(item.id, self.person2_id)
    def testMatchEmail(self):
        """ Test deduplication by name and email matching """
        s3db = current.s3db
        from s3.s3import import S3ImportItem
        deduplicate = s3db.get_config("pr_person", "deduplicate")
        # Test without contact records in the DB
        # Test Match
        person = Storage(first_name = "Test",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="testuser@example.com")
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Mismatch
        person = Storage(first_name = "Other",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="testuser@example.com")
        deduplicate(item)
        self.assertNotEqual(item.id, self.person1_id)
        self.assertNotEqual(item.id, self.person2_id)
        # Insert contact records into the DB
        ctable = s3db.pr_contact
        email = Storage(pe_id = self.pe1_id,
                        contact_method = "EMAIL",
                        value = "testuser@example.com")
        ctable.insert(**email)
        email = Storage(pe_id = self.pe2_id,
                        contact_method = "EMAIL",
                        value = "otheruser@example.org")
        ctable.insert(**email)
        # Test with contact records in the DB
        # Test Match - same names, same email
        person = Storage(first_name = "Test",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="testuser@example.com")
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same names, different email
        person = Storage(first_name = "Test",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="otheremail@example.com")
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same names, same email, but different record
        person = Storage(first_name = "Test",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="otheruser@example.org")
        deduplicate(item)
        self.assertEqual(item.id, self.person2_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Mismatch - First names different
        person = Storage(first_name = "Other",
                         last_name = "UserDEDUP")
        item = self.import_item(person, email="testuser@example.com")
        deduplicate(item)
        self.assertNotEqual(item.id, self.person1_id)
        self.assertNotEqual(item.id, self.person2_id)
    def testMatchInitials(self):
        """ Test deduplication by initials matching """
        s3db = current.s3db
        from s3.s3import import S3ImportItem
        deduplicate = s3db.get_config("pr_person", "deduplicate")
        # Insert contact records into the DB
        ctable = s3db.pr_contact
        email = Storage(pe_id = self.pe1_id,
                        contact_method = "EMAIL",
                        value = "testuser@example.com")
        ctable.insert(**email)
        email = Storage(pe_id = self.pe2_id,
                        contact_method = "EMAIL",
                        value = "otheruser@example.org")
        ctable.insert(**email)
        # Test Match - same initials
        person = Storage(initials="TU")
        item = self.import_item(person)
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same names, different initials
        person = Storage(first_name="Test",
                         last_name="UserDEDUP",
                         initials="OU")
        item = self.import_item(person)
        deduplicate(item)
        self.assertEqual(item.id, self.person2_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same names, different initials, and email
        person = Storage(first_name="Test",
                         last_name="UserDEDUP",
                         initials="OU")
        item = self.import_item(person, email="testuser@example.org")
        deduplicate(item)
        self.assertEqual(item.id, self.person2_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same initials
        person = Storage(initials="OU")
        item = self.import_item(person)
        deduplicate(item)
        self.assertEqual(item.id, self.person2_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
        # Test Match - same initials, same email
        person = Storage(initials="TU")
        item = self.import_item(person, email="testuser@example.com")
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        self.assertEqual(item.method, S3ImportItem.METHOD.UPDATE)
    def testMatchDOB(self):
        """ Test deduplication by date-of-birth matching """
        s3db = current.s3db
        deduplicate = s3db.get_config("pr_person", "deduplicate")
        # Insert contact records into the DB
        ctable = s3db.pr_contact
        email = Storage(pe_id = self.pe1_id,
                        contact_method = "EMAIL",
                        value = "testuser@example.com")
        ctable.insert(**email)
        email = Storage(pe_id = self.pe2_id,
                        contact_method = "EMAIL",
                        value = "otheruser@example.org")
        ctable.insert(**email)
        # Test Match - same initials, different email, same DOB
        person = Storage(initials="TU",
                         date_of_birth=datetime.date(1974, 4, 13))
        item = self.import_item(person, email="otheremail@example.com")
        deduplicate(item)
        self.assertEqual(item.id, self.person1_id)
        # Test MisMatch - same initials, different email, different DOB
        person = Storage(initials="TU",
                         date_of_birth=datetime.date(1975, 6, 17))
        item = self.import_item(person, email="otheremail@example.com")
        deduplicate(item)
        self.assertNotEqual(item.id, self.person1_id)
        self.assertNotEqual(item.id, self.person2_id)
    def import_item(self, person, email=None, sms=None):
        """ Construct a fake import item """
        from s3.s3import import S3ImportItem
        def item(tablename, data):
            # Minimal stand-in for an S3ImportItem instance
            return Storage(id = None,
                           method = None,
                           tablename = tablename,
                           data = data,
                           components = [],
                           METHOD = S3ImportItem.METHOD)
        import_item = item("pr_person", person)
        if email:
            import_item.components.append(item("pr_contact",
                                               Storage(contact_method = "EMAIL",
                                                       value = email)))
        if sms:
            import_item.components.append(item("pr_contact",
                                               Storage(contact_method = "SMS",
                                                       value = sms)))
        return import_item
    def tearDown(self):
        """ Roll back all database changes """
        current.db.rollback()
        self.pe_id = None
        self.person_id = None
# =============================================================================
def run_suite(*test_classes):
    """ Run the test suite

        @param test_classes: the unittest.TestCase classes to run
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for test_class in test_classes:
        tests = loader.loadTestsFromTestCase(test_class)
        suite.addTests(tests)
    # NB TestSuite() is never None, so the previous `is not None` check was
    # always true; only invoke the runner when there is something to run
    if suite.countTestCases():
        unittest.TextTestRunner().run(suite)
    return
if __name__ == "__main__":
    # Run the test suites directly (see the module header for the web2py
    # invocation)
    run_suite(
        PRTests,
        PersonDeduplicateTests,
    )
# END ========================================================================
| |
#!/usr/bin/env python3
#
# Copyright (c) 2013-2016, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
"""
The simulator consists of:
* machine model
* topology
* scheduler
"""
import config
import helpers
import simulation
import argparse
import logging
import sys
import sys
sys.path.append('./contrib/python-graph/core')
def simulate(args):
    """Simulate one or more overlay topologies on the given machine.

    Copies the relevant fields of *args* into the global ``config.args``,
    instantiates the machine model, runs the simulation wrapper for each
    overlay (optionally clustered when hybrid mode is on), and emits the
    quorum C configuration via the ``helpers`` module.

    Returns a tuple ``(all_last_nodes, all_leaf_nodes, root)``.
    """
    machine = args.machine
    # Mirror the arguments into the global config so other modules see them
    config.args.machine = args.machine
    config.args.group = args.group
    config.args.multicast = args.multicast
    config.args.hybrid = args.hybrid
    config.args.hybrid_cluster = args.hybrid_cluster
    print ("machine: %s, topology: %s, hybrid: %s" % \
        (machine, args.overlay, args.hybrid))
    m_class = config.arg_machine(machine)
    m = m_class()
    assert m != None
    gr = m.get_graph()
    if args.multicast:
        print ("Building a multicast")
    # --------------------------------------------------
    # Switch main action
    # XXX Cleanup required
    if True:
        # Generate model headers
        helpers.output_quroum_start(m, len(args.overlay))
        all_last_nodes = []
        all_leaf_nodes = []
        model_descriptions = []
        num_models = 0
        # Generate representation of each topology
        for _overlay in args.overlay:
            if config.args.hybrid :
                _overlay = _overlay + "-hybrid"
            # ------------------------------
            # Hybrid
            hyb_cluster = None
            shm_writers = None
            hyb_leaf_nodes = None
            if config.args.hybrid:
                print (args.hybrid_cluster)
                # Decide how cores are grouped into shared-memory clusters
                if 'socket' in args.hybrid_cluster:
                    print ("Clustering: Sockets")
                    hyb_cluster = m.machine_topology['Package'].get()
                elif 'all' in args.hybrid_cluster:
                    print ("Clustering: All cores")
                    hyb_cluster = [range(0, m.machine_topology['numcpus'])]
                elif 'numa' in args.hybrid_cluster:
                    print ("Clustering: NUMA nodes")
                    # "numaN" with N > 1 merges nodes, N < 1 splits them
                    if len(args.hybrid_cluster) > 4:
                        hyb_cluster = m.machine_topology['NUMA'].get()
                        size = float(args.hybrid_cluster[4:])
                        if size > 1:
                            # Merge NUMA nodes
                            if ((size % 2) != 0):
                                raise Exception(('Only support powers of two for'
                                                 ' numa node merge'))
                            if (size > (len(hyb_cluster)/2)):
                                raise Exception(('Only support values less or equal to half'
                                                 'the numa nodes'))
                            new_cluster = []
                            for i in range(0,len(hyb_cluster), int(size)):
                                tmp = []
                                for j in range(0, int(size)):
                                    tmp += hyb_cluster[i+j]
                                new_cluster.append(tmp)
                            hyb_cluster = new_cluster
                        else:
                            # Split NUMA nodes
                            print (hyb_cluster)
                            new_cluster = []
                            split = int(1/size)
                            if split > (len(hyb_cluster[0])/2):
                                raise Exception(('Single core in clusters not allowed'))
                            if (len(hyb_cluster[0]) % split) != 0:
                                raise Exception(('Only support splitting numa nodes if'
                                                 ' the numa size is divisible by the number'
                                                 ' of splits'))
                            for i in range(0, len(hyb_cluster)):
                                seg_len = int(len(hyb_cluster[0])/split)
                                for j in range(1, split+1):
                                    tmp1 = hyb_cluster[i][(j-1)*seg_len:j*seg_len]
                                    new_cluster.append(tmp1)
                            hyb_cluster = new_cluster
                            print (hyb_cluster)
                    else:
                        hyb_cluster = m.machine_topology['NUMA'].get()
                else:
                    print ("Warning: Unknown cluster argument for hybrid, using default option")
                    print ("Clustering: NUMA nodes")
                    hyb_cluster = m.machine_topology['NUMA'].get()
                # Simulate a multicast tree
                args.multicast = True
                # One shared-memory writer (min core id) and one leaf
                # (max core id) per cluster
                shm_writers = [ min(x) for x in hyb_cluster ]
                hyb_leaf_nodes = [ max(x) for x in hyb_cluster ]
                # NOTE(review): map() is lazy on Python 3 - this iterator is
                # exhausted after one pass; confirm downstream consumers only
                # iterate the group once
                args.group = map(int, shm_writers)
                config.args.group = map(int, shm_writers)
                #args.group = ','.join(map(str, shm_writers))
            # type(topology) = hybrid.Hybrid | binarytree.BinaryTree -- inherited from overlay.Overlay
            (topo, evs, root, sched, topology) = \
                simulation._simulation_wrapper(_overlay, m, gr, args.multicast)
            hierarchies = topo.get_tree()
            # Dictionary for translating core IDs
            d = helpers.core_index_dict(m.graph.nodes())
            tmp = topology.get_name()
            if hyb_cluster:
                tmp += " (hybrid)"
            model_descriptions.append(tmp)
            tmp_last_node = -1
            receive_order = None
            # Pick the atomic-broadcast event to learn the last node and
            # the order in which nodes finished receiving
            for (label, ev) in evs:
                if label == 'atomic broadcast':
                    tmp_last_node = ev.last_node
                    receive_order = ev.node_finished_list
                print ("Cost %s for tree is: %d (%d), last node is %s" % \
                    (label, ev.time, ev.time_no_ab, ev.last_node))
            assert receive_order != None
            # Output c configuration for quorum program
            helpers.output_quorum_configuration(m, hierarchies, root, sched,
                                                topology, num_models,
                                                shm_clusters=hyb_cluster,
                                                shm_writers=shm_writers)
            if config.args.hybrid:
                # Set ONE reader of the shared memory cluster as last node
                all_leaf_nodes.append(hyb_leaf_nodes)
                all_last_nodes.append(max(hyb_leaf_nodes))
            else:
                # Determine leaf nodes for this model (translated core IDs)
                all_leaf_nodes.append([d[l] for l in topo.get_leaf_nodes(sched)])
                # Determine last node for this model
                all_last_nodes.append(tmp_last_node)
            # Output final graph: we have to do this here, as the
            # final topology for the adaptive tree is not known before
            # simulating it.
            if not config.running_as_server:
                helpers.draw_final(m, sched, topo)
            num_models += 1
        print (all_leaf_nodes)
        # Cut down number of leafs
        LEAFS_MAX = 10
        if len(all_leaf_nodes[0])>LEAFS_MAX:
            # Filter last nodes, only taking leaf nodes
            # NOTE(review): receive_order here is from the LAST overlay
            # iteration, and the >= assertion looks inverted (a filtered
            # list cannot be longer than its source) - confirm intent
            _l = [ x for x in receive_order if x in all_leaf_nodes[0] ]
            assert(len(_l) >= len(all_leaf_nodes[0]))
            all_leaf_nodes[0] = _l[-10:]
            helpers.warn('Cropping leaf nodes to: %s' % ','.join(map(str, all_leaf_nodes[0])))
        helpers.warn('Leaf nodes are: %s - %d' % (','.join(map(str, all_leaf_nodes[0])), len(all_leaf_nodes[0])))
        # Generate footer
        helpers.output_quorum_end(all_last_nodes, all_leaf_nodes, \
            model_descriptions)
    return (all_last_nodes, all_leaf_nodes, root)
# --------------------------------------------------
def build_and_simulate():
    """
    Build a tree model and simulate sending a message along it.

    Parses the command-line arguments into the global ``config.args``,
    optionally enters server mode, then delegates to :func:`simulate`.

    Returns 0 on success; exits with status 1 on argument errors.
    """
    # XXX The arguments are totally broken. Fix them!
    parser = argparse.ArgumentParser(
        description=('Simulator for multicore machines. The default action is '
                     'to simulate the given combination of topology and machine. '
                     'Available machines: %s' % ', '.join(config.machines) ))
    parser.add_argument('--multicast', action='store_const', default=False,
                        const=True, help='Perfom multicast rather than broadcast')
    parser.add_argument('machine', default=None, nargs='?',
                        help="Machine to simulate")
    parser.add_argument('overlay', nargs='*', default=config.topologies,
                        help="Overlay to use for atomic broadcast (default: %s)" %
                        ' '.join(config.topologies))
    parser.add_argument('--hybrid', action='store_const', default=False,
                        const=True, help='Generate hybrid model')
    parser.add_argument('--hybrid-cluster', help='how to cluster: default: numa, one of: numa, socket')
    parser.add_argument('--group', default=None,
                        help=("Coma separated list of node IDs that should be "
                              "part of the multicast group"))
    parser.add_argument('--visu', help='Visualize generated graph',
                        action='store_const', default=False, const=True)
    parser.add_argument('--debug',
                        action='store_const', default=False, const=True)
    parser.add_argument('--server', action='store_const', default=False, const=True)
    try:
        config.args = parser.parse_args()
    except SystemExit:
        # argparse signals bad arguments via SystemExit; keep this script's
        # historical exit code of 1.  (Previously a bare "except:" swallowed
        # every exception here, including KeyboardInterrupt.)
        exit(1)
    if config.args.debug:
        print ('Activating debug mode')
        logging.getLogger().setLevel(logging.INFO)
    if config.args.server:
        from server import server_loop
        server_loop()
    if config.args.group:
        # list() rather than a bare map(): on Python 3 a map object is a lazy
        # iterator that would be exhausted after the first pass over the group
        config.args.group = list(map(int, config.args.group.split(',')))
    if config.args.hybrid:
        # NOTE(review): downstream code apparently expects a string here
        config.args.hybrid = 'True'
    simulate(config.args)
    return 0
if __name__ == "__main__":
    # NetOS machine definitions are only needed when running standalone
    import netos_machine
    print ('Starting Simulator v%s' % helpers.git_version())
    # Append NetOS machines
    config.machines += netos_machine.get_list()
    # Route uncaught exceptions through the project's info handler
    sys.excepthook = helpers.info
    build_and_simulate()
| |
from .. import hooks
from .. import util
from ..extensions import db
from ..models import Post, Setting, get_settings, Context
from ..tasks import get_queue, async_app_context
from flask.ext.login import login_required
from flask import (
request, redirect, url_for, Blueprint, current_app,
)
import requests
import urllib
import datetime
# Regex matching Instagram media permalinks; group(1) is used below as the
# media shortcode
PERMALINK_RE = util.INSTAGRAM_RE
instagram = Blueprint('instagram', __name__)
def register(app):
    """Wire the instagram blueprint and its hook callbacks into the app."""
    app.register_blueprint(instagram)
    hooks.register('create-context', create_context)
    hooks.register('post-saved', send_to_instagram)
@instagram.route('/authorize_instagram')
@login_required
def authorize_instagram():
    """Two-step Instagram OAuth endpoint.

    First visit (no ?code=) redirects the user to Instagram's consent
    page with this endpoint as the redirect_uri; the redirect back
    carries the authorization code, which is exchanged for an access
    token and persisted in the settings table.
    """
    redirect_uri = url_for('.authorize_instagram', _external=True)
    code = request.args.get('code')
    if not code:
        # redirect to instagram authorization page
        params = {
            'client_id': get_settings().instagram_client_id,
            'redirect_uri': redirect_uri,
            'response_type': 'code',
            'scope': 'likes comments',
        }
        return redirect('https://api.instagram.com/oauth/authorize/?'
                        + urllib.parse.urlencode(params))
    # Exchange the authorization code for an access token
    params = {
        'client_id': get_settings().instagram_client_id,
        'client_secret': get_settings().instagram_client_secret,
        'grant_type': 'authorization_code',
        'redirect_uri': redirect_uri,
        'code': code,
    }
    result = requests.post(
        'https://api.instagram.com/oauth/access_token', data=params)
    current_app.logger.debug('received result %s', result)
    payload = result.json()
    access_token = payload.get('access_token')
    # NOTE(review): assumes the 'instagram_access_token' Setting row already
    # exists; Setting.query.get() returning None would raise AttributeError
    Setting.query.get('instagram_access_token').value = access_token
    db.session.commit()
    return redirect(url_for('admin.edit_settings'))
def create_context(url):
    """Build a Context for an Instagram media permalink.

    Fetches the media metadata from the Instagram API and populates a
    Context with author, timestamp, caption and image.

    :param url: candidate URL; ignored unless it matches PERMALINK_RE
    :return: a populated Context, or None if the URL is not an Instagram
        permalink or the API request fails
    """
    m = PERMALINK_RE.match(url)
    if not m:
        current_app.logger.debug('url is not an instagram media url %s', url)
        return
    r = ig_get('https://api.instagram.com/v1/media/shortcode/' + m.group(1))
    # status_code // 2 == 100 only for 200/201, i.e. a success response
    if r.status_code // 2 != 100:
        current_app.logger.warn(
            "failed to fetch instagram media with shortcode %s %s %s",
            m.group(1), r, r.content)
        return
    blob = r.json()
    author = blob.get('data', {}).get('user', {})
    author_name = author.get('full_name')
    author_image = author.get('profile_picture')
    author_url = author.get('website')
    created_time = blob.get('data', {}).get('created_time')
    caption_text = (blob.get('data', {}).get('caption') or {}).get('text')
    images = blob.get('data', {}).get('images', {})
    # Guard against a missing 'standard_resolution' entry (previously an
    # unguarded .get(...).get(...) that raised AttributeError on None)
    image = (images.get('standard_resolution') or {}).get('url')
    # Initialize 'published' so a missing created_time no longer raises
    # UnboundLocalError when it is read below
    published = None
    if created_time:
        published = datetime.datetime.fromtimestamp(int(created_time))
    content = ''
    if caption_text:
        content += '<p>' + caption_text + '</p>'
    if image:
        content += '<img src="' + image + '"/>'
    context = Context()
    context.url = context.permalink = url
    context.author_name = author_name
    context.author_image = author_image
    context.author_url = author_url
    context.published = published
    context.title = None
    context.content = content
    context.content_plain = caption_text
    current_app.logger.debug('created instagram context %s', context)
    return context
def send_to_instagram(post, args):
    """Share a like or comment to Instagram without user-input.
    """
    # Only act when 'instagram' was selected as a syndication target
    if 'instagram' not in args.getlist('syndicate-to'):
        return None
    if not is_instagram_authorized():
        return False, 'Current user is not authorized for instagram'
    current_app.logger.debug(
        "queueing post to instagram {}".format(post.id))
    # Defer the actual API work to a background worker
    get_queue().enqueue(do_send_to_instagram, post.id, current_app.config)
    return True, 'Success'
def do_send_to_instagram(post_id, app_config):
    """Background task: POSSE a post to Instagram.

    Likes can be sent via the API; replies are posted as comments via
    post_comment().  Returns the syndication URL on success, else None.
    """
    with async_app_context(app_config):
        current_app.logger.debug('posting to instagram %d', post_id)
        post = Post.load_by_id(post_id)
        in_reply_to, repost_of, like_of \
            = util.posse_post_discovery(post, PERMALINK_RE)
        # likes are the only thing we can POSSE to instagram unfortunately
        if like_of:
            m = PERMALINK_RE.match(like_of)
            shortcode = m.group(1)
            # Resolve the shortcode to an API media id
            r = ig_get('https://api.instagram.com/v1/media/shortcode/'
                       + m.group(1))
            # status_code // 2 == 100 only for 200/201 responses
            if r.status_code // 2 != 100:
                current_app.logger.warn(
                    "failed to fetch instagram media %s %s", r, r.content)
                return None
            media_id = r.json().get('data', {}).get('id')
            if not media_id:
                current_app.logger.warn(
                    'could not find media id for shortcode %s', shortcode)
                return None
            r = ig_get('https://api.instagram.com/v1/users/self')
            my_username = r.json().get('data', {}).get('username')
            r = ig_post('https://api.instagram.com/v1/media/'
                        + media_id + '/likes')
            if r.status_code // 2 != 100:
                current_app.logger.warn(
                    "failed to POST like for instagram id %s", media_id)
                return None
            # Record the syndication URL with a fragment naming the liker
            like_url = like_of + '#liked-by-' + my_username
            post.add_syndication_url(like_url)
            db.session.commit()
            return like_url
        if in_reply_to:
            comment_text = format_markdown_for_instagram(post.content)
            comment_url = post_comment(in_reply_to, comment_text)
            if comment_url:
                post.add_syndication_url(comment_url)
                db.session.commit()
            return comment_url
def format_markdown_for_instagram(data):
    """Render markdown and flatten the result to plain text for Instagram."""
    rendered = util.markdown_filter(data)
    return util.format_as_text(rendered)
def post_comment(permalink, comment_text):
    """Post a comment on an Instagram media page via a headless browser.

    Instagram's API does not allow posting comments, so this drives
    PhantomJS through the web UI using the configured credentials.
    Returns a synthetic comment URL, or None when credentials are absent.
    """
    if ('INSTAGRAM_USERNAME' not in current_app.config
            or 'INSTAGRAM_PASSWORD' not in current_app.config):
        return
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
    import selenium.webdriver.support.ui as ui
    from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
    dc = dict(DesiredCapabilities.PHANTOMJS)
    dc['ssl-protocol'] = 'any'
    browser = webdriver.PhantomJS(desired_capabilities=dc)
    wait = ui.WebDriverWait(browser, 10)  # timeout after 10 seconds
    # Log in: username TAB password, then submit the form
    browser.get('https://instagram.com/accounts/login/')
    un = browser.find_element_by_id('lfFieldInputUsername')
    un.send_keys(current_app.config['INSTAGRAM_USERNAME']
                 + Keys.TAB
                 + current_app.config['INSTAGRAM_PASSWORD'])
    un.submit()
    wait.until(lambda b: b.current_url == 'https://instagram.com/')
    # Navigate to the media page and type the comment into its input field
    browser.get(permalink)
    inp = browser.find_element_by_tag_name('input')
    inp.send_keys(comment_text)
    inp.submit()
    # workaround for https://github.com/SeleniumHQ/selenium/issues/767
    browser.service.process.terminate()
    browser.quit()
    # Synthesize a unique fragment URL for the new comment
    return (permalink + '#comment-by-'
            + current_app.config['INSTAGRAM_USERNAME']
            + '-' + datetime.datetime.now().isoformat())
def ig_get(url):
    """GET an Instagram API endpoint with the stored access token."""
    credentials = {'access_token': get_settings().instagram_access_token}
    return requests.get(url, params=credentials)
def ig_post(url):
    """POST to an Instagram API endpoint with the stored access token."""
    credentials = {'access_token': get_settings().instagram_access_token}
    return requests.post(url, data=credentials)
def is_instagram_authorized():
    """Return a truthy value when an Instagram access token is stored."""
    settings = get_settings()
    if not hasattr(settings, 'instagram_access_token'):
        return False
    return settings.instagram_access_token
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import subprocess
import shutil
from optparse import OptionParser
class Host:
    """
    Abstraction of the elements unique to each Ambari Agent running on this VM.
    """
    def __init__(self, host_name, ping_port, home_dir):
        # Fake FQDN for this agent (built as "<prefix>-NNNN" by the caller)
        self.host_name = host_name
        # Unique port for this agent (base port + agent index)
        self.ping_port = ping_port
        # Per-agent home directory holding its configs, logs, pid, cache
        self.home_dir = home_dir
class Multiplier:
    """
    In order to perform scale testing, this class bootstraps multiple Ambari Agents to run on the same host.
    Each Ambari Agent has its own home directory with subdirectories for configs, logs, etc.
    Further, each agent is given a unique port number.
    Usage: python agent-multiplier.py [command]
    [command] = start | stop | restart | status
    Optional flags:
    -v --verbose : Increase logging
    """

    # Config file supplying the start/num/prefix parameters (see parse_configs)
    CONFIG_FILE = "/etc/ambari-agent/conf/agent-multiplier.conf"

    def __init__(self, args):
        # Parse the optional flags; the remaining positional arg is the action
        parser = OptionParser()
        parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                          help="Verbose logging")
        (options, args) = parser.parse_args(args)
        self.verbose = options.verbose
        # Root under which each agent gets its own home directory
        self.home_dir = "/home/"
        # Subdirectories to create inside the home dir for the given agent.
        self.log_dir = "/var/log/ambari-agent"
        self.config_dir = "/etc/ambari-agent/conf"
        self.pid_file = "/var/run/ambari-agent/ambari-agent.pid"
        self.prefix_dir = "/var/lib/ambari-agent/data"
        self.cache_dir = "/var/lib/ambari-agent/cache"
        # Ambari Agent config file to use as a template
        # Will change hostname and port after copying
        self.source_config_file = "/etc/ambari-agent/conf/ambari-agent.ini"
        self.source_version_file = "/var/lib/ambari-agent/data/version"
        # Agent i listens on base_ping_port + i
        self.base_ping_port = 5000
        self.start = 0
        self.num = 0
        self.prefix = None
        # Parse above params from config file, which must exist
        self.parse_configs()
        if len(args) != 2:
            # NOTE(review): this only prints usage and does not exit;
            # args[1] below raises IndexError when no action was given
            print "Sample Usage: python agent_multiplier.py [action]\n" \
                  "actions: start|stop|restart|status"
        self.command = args[1]
        # Validate configs
        self.validate()
        print "*** Params ***"
        print "Start: %d" % self.start
        print "Num: %d" % self.num
        print "Prefix: %s" % self.prefix
        print "Command: %s" % self.command
        # All hostnames that will be managed by Ambari Agents on this host
        self.hosts = []
        for i in range(self.start, self.start + self.num):
            host_name = "%s-%04d" % (self.prefix, i)
            host_home_dir = os.path.join(self.home_dir, host_name)
            host = Host(host_name, self.base_ping_port + i, host_home_dir)
            self.hosts.append(host)
        self.bootstrap()

    def parse_configs(self):
        """
        Parse the configuration file to set the config params.
        """
        if not os.path.exists(self.CONFIG_FILE):
            print "Did not find Agent Multiplier config file: %s" % str(self.CONFIG_FILE)
            sys.exit(-1)
        params = {}
        with open(self.CONFIG_FILE, "r") as f:
            for line in f.readlines():
                # Split each "key=value" line on the first '='
                index = line.index("=") if "=" in line else None
                if index is not None:
                    config = line[0:index].strip()
                    value = line[index+1:].strip()
                    params[config] = value
        # Convert some configs to ints
        if "start" in params:
            self.start = int(params["start"])
        if "num" in params:
            self.num = int(params["num"])
        if "prefix" in params:
            self.prefix = params["prefix"].strip().lower()

    def validate(self):
        """
        Validate the configs are non-empty and contain correct ranges.
        On error, will exit with code -1
        """
        errors = []
        if self.start <= 0:
            errors.append("Start must be a positive integer")
        if self.num <= 0:
            errors.append("Number of agents on this host must be a positive integer")
        if self.prefix is None or self.prefix.strip() == "":
            errors.append("Prefix is a required field")
        if not os.path.isfile(self.source_config_file):
            errors.append("Ambari Agent config file does not exist at %s" % self.source_config_file)
        valid_commands = set(["start", "stop", "restart", "status"])
        if self.command is None or self.command not in valid_commands:
            errors.append("Command must be one of %s" % ", ".join(valid_commands))
        if len(errors) > 0:
            print "Error:"
            print "\n".join(errors)
            sys.exit(-1)

    def bootstrap(self):
        """
        Bootstrap each Ambari Agent that will run on this host with the directories it needs and configuration file.
        """
        for host in self.hosts:
            host_name = host.host_name
            host_home_dir = host.home_dir
            # Mirror the system-wide directory layout under the agent's home
            host_log_dir = host_home_dir + self.log_dir
            host_config_dir = host_home_dir + self.config_dir
            host_pid_file = host_home_dir + self.pid_file
            host_pid_dir = os.path.dirname(host_pid_file)
            host_prefix = host_home_dir + self.prefix_dir
            host_cache_dir = host_home_dir + self.cache_dir
            if self.verbose:
                print "Analyzing host %s with port %d" % (host_name, host.ping_port)
            for dir in [host_home_dir, host_log_dir, host_config_dir, host_pid_dir, host_prefix, host_cache_dir]:
                if not os.path.isdir(dir):
                    print "Creating dir %s" % (dir)
                    os.makedirs(dir)
            # Copy config file
            host_config_file = os.path.join(host_config_dir, "ambari-agent.ini")
            if not os.path.isfile(host_config_file):
                print "Copying config file %s" % str(host_config_file)
                shutil.copyfile(self.source_config_file, host_config_file)
            # Copy version file
            version_file = os.path.join(host_prefix, "version")
            if not os.path.isfile(version_file):
                print "Copying version file %s" % str(version_file)
                shutil.copyfile(self.source_version_file, version_file)
            # Copy cache dir content
            if not os.path.isdir(os.path.join(host_cache_dir, "stacks")):
                print "Copying cache directory content %s" % str(host_cache_dir)
                self.copytree(self.cache_dir, host_cache_dir)
            # Create hostname.sh script to use custom FQDN for each agent.
            host_name_script = os.path.join(host_config_dir, "hostname.sh")
            self.create_host_name_script(host_name, host_name_script)
            # Overwrite the port and hostname
            config_dict = {"ping_port": host.ping_port,
                           "hostname_script": host_name_script,
                           "public_hostname_script": host_name_script,
                           "logdir": host_log_dir,
                           "piddir": host_pid_dir,
                           "prefix": host_prefix,
                           "cache_dir": host_cache_dir}
            self.change_config(host_config_file, config_dict)
        # Change /etc/hosts file by appending each hostname.
        self.modify_etc_hosts_file()

    def copytree(self, src, dst, symlinks=False, ignore=None):
        # Like shutil.copytree, but copies into an already-existing dst dir
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                shutil.copytree(s, d, symlinks, ignore)
            else:
                shutil.copy2(s, d)

    def create_host_name_script(self, host_name, host_name_script):
        """
        Creates a shell script that will echo the given hostname.
        :param host_name: Host name to echo
        :param host_name_script: Location to save the scrip to
        """
        template = "#!/bin/sh\n" \
                   "echo HOSTNAME"
        with open(str(host_name_script), "w+") as f:
            f.writelines(template.replace("HOSTNAME", host_name))
        subprocess.call("chmod +x %s" % host_name_script, shell=True)

    def change_config(self, config_file, config_dict):
        """
        Change existing configs. Will not append new configs.
        :param config_file: Config file to modify
        :param config_dict: Dictionary of config,value pairs to change.
        """
        # TODO, allow appending configs to [AGENT] section.
        if not os.path.exists(config_file):
            print "ERROR. Did not file config file: %s" % config_file
            return
        lines = []
        with open(config_file, "r") as f:
            lines = f.readlines()
        new_lines = []
        configs_found = set()
        configs_changed = set()
        for line in lines:
            # Rewrite the line if it sets one of the configs to overwrite
            for config, value in config_dict.iteritems():
                p = re.compile(config + "\s?=")
                if p.match(line):
                    configs_found.add(config)
                    new_value = config + "=" + str(value) + "\n"
                    if line != new_value:
                        line = new_value
                        configs_changed.add(config)
                    continue
            # Config didn't change value
            new_lines.append(line)
        # TODO, if can append configs, then this is not needed.
        if len(configs_found) < len(config_dict.keys()):
            missing_configs = set(config_dict.keys()) - configs_found
            print "ERROR: Did not find all required configs. Missing: %s" % ", ".join(missing_configs)
            sys.exit(-1)
        if len(configs_changed) > 0:
            print "Making changes to file %s" % config_file
            with open(config_file, "w") as f:
                f.writelines(new_lines)

    def modify_etc_hosts_file(self):
        """
        Modify this host's /etc/hosts file by changing the line for localhost with synonyms for all of the other
        fake host names that will be generated for the Ambari Agents.
        """
        etc_hosts = "/etc/hosts"
        if not os.path.isfile(etc_hosts):
            print "ERROR. Did not find file %s" % etc_hosts
            return
        lines = []
        with open(etc_hosts, "r") as f:
            lines = f.readlines()
        # Value to search for when using Vagrant VMs
        localhost_line_start = "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 "
        new_lines = []
        line_changed = False
        for line in lines:
            if line.startswith("127.0.0.1"):
                # Append every fake host name as a synonym of localhost
                new_change = localhost_line_start + " ".join([x.host_name for x in self.hosts]) + "\n"
                if line != new_change:
                    line = new_change
                    line_changed = True
            new_lines.append(line)
        if line_changed:
            print "Making changes to %s" % etc_hosts
            with open(etc_hosts, "w") as f:
                f.writelines(new_lines)

    def run(self):
        """
        Run one of the supported commands: start, stop, restart, and status
        """
        if self.command == "start":
            self.cmd_start()
        elif self.command == "stop":
            self.cmd_stop()
        elif self.command == "restart":
            self.cmd_restart()
        elif self.command == "status":
            self.cmd_status()

    def cmd_start(self):
        # Start every agent with its own conf dir exported via the env
        print "Starting %d host(s)" % len(self.hosts)
        for host in self.hosts:
            cmd = "ambari-agent start --home %s" % (host.home_dir)
            os.environ['AMBARI_AGENT_CONF_DIR'] = os.path.join(host.home_dir, "etc/ambari-agent/conf")
            subprocess.call(cmd, shell=True, env=os.environ)

    def cmd_stop(self):
        # Stop every agent with its own conf dir exported via the env
        print "Stopping %d host(s)" % len(self.hosts)
        for host in self.hosts:
            cmd = "ambari-agent stop --home %s" % (host.home_dir)
            os.environ['AMBARI_AGENT_CONF_DIR'] = os.path.join(host.home_dir, "etc/ambari-agent/conf")
            subprocess.call(cmd, shell=True, env=os.environ)

    def cmd_restart(self):
        # Restart every agent with its own conf dir exported via the env
        print "Restarting %d host(s)" % len(self.hosts)
        for host in self.hosts:
            cmd = "ambari-agent restart --home %s" % (host.home_dir)
            os.environ['AMBARI_AGENT_CONF_DIR'] = os.path.join(host.home_dir, "etc/ambari-agent/conf")
            subprocess.call(cmd, shell=True, env=os.environ)

    def cmd_status(self):
        # Print an aggregate report of running/unknown/stopped agents
        print "Summary of Agent Status:"
        print "Total agents: %d\n" % len(self.hosts)
        (running_hosts, unknown_hosts, stopped_hosts) = self.aggregate_status()
        print "Running agents: %d" % len(running_hosts)
        if self.verbose and len(running_hosts):
            print "(%s)\n" % (", ".join(running_hosts))
        print "Unknown agents: %d" % len(unknown_hosts)
        if self.verbose and len(unknown_hosts):
            print "(%s)\n" % (", ".join(unknown_hosts))
        print "Stopped agents: %d" % len(stopped_hosts)
        if self.verbose and len(stopped_hosts):
            print "(%s)\n" % (", ".join(stopped_hosts))

    def aggregate_status(self):
        """
        Aggregate the status of all of the hosts.
        :return: Return a 3-tuple of (list of x, list of y, list of z)
        x = hosts running with a valid pid
        y = hosts with a pid file but process is not running
        z = hosts without a pid file
        """
        running_hosts = []
        unknown_hosts = []
        stopped_hosts = []
        for host in self.hosts:
            pid_file = os.path.join(self.home_dir, host.host_name, self.pid_file.lstrip("/"))
            if os.path.isfile(pid_file):
                pid = None
                with open(pid_file, "r") as f:
                    # Tolerate a malformed/empty pid file (pid stays None)
                    try:
                        line = f.readline()
                        pid = int(line.strip())
                    except:
                        pass
                is_running = Multiplier.check_pid(pid)
                if is_running:
                    running_hosts.append(host.host_name)
                else:
                    unknown_hosts.append(host.host_name)
            else:
                stopped_hosts.append(host.host_name)
        return (running_hosts, unknown_hosts, stopped_hosts)

    @classmethod
    def check_pid(cls, pid):
        """ Check For the existence of a unix pid. """
        # Signal 0 performs error checking only; raises OSError if no such pid
        try:
            os.kill(pid, 0)
        except OSError:
            return False
        else:
            return True
if __name__ == "__main__":
    # Bootstrap the agents from the config file, then run the chosen action
    m = Multiplier(sys.argv)
    m.run()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the isophote module.
"""
from astropy.io import fits
import numpy as np
from numpy.testing import assert_allclose
import pytest
from .make_test_data import make_test_image
from ..ellipse import Ellipse
from ..fitter import EllipseFitter
from ..geometry import EllipseGeometry
from ..isophote import Isophote, IsophoteList
from ..sample import EllipseSample
from ...datasets import get_path
from ...utils._optional_deps import HAS_SCIPY # noqa
# Passed to EllipseSample.update(); presumably the per-parameter "fix" flags
# with all four geometry parameters left free -- TODO confirm in EllipseSample
DEFAULT_FIX = np.array([False, False, False, False])
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_SCIPY')
class TestIsophote:
    """Fitting tests for single isophotes (uses the remote M51 image)."""

    def setup_class(self):
        # Download the M51 test image once for the whole class
        path = get_path('isophote/M51.fits', location='photutils-datasets',
                        cache=True)
        hdu = fits.open(path)
        self.data = hdu[0].data
        hdu.close()

    def test_fit(self):
        # low noise image, fitted perfectly by sample
        data = make_test_image(noise=1.e-10, seed=0)
        sample = EllipseSample(data, 40)
        fitter = EllipseFitter(sample)
        iso = fitter.fit(maxit=400)
        assert iso.valid
        assert iso.stop_code == 0 or iso.stop_code == 2
        # fitted values (checked against tight tolerance windows)
        assert iso.intens <= 201.
        assert iso.intens >= 199.
        assert iso.int_err <= 0.0010
        assert iso.int_err >= 0.0009
        assert iso.pix_stddev <= 0.03
        assert iso.pix_stddev >= 0.02
        assert abs(iso.grad) <= 4.25
        assert abs(iso.grad) >= 4.20
        # integrals
        assert iso.tflux_e <= 1.85E6
        assert iso.tflux_e >= 1.82E6
        assert iso.tflux_c <= 2.025E6
        assert iso.tflux_c >= 2.022E6
        # deviations from perfect ellipticity. Note
        # that sometimes a None covariance can be
        # generated by scipy.optimize.leastsq
        assert iso.a3 is None or abs(iso.a3) <= 0.01
        assert iso.b3 is None or abs(iso.b3) <= 0.01
        assert iso.a4 is None or abs(iso.a4) <= 0.01
        assert iso.b4 is None or abs(iso.b4) <= 0.01

    def test_m51(self):
        # Fit a single isophote on the real M51 image
        sample = EllipseSample(self.data, 21.44)
        fitter = EllipseFitter(sample)
        iso = fitter.fit()
        assert iso.valid
        assert iso.stop_code == 0 or iso.stop_code == 2
        # geometry
        g = iso.sample.geometry
        assert g.x0 >= (257 - 1.5)   # position within 1.5 pixel
        assert g.x0 <= (257 + 1.5)
        assert g.y0 >= (259 - 1.5)
        assert g.y0 <= (259 + 2.0)
        assert g.eps >= (0.19 - 0.05)   # eps within 0.05
        assert g.eps <= (0.19 + 0.05)
        assert g.pa >= (0.62 - 0.05)   # pa within 5 deg
        assert g.pa <= (0.62 + 0.05)
        # fitted values
        assert_allclose(iso.intens, 682.9, atol=0.1)
        assert_allclose(iso.rms, 83.27, atol=0.01)
        assert_allclose(iso.int_err, 7.63, atol=0.01)
        assert_allclose(iso.pix_stddev, 117.8, atol=0.1)
        assert_allclose(iso.grad, -36.08, atol=0.1)
        # integrals
        assert iso.tflux_e <= 1.20e6
        assert iso.tflux_e >= 1.19e6
        assert iso.tflux_c <= 1.38e6
        assert iso.tflux_c >= 1.36e6
        # deviations from perfect ellipticity. Note
        # that sometimes a None covariance can be
        # generated by scipy.optimize.leastsq
        assert iso.a3 is None or abs(iso.a3) <= 0.05
        assert iso.b3 is None or abs(iso.b3) <= 0.05
        assert iso.a4 is None or abs(iso.a4) <= 0.05
        assert iso.b4 is None or abs(iso.b4) <= 0.05

    def test_m51_niter(self):
        # compares with old STSDAS task. In this task, the
        # default for the starting value of SMA is 10; it
        # fits with 20 iterations.
        sample = EllipseSample(self.data, 10)
        fitter = EllipseFitter(sample)
        iso = fitter.fit()
        assert iso.valid
        assert iso.niter == 50
class TestIsophoteList:
def setup_class(self):
data = make_test_image(seed=0)
self.slen = 5
self.isolist_sma10 = self.build_list(data, sma0=10., slen=self.slen)
self.isolist_sma100 = self.build_list(data, sma0=100., slen=self.slen)
self.isolist_sma200 = self.build_list(data, sma0=200., slen=self.slen)
@staticmethod
def build_list(data, sma0, slen=5):
iso_list = []
for k in range(slen):
sample = EllipseSample(data, float(k + sma0))
sample.update(DEFAULT_FIX)
iso_list.append(Isophote(sample, k, True, 0))
result = IsophoteList(iso_list)
return result
def test_basic_list(self):
# make sure it can be indexed as a list.
result = self.isolist_sma10[:]
assert isinstance(result[0], Isophote)
# make sure the important arrays contain floats.
# especially the sma array, which is derived
# from a property in the Isophote class.
assert isinstance(result.sma, np.ndarray)
assert isinstance(result.sma[0], float)
assert isinstance(result.intens, np.ndarray)
assert isinstance(result.intens[0], float)
assert isinstance(result.rms, np.ndarray)
assert isinstance(result.int_err, np.ndarray)
assert isinstance(result.pix_stddev, np.ndarray)
assert isinstance(result.grad, np.ndarray)
assert isinstance(result.grad_error, np.ndarray)
assert isinstance(result.grad_r_error, np.ndarray)
assert isinstance(result.sarea, np.ndarray)
assert isinstance(result.niter, np.ndarray)
assert isinstance(result.ndata, np.ndarray)
assert isinstance(result.nflag, np.ndarray)
assert isinstance(result.valid, np.ndarray)
assert isinstance(result.stop_code, np.ndarray)
assert isinstance(result.tflux_c, np.ndarray)
assert isinstance(result.tflux_e, np.ndarray)
assert isinstance(result.npix_c, np.ndarray)
assert isinstance(result.npix_e, np.ndarray)
assert isinstance(result.a3, np.ndarray)
assert isinstance(result.a4, np.ndarray)
assert isinstance(result.b3, np.ndarray)
assert isinstance(result.b4, np.ndarray)
samples = result.sample
assert isinstance(samples, list)
assert isinstance(samples[0], EllipseSample)
iso = result.get_closest(13.6)
assert isinstance(iso, Isophote)
assert_allclose(iso.sma, 14., atol=1e-6)
def test_extend(self):
# the extend method shouldn't return anything,
# and should modify the first list in place.
inner_list = self.isolist_sma10[:]
outer_list = self.isolist_sma100[:]
assert len(inner_list) == self.slen
assert len(outer_list) == self.slen
inner_list.extend(outer_list)
assert len(inner_list) == 2 * self.slen
# the __iadd__ operator should behave like the
# extend method.
inner_list = self.isolist_sma10[:]
outer_list = self.isolist_sma100[:]
inner_list += outer_list
assert len(inner_list) == 2 * self.slen
# the __add__ operator should create a new IsophoteList
# instance with the result, and should not modify
# the operands.
inner_list = self.isolist_sma10[:]
outer_list = self.isolist_sma100[:]
result = inner_list + outer_list
assert isinstance(result, IsophoteList)
assert len(inner_list) == self.slen
assert len(outer_list) == self.slen
assert len(result) == 2 * self.slen
def test_slicing(self):
iso_list = self.isolist_sma10[:]
assert len(iso_list) == self.slen
assert len(iso_list[1:-1]) == self.slen - 2
assert len(iso_list[2:-2]) == self.slen - 4
def test_combined(self):
    """Mix slicing with extend/__add__ and verify the resulting lengths."""
    base = self.isolist_sma10[:]
    extra = self.isolist_sma100[:]

    # extend a slice in place; extend returns None
    trimmed = base[2:-2]
    returned = trimmed.extend(extra)
    assert not returned
    assert len(trimmed) == 2 * self.slen - 4

    # one more extend, this time with a slice of a third list
    trimmed.extend(self.isolist_sma200[1:-1])
    assert len(trimmed) == 2 * self.slen - 4 + 3

    # __add__ on a slice: new object, operands untouched
    trimmed = base[2:-2]
    summed = trimmed + extra
    assert isinstance(summed, IsophoteList)
    assert len(trimmed) == self.slen - 4
    assert len(summed) == 2 * self.slen - 4

    # __add__ directly on a slice expression
    summed = base[2:-2] + extra
    assert isinstance(summed, IsophoteList)
    assert len(summed) == 2 * self.slen - 4
def test_sort(self):
    """sort() orders isophotes by increasing semimajor axis."""
    # concatenation puts the large-sma isophotes first...
    mixed = self.isolist_sma100[2:-2] + self.isolist_sma10[:]
    assert mixed[-1].sma < mixed[0].sma
    # ...and sorting restores ascending sma order
    mixed.sort()
    assert mixed[-1].sma > mixed[0].sma
@pytest.mark.skipif('not HAS_SCIPY')
def test_to_table(self):
    """to_table() honors the columns argument; get_names() lists >= 30 names."""
    image = make_test_image(nx=55, ny=55, x0=27, y0=27,
                            background=100., noise=1.e-6, i0=100.,
                            sma=10., eps=0.2, pa=0., seed=1)
    geometry = EllipseGeometry(27, 27, 5, 0.2, 0)
    isolist = Ellipse(image, geometry=geometry,
                      threshold=0.1).fit_image(maxsma=27)
    assert len(isolist.get_names()) >= 30  # test for get_names

    # default selection and 'main' both yield the 18 principal columns
    assert len(isolist.to_table().colnames) == 18
    assert len(isolist.to_table(columns='main').colnames) == 18
    # 'all' exposes every available attribute
    assert len(isolist.to_table(columns='all').colnames) >= 30
    # explicit column lists are honored verbatim
    assert len(isolist.to_table(columns=['sma']).colnames) == 1
    flux_columns = ['tflux_e', 'tflux_c', 'npix_e', 'npix_c']
    assert len(isolist.to_table(columns=flux_columns).colnames) == 4
| |
"""
Parameters are a kind of class attribute allowing special behavior,
including dynamically generated parameter values, documentation
strings, constant and read-only parameters, and type or range checking
at assignment time.
Potentially useful for any large Python program that needs
user-modifiable object attributes; see the parameterized.Parameter and
parameterized.Parameterized classes for more information.
This file contains subclasses of Parameter, implementing specific
parameter types (e.g. Number).
$Id$
"""
__version__='$Revision$'
# CEBALERT: we need more documentation above, now that params is a
# separate directory and will be a separate package.
import os.path
from numpy import ndarray, float
from parameterized import Parameterized, Parameter, String, \
descendents, ParameterizedFunction, ParamOverrides
def produce_value(value_obj):
    """
    A helper function that produces an actual parameter from a stored
    object: if the object is callable, call it; otherwise return the
    object unchanged.
    """
    return value_obj() if callable(value_obj) else value_obj
class Dynamic(Parameter):
    """
    Parameter whose value can be generated dynamically by a callable
    object.
    If a Parameter is declared as Dynamic, it can be set a callable
    object (such as a function or callable class), and getting the
    parameter's value will call that callable.
    Note that at present, the callable object must allow attributes
    to be set on itself.
    [Python 2.4 limitation: the callable object must be an instance of a
    callable class, rather than a named function or a lambda function,
    otherwise the object will not be picklable or deepcopyable.]
    Setting Dynamic.time_fn allows the production of dynamic values to
    be controlled: a new value will be produced only if the current
    value of time_fn is greater than what it was last time the
    parameter value was requested.
    If time_fn is set to None, a new value is always produced.
    If Dynamic.time_fn is set to something other than None, it must,
    when called, produce a number.
    """
    # CB: making Dynamic support iterators and generators is sf.net
    # feature request 1864370. When working on that task, note that
    # detection of a dynamic generator by 'callable' needs to be
    # replaced by something that matches whatever Dynamic becomes
    # capable of using.

    time_fn = None # could add a slot for time_fn to allow instances
                   # to override

    # CBENHANCEMENT: Add an 'epsilon' slot.
    # See email 'Re: simulation-time-controlled Dynamic parameters'
    # Dec 22, 2007 CB->JAB

    def __init__(self,**params):
        """
        Call the superclass's __init__ and set instantiate=True if the
        default is dynamic.
        """
        super(Dynamic,self).__init__(**params)
        if callable(self.default):
            # a dynamic default must be instantiated per-object so that
            # each instance carries its own generator state
            self._set_instantiate(True)
            self._initialize_generator(self.default)

    def _initialize_generator(self,gen,obj=None):
        """
        Add 'last time' and 'last value' attributes to the generator.

        The presence of '_Dynamic_last' is what marks gen as a dynamic
        value elsewhere in this class (see __get__, _value_is_dynamic).
        """
        # CEBALERT: use a dictionary to hold these things.
        if hasattr(obj,"_Dynamic_time_fn"):
            # a per-object time_fn overrides the class-level one
            gen._Dynamic_time_fn = obj._Dynamic_time_fn
        gen._Dynamic_last = None
        # CEB: I'd use None for this, except can't compare a fixedpoint
        # number with None (e.g. 1>None but FixedPoint(1)>None can't be done)
        gen._Dynamic_time = -1
        gen._saved_Dynamic_last = []
        gen._saved_Dynamic_time = []

    def __get__(self,obj,objtype):
        """
        Call the superclass's __get__; if the result is not dynamic
        return that result, otherwise ask that result to produce a
        value and return it.
        """
        gen = super(Dynamic,self).__get__(obj,objtype)
        if not hasattr(gen,'_Dynamic_last'):
            # plain (non-dynamic) value: return as-is
            return gen
        else:
            return self._produce_value(gen)

    def __set__(self,obj,val):
        """
        Call the superclass's set and keep this parameter's
        instantiate value up to date (dynamic parameters
        must be instantiated).
        If val is dynamic, initialize it as a generator.
        """
        super(Dynamic,self).__set__(obj,val)
        dynamic = callable(val)
        if dynamic: self._initialize_generator(val,obj)
        # only a class-level set (obj is None/falsy) changes instantiate
        if not obj: self._set_instantiate(dynamic)

    def _produce_value(self,gen,force=False):
        """
        Return a value from gen.
        If there is no time_fn, then a new value will be returned
        (i.e. gen will be asked to produce a new value).
        If force is True, or the value of time_fn() is greater than
        what it was was last time produce_value was called, a
        new value will be produced and returned. Otherwise,
        the last value gen produced will be returned.
        """
        # per-generator time_fn (set in _initialize_generator) takes
        # precedence over the class-level Dynamic.time_fn
        if hasattr(gen,"_Dynamic_time_fn"):
            time_fn = gen._Dynamic_time_fn
        else:
            time_fn = self.time_fn
        if time_fn is None:
            value = produce_value(gen)
            gen._Dynamic_last = value
        else:
            time = time_fn()
            if force or time>gen._Dynamic_time:
                value = produce_value(gen)
                gen._Dynamic_last = value
                gen._Dynamic_time = time
            else:
                # time has not advanced: reuse the cached value
                value = gen._Dynamic_last
        return value

    def _value_is_dynamic(self,obj,objtype=None):
        """
        Return True if the parameter is actually dynamic (i.e. the
        value is being generated).
        """
        return hasattr(super(Dynamic,self).__get__(obj,objtype),'_Dynamic_last')

    def _inspect(self,obj,objtype=None):
        """Return the last generated value for this parameter."""
        gen=super(Dynamic,self).__get__(obj,objtype)
        if hasattr(gen,'_Dynamic_last'):
            return gen._Dynamic_last
        else:
            return gen

    def _force(self,obj,objtype=None):
        """Force a new value to be generated, and return it."""
        gen=super(Dynamic,self).__get__(obj,objtype)
        if hasattr(gen,'_Dynamic_last'):
            return self._produce_value(gen,force=True)
        else:
            return gen
# CEBALERT: isinstance(x,Number) should be possible in Python 2.6
# (Number is a new abstract base class).
# http://docs.python.org/whatsnew/2.6.html
import operator

try:
    # Python 2.x (< 3): fast C-level numeric test
    is_number = operator.isNumberType
except AttributeError:
    # operator.isNumberType was removed in Python 3; fall back to the
    # numbers ABC, with which all builtin numeric types are registered.
    from numbers import Number as _NumberABC

    def is_number(obj):
        """Return True if obj is a numeric value (int, float, complex, ...)."""
        return isinstance(obj, _NumberABC)
class Number(Dynamic):
    """
    Number is a numeric parameter. Numbers have a default value and
    optional bounds. There are two types of bounds: ``bounds`` and
    ``softbounds``. ``bounds`` are hard bounds: the parameter must
    have a value within the specified range. The default bounds are
    (None,None), meaning there are actually no hard bounds. One or
    both bounds can be set by specifying a value
    (e.g. bounds=(None,10) means there is no lower bound, and an upper
    bound of 10). Bounds are inclusive by default, but exclusivity
    can be specified for each bound by setting inclusive_bounds
    (e.g. inclusive_bounds=(True,False) specifies an exclusive upper
    bound).
    Number is also a type of Dynamic parameter, so its value
    can be set to a callable to get a dynamically generated
    number (see Dynamic).
    When not being dynamically generated, bounds are checked when a
    Number is created or set. Using a default value outside the hard
    bounds, or one that is not numeric, results in an exception. When
    being dynamically generated, bounds are checked when a the value
    of a Number is requested. A generated value that is not numeric,
    or is outside the hard bounds, results in an exception.
    As a special case, if allow_None=True (which is true by default if
    the parameter has a default of None when declared) then a value
    of None is also allowed.
    A separate function set_in_bounds() is provided that will
    silently crop the given value into the legal range, for use
    in, for instance, a GUI.
    ``softbounds`` are present to indicate the typical range of
    the parameter, but are not enforced. Setting the soft bounds
    allows, for instance, a GUI to know what values to display on
    sliders for the Number.
    Example of creating a Number::
      AB = Number(default=0.5, bounds=(None,10), softbounds=(0,1), doc='Distance from A to B.')
    """
    __slots__ = ['bounds','_softbounds','allow_None','inclusive_bounds']

    def __init__(self,default=0.0,bounds=None,softbounds=None,allow_None=False,inclusive_bounds=(True,True),**params):
        """
        Initialize this parameter object and store the bounds.
        Non-dynamic default values are checked against the bounds.
        """
        super(Number,self).__init__(default=default,**params)
        self.bounds = bounds
        self.inclusive_bounds = inclusive_bounds
        self._softbounds = softbounds
        # a default of None implicitly enables allow_None
        self.allow_None = (default is None or allow_None)
        if not callable(default): self._check_value(default)

    def __get__(self,obj,objtype):
        """
        Same as the superclass's __get__, but if the value was
        dynamically generated, check the bounds.
        """
        result = super(Number,self).__get__(obj,objtype)
        # CEBALERT: results in extra lookups (_value_is_dynamic() is
        # also looking up 'result' - should just pass it in). Note
        # that this method is called often.
        if self._value_is_dynamic(obj,objtype): self._check_value(result)
        return result

    def __set__(self,obj,val):
        """
        Set to the given value, raising an exception if out of bounds.
        """
        # callables (dynamic values) are instead checked on __get__
        if not callable(val): self._check_value(val)
        super(Number,self).__set__(obj,val)

    def set_in_bounds(self,obj,val):
        """
        Set to the given value, but cropped to be within the legal bounds.
        All objects are accepted, and no exceptions will be raised. See
        crop_to_bounds for details on how cropping is done.
        """
        if not callable(val):
            bounded_val = self.crop_to_bounds(val)
        else:
            bounded_val = val
        super(Number,self).__set__(obj,bounded_val)

    # CEBERRORALERT: doesn't take account of exclusive bounds. When
    # the gui uses set_in_bounds(), expecting to get acceptable
    # values, it actually gets an out-of-bounds error. When fixed,
    # should remove hack in
    # topo.tkgui.projectionpanel.UnitsPanel.sheet_change().
    def crop_to_bounds(self,val):
        """
        Return the given value cropped to be within the hard bounds
        for this parameter.
        If a numeric value is passed in, check it is within the hard
        bounds. If it is larger than the high bound, return the high
        bound. If it's smaller, return the low bound. In either case, the
        returned value could be None. If a non-numeric value is passed
        in, set to be the default value (which could be None). In no
        case is an exception raised; all values are accepted.
        """
        # Currently, values outside the bounds are silently cropped to
        # be inside the bounds; it may be appropriate to add a warning
        # in such cases.
        # Identity tests ('is None') are used instead of '==' so that
        # bound types with unusual comparison semantics (e.g.
        # FixedPoint, cf. the note in Dynamic._initialize_generator)
        # cannot break the None checks.
        if (is_number(val)):
            if self.bounds is None:
                return val
            vmin, vmax = self.bounds
            if vmin is not None:
                if val < vmin:
                    return vmin
            if vmax is not None:
                if val > vmax:
                    return vmax
        elif self.allow_None and val is None:
            return val
        else:
            # non-numeric value sent in: reverts to default value
            return self.default
        return val

    def _checkBounds(self, val):
        """
        Raise ValueError if val violates the hard bounds, honoring
        the per-bound inclusive/exclusive flags.
        """
        if self.bounds is not None:
            vmin,vmax = self.bounds
            incmin,incmax = self.inclusive_bounds
            if vmax is not None:
                if incmax is True:
                    if not val <= vmax:
                        raise ValueError("Parameter '%s' must be at most %s"%(self._attrib_name,vmax))
                else:
                    if not val < vmax:
                        raise ValueError("Parameter '%s' must be less than %s"%(self._attrib_name,vmax))
            if vmin is not None:
                if incmin is True:
                    if not val >= vmin:
                        raise ValueError("Parameter '%s' must be at least %s"%(self._attrib_name,vmin))
                else:
                    if not val > vmin:
                        raise ValueError("Parameter '%s' must be greater than %s"%(self._attrib_name,vmin))

    def _check_value(self,val):
        """
        Checks that the value is numeric and that it is within the hard
        bounds; if not, an exception is raised.
        """
        if self.allow_None and val is None:
            return
        if not (is_number(val)):
            raise ValueError("Parameter '%s' only takes numeric values"%(self._attrib_name))
        self._checkBounds(val)

    def get_soft_bounds(self):
        """
        For each soft bound (upper and lower), if there is a defined bound (not equal to None)
        then it is returned, otherwise it defaults to the hard bound. The hard bound could still be None.
        """
        hl,hu = (None,None) if self.bounds is None else self.bounds
        sl,su = (None,None) if self._softbounds is None else self._softbounds
        l = hl if sl is None else sl
        u = hu if su is None else su
        return (l,u)
class Integer(Number):
    """Numeric parameter that only accepts integer values (plus None when allowed)."""
    def _check_value(self,val):
        """Raise ValueError unless val is an int (or an allowed None)."""
        # identity test rather than '==': safe for values with unusual
        # __eq__ semantics, and matches the None handling elsewhere
        if self.allow_None and val is None:
            return
        if not isinstance(val,int):
            raise ValueError("Parameter '%s' must be an integer."%self._attrib_name)
        self._checkBounds(val)
class Magnitude(Number):
    """A Number constrained to the unit interval [0.0, 1.0]."""
    def __init__(self,default=1.0,softbounds=None,**params):
        # hard bounds are fixed; only the default and soft bounds vary
        super(Magnitude,self).__init__(default=default,bounds=(0.0,1.0),
                                       softbounds=softbounds,**params)
# JAB: Should this and other Parameters below be a Dynamic instead?
class Boolean(Parameter):
    """Parameter accepting True/False (and, when allow_None is set, None)."""
    __slots__ = ['bounds','allow_None']

    # CB: what does bounds=(0,1) mean/do for this Parameter?
    def __init__(self,default=False,bounds=(0,1),allow_None=False,**params):
        self.bounds = bounds
        # a None default implicitly enables allow_None
        self.allow_None = (default is None or allow_None)
        Parameter.__init__(self,default=default,**params)

    def __set__(self,obj,val):
        """Reject anything that is not a bool (or an allowed None)."""
        acceptable = isinstance(val,bool) or (self.allow_None and val is None)
        if not acceptable:
            if self.allow_None:
                raise ValueError("Boolean '%s' only takes a Boolean value or None."
                                 %self._attrib_name)
            raise ValueError("Boolean '%s' only takes a Boolean value."%self._attrib_name)
        super(Boolean,self).__set__(obj,val)
class NumericTuple(Parameter):
    """Parameter holding a tuple of numbers with a fixed number of elements."""
    __slots__ = ['length']

    def __init__(self,default=(0,0),length=None,**params):
        """
        Initialize a numeric tuple parameter whose length is fixed at
        construction time: either the supplied length or, when that is
        None, the length of the default value. The length cannot change
        after instantiation.
        """
        self.length = len(default) if length is None else length
        self._check(default)
        Parameter.__init__(self,default=default,**params)

    def _check(self,val):
        """Raise ValueError unless val is a tuple of self.length numbers."""
        if not isinstance(val,tuple):
            raise ValueError("NumericTuple '%s' only takes a tuple value."%self._attrib_name)
        if not len(val)==self.length:
            raise ValueError("%s: tuple is not of the correct length (%d instead of %d)." %
                             (self._attrib_name,len(val),self.length))
        for element in val:
            if not is_number(element):
                raise ValueError("%s: tuple element is not numeric: %s." % (self._attrib_name,str(element)))

    def __set__(self,obj,val):
        """Validate, then delegate the assignment to Parameter.__set__."""
        self._check(val)
        super(NumericTuple,self).__set__(obj,val)
class XYCoordinates(NumericTuple):
    """A NumericTuple of length two, representing an (x, y) position."""
    def __init__(self,default=(0.0,0.0),**params):
        # fixed length of 2; everything else passes through
        super(XYCoordinates,self).__init__(default=default,length=2,**params)
class Callable(Parameter):
    """
    Parameter holding a value that is a callable object, such as a function.
    A keyword argument instantiate=True should be provided when a
    function object is used that might have state. On the other hand,
    regular standalone functions cannot be deepcopied as of Python
    2.4, so instantiate must be False for those values.
    """
    def __set__(self,obj,val):
        """Reject non-callables, then delegate to Parameter.__set__."""
        if not callable(val):
            raise ValueError("Callable '%s' only takes a callable object."%self._attrib_name)
        super(Callable,self).__set__(obj,val)
# CBNOTE: python now has abstract base classes, so we could update
# this. At least if the check is in a method, all such checks could be
# changed at once.
def is_abstract(class_):
    """Return the class's 'abstract' attribute, or False if it has none."""
    return getattr(class_, 'abstract', False)
# CEBALERT: this should be a method of ClassSelector.
def concrete_descendents(parentclass):
    """
    Return a dictionary containing all subclasses of the specified
    parentclass, including the parentclass. Only classes that are
    defined in scripts that have been run or modules that have been
    imported are included, so the caller will usually first do ``from
    package import *``.
    Only non-abstract classes will be included.
    """
    return dict((c.__name__, c)
                for c in descendents(parentclass) if not is_abstract(c))
class Composite(Parameter):
    """
    A parameter that is in fact a composite of a set of other
    parameters or attributes of the class. The constructor argument
    'attribs' takes a list of attribute names. Getting the parameter
    returns a list of the values of the constituents of the composite,
    in the order specified. Likewise, setting the parameter takes a
    sequence of values and sets the value of the constituent
    attributes.
    """
    # NOTE(review): 'objtype' is declared as a slot but never assigned
    # in this class; presumably the owning Parameterized machinery sets
    # it -- confirm before relying on the class-level __set__ branch.
    __slots__=['attribs','objtype']

    def __init__(self,attribs=None,**kw):
        # avoid a shared mutable default for attribs
        if attribs is None:
            attribs = []
        super(Composite,self).__init__(default=None,**kw)
        self.attribs = attribs

    def __get__(self,obj,objtype):
        """
        Return the values of all the attribs, as a list.
        """
        # class-level access (obj is None/falsy) reads class attributes
        if not obj:
            return [getattr(objtype,a) for a in self.attribs]
        else:
            return [getattr(obj,a) for a in self.attribs]

    def __set__(self,obj,val):
        """
        Set the values of all the attribs.
        """
        # val must supply exactly one value per constituent attribute
        assert len(val) == len(self.attribs),"Compound parameter '%s' got the wrong number of values (needed %d, but got %d)." % (self._attrib_name,len(self.attribs),len(val))
        if not obj:
            # class-level set: write onto the stored owning class
            for a,v in zip(self.attribs,val):
                setattr(self.objtype,a,v)
        else:
            for a,v in zip(self.attribs,val):
                setattr(obj,a,v)
class Selector(Parameter):
    """
    Parameter whose value is set to some form of one of the
    possibilities in its range.
    Subclasses must implement get_range().
    """
    # name-mangled flag marking this class itself (not subclasses)
    # as abstract
    __abstract = True

    def get_range(self):
        # abstract: concrete subclasses must return their legal values
        raise NotImplementedError("get_range() must be implemented in subclasses.")
class ObjectSelector(Selector):
    """
    Parameter whose value is set to an object from its list of
    possible objects.
    check_on_set restricts the value to be among the current list of
    objects. By default, if objects are initially supplied,
    check_on_set is True, whereas if no objects are initially
    supplied, check_on_set is False. This can be overridden by
    explicitly specifying check_on_set initially.
    If check_on_set is True (either because objects are supplied
    initially, or because it is explicitly specified), the default
    (initial) value must be among the list of objects (unless the
    default value is None).
    """
    __slots__ = ['objects','compute_default_fn','check_on_set']

    # ObjectSelector is usually used to allow selection from a list of
    # existing objects, therefore instantiate is False by default.
    def __init__(self,default=None,objects=None,instantiate=False,
                 compute_default_fn=None,check_on_set=None,**params):
        # avoid a shared mutable default for the objects list
        if objects is None:
            objects = []
        self.objects = objects
        self.compute_default_fn = compute_default_fn
        # explicit check_on_set wins; otherwise infer it from whether
        # any objects were supplied (see class docstring)
        if check_on_set is not None:
            self.check_on_set=check_on_set
        elif len(objects)==0:
            self.check_on_set=False
        else:
            self.check_on_set=True
        # a None default is always acceptable; anything else must be
        # in the objects list when checking is enabled
        if default is not None and self.check_on_set is True:
            self._check_value(default)
        super(ObjectSelector,self).__init__(default=default,instantiate=instantiate,**params)

    # CBNOTE: if the list of objects is changed, the current value for
    # this parameter in existing POs could be out of the new range.

    def compute_default(self):
        """
        If this parameter's compute_default_fn is callable, call it
        and store the result in self.default.
        Also removes None from the list of objects (if the default is
        no longer None).
        """
        if self.default is None and callable(self.compute_default_fn):
            self.default=self.compute_default_fn()
            # make the computed default selectable
            if self.default not in self.objects:
                self.objects.append(self.default)

    def _check_value(self,val,obj=None):
        """
        val must be None or one of the objects in self.objects.
        """
        if not val in self.objects:
            # CEBALERT: can be called before __init__ has called
            # super's __init__, i.e. before attrib_name has been set.
            try:
                attrib_name = self._attrib_name
            except AttributeError:
                attrib_name = ""
            raise ValueError("%s not in Parameter %s's list of possible objects" \
                             %(val,attrib_name))

    # CBNOTE: I think it's not helpful to do a type check for the value of
    # an ObjectSelector. If we did such type checking, any user
    # of this Parameter would have to be sure to update the list of possible
    # objects before setting the Parameter's value. As it is, only users who care about the
    # correct list of objects being displayed need to update the list.
    def __set__(self,obj,val):
        # validate only when check_on_set was enabled (see __init__)
        if self.check_on_set:
            self._check_value(val,obj)
        super(ObjectSelector,self).__set__(obj,val)

    # CebAlert; move some bits into superclass (same for clsselector)?
    def get_range(self):
        """
        Return the possible objects to which this parameter could be set.
        (Returns the dictionary {object.name:object}.)
        """
        # CEBHACKALERT: was written assuming it would only operate on
        # Parameterized instances. Think this is an sf.net bug/feature
        # request. Temporary fix: don't use obj.name if unavailable.
        try:
            d=dict([(obj.name,obj) for obj in self.objects])
        except AttributeError:
            # objects without a .name attribute key on themselves
            d=dict([(obj,obj) for obj in self.objects])
        return d
class ClassSelector(Selector):
    """
    Parameter whose value is an instance of the specified class.
    """
    # CEBALERT: allow_None already a slot from superclass?
    __slots__ = ['class_','allow_None']

    def __init__(self,class_,default=None,instantiate=True,allow_None=False,**params):
        self.class_ = class_
        # a None default implicitly enables allow_None
        self.allow_None = (default is None or allow_None)
        self._check_value(default)
        super(ClassSelector,self).__init__(default=default,instantiate=instantiate,**params)

    def _check_value(self,val,obj=None):
        """val must be None (when allowed) or an instance of self.class_."""
        permitted = isinstance(val,self.class_) or (val is None and self.allow_None)
        if not permitted:
            raise ValueError(
                "Parameter '%s' value must be an instance of %s, not '%s'" %
                (self._attrib_name, self.class_.__name__, val))

    def __set__(self,obj,val):
        """Validate, then delegate the assignment to the superclass."""
        self._check_value(val,obj)
        super(ClassSelector,self).__set__(obj,val)

    def get_range(self):
        """
        Return the possible types for this parameter's value.
        (I.e. return {name: <class>} for all classes that are
        concrete_descendents() of self.class_.)
        Only classes from modules that have been imported are added
        (see concrete_descendents()).
        """
        d = dict(concrete_descendents(self.class_))
        if self.allow_None:
            d['None']=None
        return d
class List(Parameter):
    """
    Parameter whose value is a list of objects, usually of a specified type.
    The bounds allow a minimum and/or maximum length of
    list to be enforced. If the class is non-None, all
    items in the list are checked to be of that type.
    """
    __slots__ = ['class_','bounds']

    def __init__(self,default=None,class_=None,instantiate=True,
                 bounds=(0,None),**params):
        """
        Store the item class and length bounds, validate the default,
        and delegate to Parameter.__init__.
        """
        # Avoid the classic mutable-default-argument bug: the previous
        # 'default=[]' shared one list object across every List that
        # was created without an explicit default.
        if default is None:
            default = []
        self.class_ = class_
        self.bounds = bounds
        self._check_bounds(default)
        Parameter.__init__(self,default=default,instantiate=instantiate,
                           **params)

    # Could add range() method from ClassSelector, to allow
    # list to be populated in the GUI

    def __set__(self,obj,val):
        """Set to the given value, raising an exception if out of bounds."""
        self._check_bounds(val)
        super(List,self).__set__(obj,val)

    def _check_bounds(self,val):
        """
        Checks that the list is of the right length and has the right contents.
        Otherwise, an exception is raised.
        """
        if not (isinstance(val,list)):
            raise ValueError("List '%s' must be a list."%(self._attrib_name))
        # identity tests ('is not None'): bounds entries are ints or None
        if self.bounds is not None:
            min_length,max_length = self.bounds
            l=len(val)
            if min_length is not None and max_length is not None:
                if not (min_length <= l <= max_length):
                    raise ValueError("%s: list length must be between %s and %s (inclusive)"%(self._attrib_name,min_length,max_length))
            elif min_length is not None:
                if not min_length <= l:
                    raise ValueError("%s: list length must be at least %s."%(self._attrib_name,min_length))
            elif max_length is not None:
                if not l <= max_length:
                    raise ValueError("%s: list length must be at most %s."%(self._attrib_name,max_length))
        self._check_type(val)

    def _check_type(self,val):
        """When a class_ was given, every element must be an instance of it."""
        if self.class_ is not None:
            for v in val:
                assert isinstance(v,self.class_),repr(v)+" is not an instance of " + repr(self.class_) + "."
class HookList(List):
    """
    Parameter whose value is a list of callable objects.
    This type of List Parameter is typically used to provide a place
    for users to register a set of commands to be called at a
    specified place in some sequence of processing steps.
    """
    # List already declares 'class_' and 'bounds'; redeclaring them here
    # (as the original did) creates duplicate slot descriptors that
    # shadow the parent's and waste per-instance storage.
    __slots__ = []

    def _check_type(self,val):
        """Every element must be callable (signatures are not checked)."""
        for v in val:
            assert callable(v),repr(v)+" is not callable."
class Dict(ClassSelector):
    """
    Parameter whose value is a dictionary.
    """
    def __init__(self,**params):
        # simply a ClassSelector fixed to the builtin dict type
        super(Dict,self).__init__(dict,**params)
class Array(ClassSelector):
    """
    Parameter whose value is a numpy array.
    """
    def __init__(self, **params):
        # None is always acceptable in place of an actual array
        super(Array,self).__init__(ndarray, allow_None=True, **params)
# For portable code:
# - specify paths in unix (rather than Windows) style;
# - use resolve_file_path() for paths to existing files to be read,
# - use resolve_folder_path() for paths to existing folders to be read,
# and normalize_path() for paths to new files to be written.
class resolve_path(ParameterizedFunction):
    """
    Find the path to an existing file, searching the paths specified
    in the search_paths parameter if the filename is not absolute, and
    converting a UNIX-style path to the current OS's format if
    necessary.
    To turn a supplied relative path into an absolute one, the path is
    appended to paths in the search_paths parameter, in order, until
    the file is found.
    An IOError is raised if the file is not found.
    Similar to Python's os.path.abspath(), except more search paths
    than just os.getcwd() can be used, and the file must exist.
    """
    search_paths = List(default=[os.getcwd()], pickle_default_value=False, doc="""
        Prepended to a relative path, in order, until a file is
        found.""")

    path_to_file = Boolean(default=True, pickle_default_value=False, doc="""
        String specifying whether the path refers to a 'File' or a 'Folder'.""")

    def __call__(self, path, **params):
        """
        Return the resolved, OS-normalized path to an existing file or
        folder, raising IOError if nothing is found.
        """
        p = ParamOverrides(self, params)
        path = os.path.normpath(path)
        # path_to_file is a Boolean parameter, so a simple if/else fully
        # covers it.  (The original also had unreachable 'else' branches
        # that referenced a nonexistent 'p.path_type' attribute and would
        # have raised AttributeError had they ever run.)
        exists = os.path.isfile if p.path_to_file else os.path.isdir
        kind = "File" if p.path_to_file else "Folder"
        if os.path.isabs(path):
            if exists(path):
                return path
            raise IOError("%s '%s' not found." % (kind, path))
        paths_tried = []
        for prefix in p.search_paths:
            try_path = os.path.join(os.path.normpath(prefix), path)
            if exists(try_path):
                return try_path
            paths_tried.append(try_path)
        raise IOError(os.path.split(path)[1] + " was not found in the following place(s): " + str(paths_tried) + ".")
class normalize_path(ParameterizedFunction):
    """
    Convert a UNIX-style path to the current OS's format,
    typically for creating a new file or directory.
    If the path is not already absolute, it will be made absolute
    (using the prefix parameter).
    Should do the same as Python's os.path.abspath(), except using
    prefix rather than os.getcwd).
    """
    prefix = String(default=os.getcwd(),pickle_default_value=False,doc="""
        Prepended to the specified path, if that path is not
        absolute.""")

    def __call__(self,path="",**params):
        """Return the normalized, absolute form of path."""
        p = ParamOverrides(self,params)
        if os.path.isabs(path):
            return os.path.normpath(path)
        # anchor relative paths at the (normalized) prefix
        return os.path.normpath(os.path.join(os.path.normpath(p.prefix),path))
class Path(Parameter):
    """
    Parameter that can be set to a string specifying the path of a
    file or folder (in unix style); returns it in the format of the
    user's operating system. Please use the Filename or Foldername
    classes if you require discrimination between the two.
    The specified path can be absolute, or relative to either:
    * any of the paths specified in the search_paths attribute (if
      search_paths is not None);
    or
    * any of the paths searched by resolve_path() (if search_paths
      is None).
    """
    __slots__ = ['search_paths']

    def __init__(self, default=None, search_paths=None, **params):
        # avoid a shared mutable default for search_paths
        if search_paths is None:
            search_paths = []
        # deliberately not pickled; see __getstate__
        self.search_paths = search_paths
        super(Path,self).__init__(default,**params)

    def _resolve(self, path):
        """Resolve path via resolve_path, using search_paths when set."""
        if self.search_paths:
            return resolve_path(path, search_paths=self.search_paths)
        else:
            return resolve_path(path)

    def __set__(self, obj, val):
        """
        Call Parameter's __set__, but warn if the file cannot be found.
        """
        try:
            self._resolve(val)
        # 'except ... as ...' (valid on Python 2.6+ and 3.x); the
        # original comma form is a syntax error under Python 3
        except IOError as e:
            Parameterized(name="%s.%s"%(obj.name,self._attrib_name)).warning('%s'%(e.args[0]))
        super(Path,self).__set__(obj,val)

    def __get__(self, obj, objtype):
        """
        Return an absolute, normalized path (see resolve_path).
        """
        raw_path = super(Path,self).__get__(obj,objtype)
        return self._resolve(raw_path)

    def __getstate__(self):
        # don't want to pickle the search_paths
        state = super(Path,self).__getstate__()
        if 'search_paths' in state:
            state['search_paths'] = []
        return state
class Filename(Path):
    """
    Parameter that can be set to a string specifying the path of a
    file (in unix style); returns it in the format of the user's
    operating system.
    The specified path can be absolute, or relative to either:
    * any of the paths specified in the search_paths attribute (if
      search_paths is not None);
    or
    * any of the paths searched by resolve_path() (if search_paths
      is None).
    """
    def _resolve(self, path):
        """Resolve path as a *file*, optionally via search_paths."""
        kwargs = {'path_to_file': True}
        if self.search_paths:
            kwargs['search_paths'] = self.search_paths
        return resolve_path(path, **kwargs)
class Foldername(Path):
    """
    Parameter that can be set to a string specifying the
    path of a folder (in unix style); returns it in the format of
    the user's operating system.
    The specified path can be absolute, or relative to either:
    * any of the paths specified in the search_paths attribute (if
      search_paths is not None);
    or
    * any of the paths searched by resolve_dir_path() (if search_paths
      is None).
    """
    def _resolve(self, path):
        """Resolve path as a *folder*, optionally via search_paths."""
        kwargs = {'path_to_file': False}
        if self.search_paths:
            kwargs['search_paths'] = self.search_paths
        return resolve_path(path, **kwargs)
| |
import pybel
from pybel import *
import copy_reg
import numpy as np
from openbabel import OBAtomAtomIter,OBTypeTable
from oddt.spatial import angle, angle_2v, dihedral
backend = 'ob'  # identifies the OpenBabel backend to the rest of oddt
# setup typetable to translate atom types (internal 'INT' -> Sybyl 'SYB')
typetable = OBTypeTable()
typetable.SetFromType('INT')
typetable.SetToType('SYB')
# hash OB! -- globally silence OpenBabel's error log
pybel.ob.obErrorLog.StopLogging()
class Molecule(pybel.Molecule):
def __init__(self, OBMol, protein = False):
    """Wrap an OpenBabel OBMol; protein=True flags the molecule as a protein."""
    # call parent constructor
    super(Molecule,self).__init__(OBMol)
    self.protein = protein
    #ob.DeterminePeptideBackbone(molecule.OBMol)
    # percieve chains in residues
    #if len(res_dict) > 1 and not molecule.OBMol.HasChainsPerceived():
    #    print "Dirty HACK"
    #    molecule = pybel.readstring('pdb', molecule.write('pdb'))
    # lazy caches, filled on first access of the corresponding property
    self._atom_dict = None
    self._res_dict = None
    self._ring_dict = None
    self._coords = None
    self._charges = None
# cache frequently used properties and cache them in prefixed [_] variables
@property
def coords(self):
    """Per-atom coordinates as a numpy array, computed once and cached."""
    if self._coords is None:
        self._coords = np.array([a.coords for a in self.atoms])
    return self._coords
@property
def charges(self):
if self._charges is None:
self._charges = np.array([atom.partialcharge for atom in self.atoms])
return self._charges
### Backport code implementing resudues (by me) to support older versions of OB (aka 'stable')
@property
def residue(self): return Residue(self.OBAtom.GetResidue())
#### Custom ODDT properties ####
def __getattr__(self, attr):
for desc in pybel._descdict.keys():
if attr.lower() == desc.lower():
return self.calcdesc([desc])[desc]
raise AttributeError('Molecule has no such property: %s' % attr)
@property
def num_rotors(self):
return self.OBMol.NumRotors()
def _repr_html_():
return self.write('svg')
@property
def atom_dict(self):
# check cache and generate dicts
if self._atom_dict is None:
self._dicts()
return self._atom_dict
@property
def res_dict(self):
# check cache and generate dicts
if self._res_dict is None:
self._dicts()
return self._res_dict
@property
def ring_dict(self):
# check cache and generate dicts
if self._ring_dict is None:
self._dicts()
return self._ring_dict
@property
def clone(self):
return Molecule(ob.OBMol(self.OBMol))
def clone_coords(self, source):
self.OBMol.SetCoordinates(source.OBMol.GetCoordinates())
return self
def _dicts(self):
# Atoms
atom_dtype = [('id', 'int16'),
# atom info
('coords', 'float16', 3),
('charge', 'float16'),
('atomicnum', 'int8'),
('atomtype','a4'),
('hybridization', 'int8'),
('neighbors', 'float16', (4,3)), # non-H neighbors coordinates for angles (max of 6 neighbors should be enough)
# residue info
('resid', 'int16'),
('resname', 'a3'),
('isbackbone', 'bool'),
# atom properties
('isacceptor', 'bool'),
('isdonor', 'bool'),
('isdonorh', 'bool'),
('ismetal', 'bool'),
('ishydrophobe', 'bool'),
('isaromatic', 'bool'),
('isminus', 'bool'),
('isplus', 'bool'),
('ishalogen', 'bool'),
# secondary structure
('isalpha', 'bool'),
('isbeta', 'bool')
]
a = []
atom_dict = np.empty(self.OBMol.NumAtoms(), dtype=atom_dtype)
metals = [3,4,11,12,13,19,20,21,22,23,24,25,26,27,28,29,30,31,37,38,39,40,41,42,43,44,45,46,47,48,49,50,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,87,88,89,90,91,
92,93,94,95,96,97,98,99,100,101,102,103]
for i, atom in enumerate(self.atoms):
atomicnum = atom.atomicnum
# skip non-polar hydrogens for performance
# if atomicnum == 1 and atom.OBAtom.IsNonPolarHydrogen():
# continue
atomtype = typetable.Translate(atom.type) # sybyl atom type
partialcharge = atom.partialcharge
coords = atom.coords
if self.protein:
residue = pybel.Residue(atom.OBAtom.GetResidue())
else:
residue = False
# get neighbors, but only for those atoms which realy need them
neighbors = np.empty(4, dtype=[('coords', 'float16', 3),('atomicnum', 'int8')])
neighbors.fill(np.nan)
for n, nbr_atom in enumerate(atom.neighbors):
# concider raising neighbors list to 6, but must do some benchmarks
if n > 3:
break
nbr_atomicnum = nbr_atom.atomicnum
neighbors[n] = (nbr_atom.coords, nbr_atomicnum)
atom_dict[i] = (atom.idx,
coords,
partialcharge,
atomicnum,
atomtype,
atom.OBAtom.GetHyb(),
neighbors['coords'], #n_coords,
# residue info
residue.idx if residue else 0,
residue.name if residue else '',
residue.OBResidue.GetAtomProperty(atom.OBAtom, 2) if residue else False, # is backbone
# atom properties
atom.OBAtom.IsHbondAcceptor(),
atom.OBAtom.IsHbondDonor(),
atom.OBAtom.IsHbondDonorH(),
atomicnum in metals,
atomicnum == 6 and not (np.in1d(neighbors['atomicnum'], [6,1])).any(), #hydrophobe #doble negation, since nan gives False
atom.OBAtom.IsAromatic(),
atomtype in ['O3-', '02-' 'O-'], # is charged (minus)
atomtype in ['N3+', 'N2+', 'Ng+'], # is charged (plus)
atomicnum in [9,17,35,53], # is halogen?
False, # alpha
False # beta
)
if self.protein:
# Protein Residues (alpha helix and beta sheet)
res_dtype = [('id', 'int16'),
('resname', 'a3'),
('N', 'float16', 3),
('CA', 'float16', 3),
('C', 'float16', 3),
('isalpha', 'bool'),
('isbeta', 'bool')
] # N, CA, C
b = []
for residue in self.residues:
backbone = {}
for atom in residue:
if residue.OBResidue.GetAtomProperty(atom.OBAtom,1):
if atom.atomicnum == 7:
backbone['N'] = atom.coords
elif atom.atomicnum == 6:
if atom.type == 'C3':
backbone['CA'] = atom.coords
else:
backbone['C'] = atom.coords
if len(backbone.keys()) == 3:
b.append((residue.idx, residue.name, backbone['N'], backbone['CA'], backbone['C'], False, False))
res_dict = np.array(b, dtype=res_dtype)
# detect secondary structure by phi and psi angles
first = res_dict[:-1]
second = res_dict[1:]
psi = dihedral(first['N'], first['CA'], first['C'], second['N'])
phi = dihedral(first['C'], second['N'], second['CA'], second['C'])
# mark atoms belonging to alpha and beta
res_mask_alpha = np.where(((phi > -145) & (phi < -35) & (psi > -70) & (psi < 50))) # alpha
res_dict['isalpha'][res_mask_alpha] = True
for i in res_dict[res_mask_alpha]['id']:
atom_dict['isalpha'][atom_dict['resid'] == i] = True
res_mask_beta = np.where(((phi >= -180) & (phi < -40) & (psi <= 180) & (psi > 90)) | ((phi >= -180) & (phi < -70) & (psi <= -165))) # beta
res_dict['isbeta'][res_mask_beta] = True
atom_dict['isbeta'][np.in1d(atom_dict['resid'], res_dict[res_mask_beta]['id'])] = True
# Aromatic Rings
r = []
for ring in self.sssr:
if ring.IsAromatic():
path = ring._path
atom = atom_dict[atom_dict['id'] == path[0]]
coords = atom_dict[np.in1d(atom_dict['id'], path)]['coords']
centroid = coords.mean(axis=0)
# get vector perpendicular to ring
vector = np.cross(coords - np.vstack((coords[1:],coords[:1])), np.vstack((coords[1:],coords[:1])) - np.vstack((coords[2:],coords[:2]))).mean(axis=0) - centroid
r.append((centroid, vector, atom['isalpha'], atom['isbeta']))
ring_dict = np.array(r, dtype=[('centroid', 'float16', 3),('vector', 'float16', 3),('isalpha', 'bool'),('isbeta', 'bool'),])
self._atom_dict = atom_dict
self._ring_dict = ring_dict
if self.protein:
self._res_dict = res_dict
### Extend pybel.Molecule
# Replace pybel's Molecule globally so downstream code gets the ODDT version.
pybel.Molecule = Molecule
class Atom(pybel.Atom):
    """pybel.Atom extended with neighbor and residue accessors."""
    @property
    def neighbors(self):
        """Atoms bonded to this atom, wrapped in the extended Atom class."""
        return [Atom(nbr) for nbr in OBAtomAtomIter(self.OBAtom)]
    @property
    def residue(self):
        """The Residue this atom belongs to."""
        return Residue(self.OBAtom.GetResidue())
# Replace pybel's Atom globally so the extended accessors are available.
pybel.Atom = Atom
class Residue(object):
    """Wrapper around an Open Babel OBResidue.

    Attributes:
        atoms, idx, name (see the Open Babel documentation for details).

    The underlying Open Babel residue is available through the
    ``OBResidue`` attribute.
    """
    def __init__(self, OBResidue):
        self.OBResidue = OBResidue
    @property
    def atoms(self):
        """Atoms belonging to this residue, wrapped as Atom objects."""
        return [Atom(a) for a in ob.OBResidueAtomIter(self.OBResidue)]
    @property
    def idx(self):
        """Internal index of the residue."""
        return self.OBResidue.GetIdx()
    @property
    def name(self):
        """Residue name (e.g. the three-letter amino-acid code)."""
        return self.OBResidue.GetName()
    def __iter__(self):
        """Iterate over the residue's atoms, enabling ``for atom in residue``."""
        return iter(self.atoms)
class Fingerprint(pybel.Fingerprint):
    """pybel.Fingerprint extended with a raw (unrolled) bit view."""
    @property
    def raw(self):
        """Fingerprint expanded from packed unsigned ints to a flat 0/1 array."""
        return _unrollbits(self.fp, pybel.ob.OBFingerprint.Getbitsperint())
def _unrollbits(fp, bitsperint):
""" Unroll unsigned int fingerprint to bool """
ans = np.zeros(len(fp)*bitsperint)
start = 1
for x in fp:
i = start
while x > 0:
ans[i] = x % 2
x >>= 1
i += 1
start += bitsperint
return ans
# Replace pybel's Fingerprint globally so .raw is available everywhere.
pybel.Fingerprint = Fingerprint
### Monkeypatch pybel objects pickling
# Text format used to round-trip molecules through pickle.
pickle_format = 'mol2'
def pickle_mol(self):
    # Reduce a Molecule to (mol2 text, data dict) for pickling.
    return unpickle_mol, (self.write(pickle_format), dict(self.data.items()))
def unpickle_mol(string, data):
    # Rebuild a Molecule from its mol2 text and restore its data fields.
    mol = readstring(pickle_format, string)
    mol.data.update(data)
    return mol
# NOTE(review): copy_reg is the Python 2 module name (copyreg in Python 3).
copy_reg.pickle(Molecule, pickle_mol, unpickle_mol)
| |
#!/usr/bin/env python
'''Description'''
import sys
import os
import logging
logging.basicConfig(level=logging.DEBUG)
import pandas as pd
import numpy as np
from collections import OrderedDict
from bokeh.plotting import figure, curdoc
from bokeh.models import Plot, ColumnDataSource, Range1d, HoverTool
from bokeh.properties import Instance, String
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import (HBox, Slider, TextInput,
VBox, VBoxForm, Select,
PreText, DataTable,
)
from bokeh.models.widgets.tables import TableColumn
# Load the fluorescent-protein database shipped alongside the app.
_data = pd.read_csv(os.path.join('data',
                                 'FPD-non-redundant-processed.csv'),
                    delimiter=',')
# Keep only records that have both excitation and emission wavelengths.
d = _data.ix[:, ['excitation_new', 'emission_new']].dropna()
# NOTE(review): DataFrame.ix is long deprecated -- this file targets the
# old bokeh/pandas stack it was written against.
data = _data.ix[d.index,['emission_new',
                         'emission_alt',
                         'excitation_new',
                         'excitation_alt',
                         'excitation_color_new',
                         'excitation_color_class',
                         'emission_color_new',
                         'emission_color_class',
                         'chromophore_name',
                         'chromophore_class',
                         'fpid',
                         'uniprot',
                         'pdb_id',
                         'genbank',
                         'mutation',
                         'quantum_yield',
                         'pka',
                         'protein_name',
                         'amino_acid_sequence',
                         'doi',
                         ]].fillna('')
# Distinct chromophore classes offered by the filter dropdown.
CHROMOPHORES = sorted(set(data['chromophore_class']))
# Padding (nm) applied around the data extremes for slider/axis bounds.
xy_margin = 80
max_emission = max(data['emission_new']) + xy_margin
min_emission = min(data['emission_new']) - xy_margin
max_excitation = max(data['excitation_new']) + xy_margin
min_excitation = min(data['excitation_new']) - xy_margin
class FPDApp(HBox):
    """Bokeh-server app exploring the fluorescent-protein database.

    Layout: a scatter of excitation vs. emission wavelength, slider and
    dropdown filters, and a data table of the currently filtered rows.
    """
    extra_generated_classes = [["FPDApp", "FPDApp", "HBox"]]
    # layout containers
    main_frame = Instance(VBox)
    top_frame = Instance(HBox)
    table_frame = Instance(HBox)
    input_frame = Instance(VBoxForm)
    plot_frame = Instance(HBox)
    # widget instances
    min_excitation = Instance(Slider)
    max_excitation = Instance(Slider)
    min_emission = Instance(Slider)
    max_emission = Instance(Slider)
    chrom_class_select = Instance(Select)
    # chrom_class = String(default='All')
    data_table = Instance(DataTable)
    plot = Instance(Plot)
    source = Instance(ColumnDataSource)
    # pretext = Instance(PreText)
    @classmethod
    def create(cls):
        """Build a fully wired app instance (widgets, source, plot, layout)."""
        obj = cls()
        obj.init_input()
        obj.init_source()
        obj.init_plot()
        obj.set_children()
        return obj
    def __init__(self, *args, **kwargs):
        super(FPDApp, self).__init__(*args, **kwargs)
    def init_source(self):
        """Create the shared ColumnDataSource and the data table over it."""
        self.source = ColumnDataSource(data=data)
        self.data_table = DataTable(source=self.source, columns=[
            TableColumn(field='fpid', title='FPID'),
            TableColumn(field='chromophore_name', title='chromophore_name'),
            TableColumn(field='chromophore_class', title='chromophore_class'),
            TableColumn(field='protein_name', title='Protein name'),
            TableColumn(field='excitation_new', title='Excitation'),
            TableColumn(field='emission_new', title='Emission'),
            TableColumn(field='pdb_id', title='PDB ID'),
            TableColumn(field='genbank', title='Genbank ID'),
            TableColumn(field='mutation', title='Mutation'),
            TableColumn(field='quantum_yield', title='Quantum Yield'),
            TableColumn(field='pka', title='pka'),
            TableColumn(field='amino_acid_sequence', title='Sequence'),
        ])
        self.data_table.width = 1200
        # obj.pretext = PreText(text='No selected items', width=400)
    def init_plot(self):
        """Create the initial scatter plot."""
        self.plot = self.scatter_plot()
    def init_input(self):
        """Create the filter widgets (sliders + chromophore dropdown) once."""
        self.min_excitation = Slider(
            title="Min Excitation", name="min_excitation",
            value=min_excitation,
            start=min_excitation,
            end=max_excitation,
        )
        self.max_excitation = Slider(
            title="Max Excitation", name="max_excitation",
            value=max_excitation,
            start=min_excitation,
            end=max_excitation,
        )
        self.min_emission = Slider(
            title="Min Emission", name="min_emission",
            value=min_emission,
            start=min_emission,
            end=max_emission,
        )
        self.max_emission = Slider(
            title="Max Emission", name="max_emission",
            value=max_emission,
            start=min_emission,
            end=max_emission,
        )
        self.chrom_class_select = Select(
            title="Chromophore",
            value='All',
            options=['All'] + CHROMOPHORES,
        )
    def set_sliders(self):
        """Recreate the sliders, preserving their current values."""
        self.min_excitation = Slider(
            title="Min Excitation", name="min_excitation",
            value=self.min_excitation.value,
            start=min_excitation,
            end=max_excitation,
        )
        self.max_excitation = Slider(
            title="Max Excitation", name="max_excitation",
            value=self.max_excitation.value,
            start=min_excitation,
            end=max_excitation,
        )
        self.min_emission = Slider(
            title="Min Emission", name="min_emission",
            value=self.min_emission.value,
            start=min_emission,
            end=max_emission,
        )
        self.max_emission = Slider(
            title="Max Emission", name="max_emission",
            value=self.max_emission.value,
            start=min_emission,
            end=max_emission,
        )
    def get_data(self):
        """Return the rows matching the current slider and dropdown filters."""
        df = data
        df = df[df['excitation_new']>=self.min_excitation.value]
        df = df[df['excitation_new']<=self.max_excitation.value]
        df = df[df['emission_new']>=self.min_emission.value]
        df = df[df['emission_new']<=self.max_emission.value]
        if self.chrom_class_select.value == 'All':  # all chromophore classes
            return df
        else:
            df = df[df['chromophore_class']==self.chrom_class_select.value]
            return df
    def make_source(self):
        """Push the filtered rows into the shared data source."""
        self.source.data = self.get_data().to_dict('list')
    def make_plots(self):
        """Rebuild the scatter plot from the current source."""
        # # print('CALL: make_plots')
        self.plot = self.scatter_plot()
    @property
    def selected_df(self):
        """Rows for the current selection in the data source."""
        df = data
        selected = self.source.selected
        # NOTE(review): assumes ``selected`` is a positional index list as in
        # old bokeh versions -- verify against the targeted bokeh release.
        if selected:
            df = df.iloc[selected, :]
        return df
    def scatter_plot(self):
        """Build the excitation (x) vs. emission (y) scatter with tooltips."""
        toolset = "pan,reset,resize,save,wheel_zoom,hover,box_select"
        plot = figure(tools=toolset)
        plot.scatter('excitation_new', 'emission_new',
                     source=self.source,
                     plot_width=100, plot_height=200,
                     radius=4, fill_alpha=0.4,
                     fill_color='excitation_color_new',
                     line_color='#000000',
                     )
        # BUG FIX: axis labels were swapped -- x plots excitation_new and
        # y plots emission_new.
        plot.xaxis.axis_label = 'Excitation'
        plot.yaxis.axis_label = 'Emission'
        plot.x_range = Range1d(start=min_excitation, end=max_excitation)
        # BUG FIX: the y range previously ended at max_excitation.
        plot.y_range = Range1d(start=min_emission, end=max_emission)
        hover = plot.select(dict(type=HoverTool))
        hover.tooltips = [
            ("FPID ", "@fpid"),
            ("Chromophore name ", "@chromophore_name"),
            ("Excitation color class ", "@excitation_color_class"),
            ("Emission color class ", "@emission_color_class"),
            ("Primary excitation ", "@excitation_new"),
            ("Secondary excitation ", "@excitation_alt"),
            ("Primary emission ", "@emission_new"),
            ("Secondary emission ", "@emission_alt"),
        ]
        return plot
    def set_children(self):
        """(Re)assemble the widget/plot/table layout tree."""
        self.input_frame = VBoxForm(children=[
            self.min_excitation,
            self.max_excitation,
            self.min_emission,
            self.max_emission,
            self.chrom_class_select,
        ])
        self.plot_frame = HBox(children=[self.plot])
        self.top_frame = HBox(children=[self.plot_frame, self.input_frame])
        self.table_frame = HBox(children=[self.data_table])
        self.main_frame = VBox(children=[self.top_frame, self.table_frame])
        self.children = [self.main_frame]
    def setup_events(self):
        """Wire widget change events to their handlers (called by bokeh)."""
        super(FPDApp, self).setup_events()
        if self.source:
            self.source.on_change('selected', self, 'on_slider_change')
        if self.min_excitation:
            self.min_excitation.on_change('value', self, 'on_slider_change')
        if self.max_excitation:
            self.max_excitation.on_change('value', self, 'on_slider_change')
        if self.min_emission:
            self.min_emission.on_change('value', self, 'on_slider_change')
        if self.max_emission:
            self.max_emission.on_change('value', self, 'on_slider_change')
        if self.chrom_class_select:
            self.chrom_class_select.on_change(
                'value', self, 'on_class_change')
    def on_class_change(self, obj, attrname, old, new):
        """Handle chromophore-class dropdown changes: refilter and redraw."""
        self.chrom_class_select.value = new
        self.make_source()
        self.make_plots()
        curdoc().add(self)
    def on_slider_change(self, obj, attrname, old, new):
        """Handle slider changes, rejecting min/max values that would cross."""
        if obj == self.min_excitation:
            self.min_excitation.value = new
            if self.min_excitation.value > self.max_excitation.value:
                self.min_excitation.value = old
        if obj == self.max_excitation:
            self.max_excitation.value = new
            if self.max_excitation.value < self.min_excitation.value:
                self.max_excitation.value = old
        if obj == self.min_emission:
            self.min_emission.value = new
            if self.min_emission.value > self.max_emission.value:
                self.min_emission.value = old
        if obj == self.max_emission:
            self.max_emission.value = new
            if self.max_emission.value < self.min_emission.value:
                self.max_emission.value = old
        self.set_sliders()
        self.make_source()
        self.set_children()
        curdoc().add(self)
        # self.pretext.text = str(self.selected_df['doi'])
        # self.data_table.source = self.source
        # self.reset_sliders()
        # self.make_plots()
@bokeh_app.route("/bokeh/fpd/")
@object_page("fpd")
def make_object():
    """Create and return the FPD explorer app for the bokeh server."""
    return FPDApp.create()
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from unittest import mock
from troveclient import base
from troveclient.v1 import datastores
"""
Unit tests for datastores.py
"""
class DatastoreTest(testtools.TestCase):
    """Tests for the Datastore resource wrapper."""
    def setUp(self):
        super(DatastoreTest, self).setUp()
        # Stub out __init__ so a Datastore can be built without a manager.
        self.orig__init = datastores.Datastore.__init__
        datastores.Datastore.__init__ = mock.Mock(return_value=None)
        self.datastore = datastores.Datastore()
        self.datastore.manager = mock.Mock()
    def tearDown(self):
        super(DatastoreTest, self).tearDown()
        # Restore the real constructor.
        datastores.Datastore.__init__ = self.orig__init
    def test___repr__(self):
        self.datastore.name = "datastore-1"
        self.assertEqual('<Datastore: datastore-1>', repr(self.datastore))
class DatastoresTest(testtools.TestCase):
    """Tests for the Datastores manager."""
    def setUp(self):
        super(DatastoresTest, self).setUp()
        # Stub out __init__ so a manager can be built without a real client.
        self.orig__init = datastores.Datastores.__init__
        datastores.Datastores.__init__ = mock.Mock(return_value=None)
        self.datastores = datastores.Datastores()
        self.datastores.api = mock.Mock()
        self.datastores.api.client = mock.Mock()
        self.datastores.resource_class = mock.Mock(return_value="ds-1")
        # Make base.getid deterministic for URL construction.
        self.orig_base_getid = base.getid
        base.getid = mock.Mock(return_value="datastore1")
    def tearDown(self):
        super(DatastoresTest, self).tearDown()
        datastores.Datastores.__init__ = self.orig__init
        base.getid = self.orig_base_getid
    def test_list(self):
        pages = mock.Mock()
        self.datastores._paginated = pages
        self.datastores.list("test-limit", "test-marker")
        pages.assert_called_with("/datastores", "datastores",
                                 "test-limit", "test-marker")
        self.datastores.list()
        pages.assert_called_with("/datastores", "datastores", None, None)
    def test_get(self):
        self.datastores._get = mock.Mock(
            side_effect=lambda path, inst: (path, inst))
        self.assertEqual(('/datastores/datastore1',
                          'datastore'),
                         self.datastores.get(1))
class DatastoreVersionsTest(testtools.TestCase):
    """Tests for the DatastoreVersions manager."""
    def setUp(self):
        super(DatastoreVersionsTest, self).setUp()
        # Stub out __init__ so a manager can be built without a real client.
        self.orig__init = datastores.DatastoreVersions.__init__
        datastores.DatastoreVersions.__init__ = mock.Mock(return_value=None)
        self.datastore_versions = datastores.DatastoreVersions()
        self.datastore_versions.api = mock.Mock()
        self.datastore_versions.api.client = mock.Mock()
        self.datastore_versions.resource_class = mock.Mock(
            return_value="ds_version-1")
        # Make base.getid deterministic for URL construction.
        self.orig_base_getid = base.getid
        base.getid = mock.Mock(return_value="datastore_version1")
    def tearDown(self):
        super(DatastoreVersionsTest, self).tearDown()
        datastores.DatastoreVersions.__init__ = self.orig__init
        base.getid = self.orig_base_getid
    def test_list(self):
        pages = mock.Mock()
        self.datastore_versions._paginated = pages
        self.datastore_versions.list("datastore1", "test-limit", "test-marker")
        pages.assert_called_with("/datastores/datastore1/versions",
                                 "versions", "test-limit", "test-marker")
    def test_get(self):
        self.datastore_versions._get = mock.Mock(
            side_effect=lambda path, inst: (path, inst))
        self.assertEqual(('/datastores/datastore1/versions/'
                          'datastore_version1',
                          'version'),
                         self.datastore_versions.get("datastore1",
                                                     "datastore_version1"))
    def test_get_by_uuid(self):
        self.datastore_versions._get = mock.Mock(
            side_effect=lambda path, inst: (path, inst))
        self.assertEqual(('/datastores/versions/datastore_version1',
                          'version'),
                         self.datastore_versions.get_by_uuid(
                             "datastore_version1"))
class DatastoreVersionMembersTest(testtools.TestCase):
    """Tests for the DatastoreVersionMembers manager."""
    def setUp(self):
        super(DatastoreVersionMembersTest, self).setUp()
        # Stub out __init__ so a manager can be built without a real client.
        self.orig__init = datastores.DatastoreVersionMembers.__init__
        datastores.DatastoreVersionMembers.__init__ = mock.Mock(
            return_value=None)
        self.datastore_version_members = datastores.DatastoreVersionMembers()
        self.datastore_version_members.api = mock.Mock()
        self.datastore_version_members.api.client = mock.Mock()
        self.datastore_version_members.resource_class = mock.Mock(
            return_value="ds_version_member-1")
        # Make base.getid deterministic for URL construction.
        self.orig_base_getid = base.getid
        base.getid = mock.Mock(return_value="datastore_version_member1")
    def tearDown(self):
        super(DatastoreVersionMembersTest, self).tearDown()
        datastores.DatastoreVersionMembers.__init__ = self.orig__init
        base.getid = self.orig_base_getid
    def test_add(self):
        self.datastore_version_members._create = mock.Mock(
            side_effect=lambda path, body, inst: (path, body, inst))
        path, body, inst = self.datastore_version_members.add(
            "data_store1", "datastore_version1", "tenant1")
        self.assertEqual(
            "/mgmt/datastores/data_store1/versions/datastore_version1/members",
            path)
        self.assertEqual("datastore_version_member", inst)
        self.assertEqual("tenant1", body["member"])
    def test_delete(self):
        self.datastore_version_members._delete = mock.Mock(
            side_effect=lambda path: path)
        path = self.datastore_version_members.delete("data_store1",
                                                     "datastore_version1",
                                                     "tenant1")
        self.assertEqual(
            "/mgmt/datastores/data_store1/versions/datastore_version1/members/"
            "tenant1",
            path)
    def test_list(self):
        pages = mock.Mock()
        self.datastore_version_members._list = pages
        self.datastore_version_members.list("datastore1", "datastore_version1",
                                            "test-limit", "test-marker")
        pages.assert_called_with("/mgmt/datastores/datastore1/versions/"
                                 "datastore_version1/members",
                                 "datastore_version_members",
                                 "test-limit", "test-marker")
    def test_get(self):
        self.datastore_version_members._get = mock.Mock(
            side_effect=lambda path, inst: (path, inst))
        self.assertEqual(('/mgmt/datastores/datastore1/versions/'
                          'datastore_version1/members/tenant1',
                          'datastore_version_member'),
                         self.datastore_version_members.get(
                             "datastore1",
                             "datastore_version1",
                             "tenant1"))
    def test_get_by_tenant(self):
        pages = mock.Mock()
        self.datastore_version_members._list = pages
        self.datastore_version_members.get_by_tenant("datastore1", "tenant1",
                                                     "test-limit",
                                                     "test-marker")
        pages.assert_called_with("/mgmt/datastores/datastore1/versions/"
                                 "members/tenant1",
                                 "datastore_version_members",
                                 "test-limit", "test-marker")
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ChannelTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the Chat v2 Channel resource.

    Each ``*_request`` test mocks a 500 response, triggers the client call
    inside ``assertRaises`` and asserts the exact HTTP request issued; each
    ``*_response`` test mocks a canned payload and asserts the client
    parses it without error.
    """
    def test_fetch_request(self):
        # Verify the GET request the fetch() call emits.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        # Verify a canned fetch payload is parsed into a non-None instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "unique_name": "unique_name",
                "attributes": "{ \\"foo\\": \\"bar\\" }",
                "type": "public",
                "date_created": "2015-12-16T22:18:37Z",
                "date_updated": "2015-12-16T22:18:37Z",
                "created_by": "system",
                "members_count": 0,
                "messages_count": 0,
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
                    "messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
                    "invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
                    "webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
                    "last_message": null
                }
            }
            '''
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.assertIsNotNone(actual)

    def test_delete_request(self):
        # Verify the DELETE request and the X-Twilio-Webhook-Enabled header.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete(x_twilio_webhook_enabled="true")

        headers = {'X-Twilio-Webhook-Enabled': "true", }
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
            headers=headers,
        ))

    def test_delete_response(self):
        # A 204 with no body should yield a truthy delete result.
        self.holodeck.mock(Response(
            204,
            None,
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.assertTrue(actual)

    def test_create_request(self):
        # Verify the POST request and the X-Twilio-Webhook-Enabled header.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .channels.create(x_twilio_webhook_enabled="true")

        headers = {'X-Twilio-Webhook-Enabled': "true", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
            headers=headers,
        ))

    def test_create_response(self):
        # Verify a canned create payload (201) is parsed without error.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "unique_name": "unique_name",
                "attributes": "{ \\"foo\\": \\"bar\\" }",
                "type": "public",
                "date_created": "2015-12-16T22:18:37Z",
                "date_updated": "2015-12-16T22:18:38Z",
                "created_by": "username",
                "members_count": 0,
                "messages_count": 0,
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
                    "messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
                    "invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
                    "webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
                    "last_message": null
                }
            }
            '''
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels.create()

        self.assertIsNotNone(actual)

    def test_list_request(self):
        # Verify the GET request the list() call emits.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .channels.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
        ))

    def test_read_full_response(self):
        # Verify a one-item channel list payload is parsed without error.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "channels": [
                    {
                        "sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "friendly_name": "friendly_name",
                        "unique_name": "unique_name",
                        "attributes": "{ \\"foo\\": \\"bar\\" }",
                        "type": "public",
                        "date_created": "2015-12-16T22:18:37Z",
                        "date_updated": "2015-12-16T22:18:37Z",
                        "created_by": "system",
                        "members_count": 0,
                        "messages_count": 0,
                        "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "links": {
                            "members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
                            "messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
                            "invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
                            "webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
                            "last_message": null
                        }
                    }
                ],
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "channels"
                }
            }
            '''
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels.list()

        self.assertIsNotNone(actual)

    def test_read_empty_response(self):
        # Verify an empty channel list payload is parsed without error.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "channels": [],
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "channels"
                }
            }
            '''
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels.list()

        self.assertIsNotNone(actual)

    def test_update_request(self):
        # Verify the POST update request and the webhook-enabled header.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(x_twilio_webhook_enabled="true")

        headers = {'X-Twilio-Webhook-Enabled': "true", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
            headers=headers,
        ))

    def test_update_response(self):
        # Verify a canned update payload is parsed without error.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "friendly_name",
                "unique_name": "unique_name",
                "attributes": "{ \\"foo\\": \\"bar\\" }",
                "type": "public",
                "date_created": "2015-12-16T22:18:37Z",
                "date_updated": "2015-12-16T22:18:38Z",
                "created_by": "username",
                "members_count": 0,
                "messages_count": 0,
                "url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
                    "messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
                    "invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
                    "webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
                    "last_message": null
                }
            }
            '''
        ))

        actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        self.assertIsNotNone(actual)
| |
# -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from keystoneauth1 import loading as ks_loading
from keystoneclient.v3 import client as ks_client
from monascaclient import client as mclient
from oslo_config import cfg
from oslo_log import log as logging
from voluptuous import All
from voluptuous import In
from voluptuous import Length
from voluptuous import Required
from voluptuous import Schema
from cloudkitty import collector
from cloudkitty import dataframe
from cloudkitty import utils as ck_utils
LOG = logging.getLogger(__name__)

# Monasca API version used when instantiating the monascaclient.
MONASCA_API_VERSION = '2_0'
# Name of the oslo.config section holding the collector's options.
COLLECTOR_MONASCA_OPTS = 'collector_monasca'

# Options controlling how the collector locates the Monasca endpoint in the
# Keystone catalog.
collector_monasca_opts = [
    cfg.StrOpt(
        'interface',
        default='internal',
        help='Endpoint URL type (defaults to internal)',
    ),
    cfg.StrOpt(
        'monasca_service_name',
        default='monasca',
        help='Name of the Monasca service (defaults to monasca)',
    ),
]

CONF = cfg.CONF
# Register both the collector options and the keystoneauth auth/session
# options under the same [collector_monasca] section.
CONF.register_opts(collector_monasca_opts, COLLECTOR_MONASCA_OPTS)
ks_loading.register_auth_conf_options(CONF, COLLECTOR_MONASCA_OPTS)
ks_loading.register_session_conf_options(CONF, COLLECTOR_MONASCA_OPTS)

# Monasca-specific additions to the base per-metric configuration schema.
MONASCA_EXTRA_SCHEMA = {
    Required('extra_args', default={}): {
        # Key corresponding to the resource id in a metric's dimensions.
        # Allows to adapt the resource identifier. Should not need to be
        # modified in a standard OpenStack installation.
        Required('resource_key', default='resource_id'):
            All(str, Length(min=1)),
        Required('aggregation_method', default='max'):
            In(['max', 'mean', 'min']),
        # In case the metrics in Monasca do not belong to the project
        # cloudkitty is identified in.
        Required('forced_project_id', default=''): str,
    },
}
class EndpointNotFound(Exception):
    """Raised when no Monasca endpoint can be located in the catalog."""
class MonascaCollector(collector.BaseCollector):
    """CloudKitty collector fetching rating data from Monasca."""

    collector_name = 'monasca'

    @staticmethod
    def check_configuration(conf):
        """Validate the per-metric configuration against the Monasca schema.

        Each metric entry is validated with the base collector schema
        extended by MONASCA_EXTRA_SCHEMA, and the configured 'resource_key'
        is forced into the metric's 'groupby' list so that results can
        always be grouped by resource.
        """
        conf = collector.BaseCollector.check_configuration(conf)
        metric_schema = Schema(collector.METRIC_BASE_SCHEMA).extend(
            MONASCA_EXTRA_SCHEMA)

        output = {}
        for metric_name, metric in conf.items():
            met = output[metric_name] = metric_schema(metric)
            if met['extra_args']['resource_key'] not in met['groupby']:
                met['groupby'].append(met['extra_args']['resource_key'])
        return output

    def __init__(self, **kwargs):
        super(MonascaCollector, self).__init__(**kwargs)

        # Build a Keystone session from the [collector_monasca] auth options.
        self.auth = ks_loading.load_auth_from_conf_options(
            CONF,
            COLLECTOR_MONASCA_OPTS)
        self.session = ks_loading.load_session_from_conf_options(
            CONF,
            COLLECTOR_MONASCA_OPTS,
            auth=self.auth)
        self.ks_client = ks_client.Client(
            session=self.session,
            interface=CONF.collector_monasca.interface,
        )
        # Resolve the Monasca endpoint up-front; fail fast if it is missing.
        self.mon_endpoint = self._get_monasca_endpoint()
        if not self.mon_endpoint:
            raise EndpointNotFound()
        self._conn = mclient.Client(
            api_version=MONASCA_API_VERSION,
            session=self.session,
            endpoint=self.mon_endpoint)

    # NOTE(lukapeschke) This function should be removed as soon as the
    # endpoint is no longer required by monascaclient
    def _get_monasca_endpoint(self):
        """Look up the Monasca endpoint URL in the Keystone catalog.

        Returns the URL of the endpoint matching the configured interface
        type, or None if the service or a matching endpoint is not found.
        """
        service_name = cfg.CONF.collector_monasca.monasca_service_name
        endpoint_interface_type = cfg.CONF.collector_monasca.interface

        service_list = self.ks_client.services.list(name=service_name)
        if not service_list:
            return None
        mon_service = service_list[0]
        endpoints = self.ks_client.endpoints.list(mon_service.id)
        for endpoint in endpoints:
            if endpoint.interface == endpoint_interface_type:
                return endpoint.url
        return None

    def _get_metadata(self, metric_name, conf):
        """Return the unit and dimension names for a metric.

        The dimension names reported by Monasca are exposed as the metric's
        available metadata fields.
        """
        info = {}
        info['unit'] = conf['metrics'][metric_name]['unit']

        dimension_names = self._conn.metric.list_dimension_names(
            metric_name=metric_name)
        info['metadata'] = [d['dimension_name'] for d in dimension_names]
        return info

    # NOTE(lukapeschke) if anyone sees a better way to do this,
    # please make a patch
    @classmethod
    def get_metadata(cls, resource_type, conf):
        """Instantiate a throwaway collector to fetch metric metadata."""
        tmp = cls(period=conf['period'])
        return tmp._get_metadata(resource_type, conf)

    def _get_dimensions(self, metric_name, project_id, q_filter):
        """Build the Monasca 'dimensions' filter for a query.

        The scope key (from the [collect] section) is mapped to project_id
        when a project is given, and any custom filter entries are merged in.
        """
        dimensions = {}
        scope_key = CONF.collect.scope_key
        if project_id:
            dimensions[scope_key] = project_id
        if q_filter:
            dimensions.update(q_filter)
        return dimensions

    def _fetch_measures(self, metric_name, start, end,
                        project_id=None, q_filter=None):
        """Get measures for given metric during the timeframe.

        :param metric_name: metric name to filter on.
        :type metric_name: str
        :param start: Start of the timeframe.
        :param end: End of the timeframe if needed.
        :param project_id: Filter on a specific tenant/project.
        :type project_id: str
        :param q_filter: Append a custom filter.
        :type q_filter: list
        """
        dimensions = self._get_dimensions(metric_name, project_id, q_filter)
        group_by = self.conf[metric_name]['groupby']
        # NOTE(lpeschke): One aggregated measure per collect period
        period = int((end - start).total_seconds())

        extra_args = self.conf[metric_name]['extra_args']
        kwargs = {}
        # Query on behalf of another project when configured to do so.
        if extra_args['forced_project_id']:
            kwargs['tenant_id'] = extra_args['forced_project_id']

        return self._conn.metrics.list_statistics(
            name=metric_name,
            merge_metrics=True,
            dimensions=dimensions,
            start_time=start,
            end_time=end,
            period=period,
            statistics=extra_args['aggregation_method'],
            group_by=group_by,
            **kwargs)

    def _fetch_metrics(self, metric_name, start, end,
                       project_id=None, q_filter=None):
        """List active metrics during the timeframe.

        :param metric_name: metric name to filter on.
        :type metric_name: str
        :param start: Start of the timeframe.
        :param end: End of the timeframe if needed.
        :param project_id: Filter on a specific tenant/project.
        :type project_id: str
        :param q_filter: Append a custom filter.
        :type q_filter: list
        """
        dimensions = self._get_dimensions(metric_name, project_id, q_filter)
        metrics = self._conn.metrics.list(
            name=metric_name,
            dimensions=dimensions,
            start_time=start,
            end_time=end,
        )

        resource_key = self.conf[metric_name]['extra_args']['resource_key']
        # Map each resource identifier to the metric's full dimensions dict.
        return {metric['dimensions'][resource_key]:
                metric['dimensions'] for metric in metrics}

    def _format_data(self, metconf, data, resources_info=None):
        """Formats Monasca data to CK data.

        Returns metadata, groupby and qty.
        """
        groupby = data['dimensions']
        resource_key = metconf['extra_args']['resource_key']

        metadata = dict()
        if resources_info:
            resource = resources_info[groupby[resource_key]]
            for i in metconf['metadata']:
                metadata[i] = resource.get(i, '')

        # Only the first statistics entry is used; presumably each entry is a
        # [timestamp, value] pair — TODO confirm against the Monasca API.
        qty = data['statistics'][0][1]
        converted_qty = ck_utils.convert_unit(
            qty, metconf['factor'], metconf['offset'])
        mutated_qty = ck_utils.mutate(converted_qty, metconf['mutate'])
        return metadata, groupby, mutated_qty

    def fetch_all(self, metric_name, start, end,
                  project_id=None, q_filter=None):
        """Return a list of DataPoints for the metric over the timeframe."""
        met = self.conf[metric_name]

        data = self._fetch_measures(
            metric_name,
            start,
            end,
            project_id=project_id,
            q_filter=q_filter,
        )

        # Resource metadata is only fetched when the metric declares
        # metadata fields.
        resources_info = None
        if met['metadata']:
            resources_info = self._fetch_metrics(
                metric_name,
                start,
                end,
                project_id=project_id,
                q_filter=q_filter,
            )

        formated_resources = list()
        for d in data:
            # Skip series that produced no aggregated statistics.
            if len(d['statistics']):
                metadata, groupby, qty = self._format_data(
                    met, d, resources_info)
                formated_resources.append(dataframe.DataPoint(
                    met['unit'],
                    qty,
                    0,
                    groupby,
                    metadata,
                ))
        return formated_resources
| |
import sys
import pdb
import time
import random
class Game:
    """State and move generation for a game of Tak on an n x n board.

    Each board square holds a stack represented as a list of
    (player, piece_type) tuples, where piece_type is 'F' (flat),
    'S' (standing stone) or 'C' (capstone).

    NOTE: the original implementation used the Python-2-only ``xrange``
    and true-division ``/`` for integer math; these are replaced with
    ``range`` and ``//`` (identical behavior on Python 2 ints, and now
    also correct on Python 3).
    """

    class Player:
        """Per-player piece counters."""

        def __init__(self, flats, capstones):
            self.flats = flats
            self.capstones = capstones

    def __init__(self, n):
        self.n = n
        self.total_squares = n * n
        # One stack (list of (player, type) tuples) per square.
        self.board = [[] for i in range(self.total_squares)]
        self.turn = 0
        # Standard piece counts per board size.
        # NOTE(review): for n outside {5, 6, 7}, max_flats/max_capstones are
        # left unset, which fails later with AttributeError — confirm callers
        # only use supported sizes.
        if n == 5:
            self.max_flats = 21
            self.max_capstones = 1
        elif n == 6:
            self.max_flats = 30
            self.max_capstones = 1
        elif n == 7:
            self.max_flats = 40
            self.max_capstones = 1
        self.max_movable = n
        self.max_down = 1
        self.max_up = n
        self.max_left = 'a'
        self.max_right = chr(ord('a') + n - 1)
        self.moves = 0
        self.players = []
        self.players.append(Game.Player(self.max_flats, self.max_capstones))
        self.players.append(Game.Player(self.max_flats, self.max_capstones))
        # Precomputed square-name strings ('a1', 'b1', ...) by square index.
        self.all_squares = [self.square_to_string(i)
                            for i in range(self.total_squares)]

    def square_to_num(self, square_string):
        """Convert a square string like 'a1' to its index.

        Return -1 if square_string is invalid.
        """
        if len(square_string) != 2:
            return -1
        if (not square_string[0].isalpha()
                or not square_string[0].islower()
                or not square_string[1].isdigit()):
            return -1
        row = ord(square_string[0]) - 96
        col = int(square_string[1])
        if row < 1 or row > self.n or col < 1 or col > self.n:
            return -1
        return self.n * (col - 1) + (row - 1)

    def square_to_string(self, square):
        """Convert square number to string (e.g. 0 -> 'a1')."""
        if square < 0 or square >= self.total_squares:
            return ''
        row = square % self.n
        # Floor division: squares are laid out row-major by column number.
        col = square // self.n
        return chr(row + 97) + str(col + 1)

    def execute_move(self, move_string):
        """Execute a move string (placement 'Fa1'/'Sa1'/'Ca1' or a stack
        move like '3a1>111') and flip the turn.

        On each player's first move the piece placed belongs to the
        opponent, per Tak's opening rule.
        """
        if self.turn == 0:
            self.moves += 1
        if self.moves != 1:
            current_piece = self.turn
        else:
            current_piece = 1 - self.turn
        if move_string[0].isalpha():
            # Placement move: piece letter followed by the square name.
            square = self.square_to_num(move_string[1:])
            if move_string[0] == 'F' or move_string[0] == 'S':
                self.board[square].append((current_piece, move_string[0]))
                self.players[current_piece].flats -= 1
            elif move_string[0] == 'C':
                self.board[square].append((current_piece, move_string[0]))
                self.players[current_piece].capstones -= 1
        elif move_string[0].isdigit():
            # Stack move: count, origin square, direction, then the number
            # of pieces dropped on each square along the way.
            count = int(move_string[0])
            square = self.square_to_num(move_string[1:3])
            direction = move_string[3]
            if direction == '+':
                change = self.n
            elif direction == '-':
                change = -self.n
            elif direction == '>':
                change = 1
            elif direction == '<':
                change = -1
            prev_square = square
            for i in range(4, len(move_string)):
                next_count = int(move_string[i])
                next_square = prev_square + change
                # A capstone flattens a standing stone it lands on.
                if (len(self.board[next_square]) > 0
                        and self.board[next_square][-1][1] == 'S'):
                    self.board[next_square][-1] = (
                        self.board[next_square][-1][0], 'F')
                if next_count - count == 0:
                    self.board[next_square] += self.board[square][-count:]
                else:
                    self.board[next_square] += \
                        self.board[square][-count:-count + next_count]
                prev_square = next_square
                count -= next_count
            # Remove the carried pieces from the origin square.
            count = int(move_string[0])
            self.board[square] = self.board[square][:-count]
        self.turn = 1 - self.turn

    def partition(self, n):
        """Generates all permutations of all partitions of n."""
        part_list = []
        part_list.append([n])
        for x in range(1, n):
            for y in self.partition(n - x):
                part_list.append([x] + y)
        return part_list

    def check_valid(self, square, direction, partition):
        """For given movement (partition), check if stack on square can be
        moved in direction. Assumes active player is topmost color.
        """
        if direction == '+':
            change = self.n
        elif direction == '-':
            change = -self.n
        elif direction == '>':
            change = 1
        elif direction == '<':
            change = -1
        for i in range(len(partition)):
            next_square = square + change * (i + 1)
            # Can never move onto a capstone.
            if (len(self.board[next_square]) > 0
                    and self.board[next_square][-1][1] == 'C'):
                return False
            # A standing stone blocks everything before the last drop...
            if (len(self.board[next_square]) > 0
                    and self.board[next_square][-1][1] == 'S'
                    and i != len(partition) - 1):
                return False
            # ...and the last drop onto it must be a lone capstone.
            if (i == len(partition) - 1
                    and len(self.board[next_square]) > 0
                    and self.board[next_square][-1][1] == 'S'
                    and partition[i] > 1):
                return False
            if (i == len(partition) - 1
                    and len(self.board[next_square]) > 0
                    and self.board[next_square][-1][1] == 'S'
                    and self.board[square][-1][1] != 'C'):
                return False
        return True

    def generate_stack_moves(self, square):
        """Generate stack moves from square.

        Assumes active player is topmost color.
        """
        all_moves = []
        r = square % self.n
        c = square // self.n
        size = len(self.board[square])
        dirs = ['+', '-', '<', '>']
        # Number of squares remaining in each direction (+, -, <, >).
        up = self.n - 1 - c
        down = c
        right = self.n - 1 - r
        left = r
        rem_squares = [up, down, left, right]
        for num in range(min(size, self.n)):
            part_list = self.partition(num + 1)
            for di in range(4):
                part_dir = [part for part in part_list
                            if len(part) <= rem_squares[di]]
                for part in part_dir:
                    if self.check_valid(square, dirs[di], part):
                        part_string = ''.join([str(i) for i in part])
                        all_moves.append(str(sum(part))
                                         + self.all_squares[square]
                                         + dirs[di] + part_string)
        return all_moves

    def generate_all_moves(self, player):
        """Generate all possible moves for player.

        Returns a list of move strings.
        """
        all_moves = []
        for i in range(len(self.board)):
            if len(self.board[i]) == 0:
                if self.players[player].flats > 0:
                    all_moves.append('F' + self.all_squares[i])
                # Standing stones and capstones may not be placed on a
                # player's very first move.
                if self.moves != player and self.players[player].flats > 0:
                    all_moves.append('S' + self.all_squares[i])
                if (self.moves != player
                        and self.players[player].capstones > 0):
                    all_moves.append('C' + self.all_squares[i])
        for i in range(len(self.board)):
            if (len(self.board[i]) > 0 and self.board[i][-1][0] == player
                    and self.moves != player):
                all_moves += self.generate_stack_moves(i)
        return all_moves
class RandomPlayer:
    """Agent that plays uniformly random legal moves over stdin/stdout.

    The setup line read from stdin carries the player number, board size
    and remaining time; constructing the object immediately enters the
    blocking game loop.
    """

    def __init__(self):
        setup = sys.stdin.readline().strip().split()
        self.player = int(setup[0]) - 1
        self.n = int(setup[1])
        self.time_left = int(setup[2])
        self.game = Game(self.n)
        self.play()

    def play(self):
        # Player 2 first applies the opponent's opening move.
        if self.player == 1:
            opponent_move = sys.stdin.readline().strip()
            self.game.execute_move(opponent_move)
        while True:
            candidates = self.game.generate_all_moves(self.player)
            chosen = candidates[random.randint(0, len(candidates) - 1)]
            self.game.execute_move(chosen)
            chosen = chosen + '\n'
            sys.stderr.write('Possible moves: ' + str(candidates) + '\n')
            sys.stderr.write('Chosen move: ' + chosen)
            sys.stdout.write(chosen)
            sys.stdout.flush()
            opponent_move = sys.stdin.readline().strip()
            self.game.execute_move(opponent_move)
# Script entry point: constructing the player immediately blocks on stdin
# for the setup line and then runs the game loop.
random_player = RandomPlayer()
| |
#!python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility script to automate the process of symbolizing a SyzyASan
minidump.
"""
from collections import namedtuple
import optparse
import os
import re
import subprocess
import sys
# The sentinel value that we use at the end of the command executed in the
# debugger; ScopedDebugger.Command reads output lines until it sees it.
_SENTINEL = 'ENDENDEND'

# The default values for the path to cdb.exe.
_DEFAULT_CDB_PATHS = [
    r'c:\Program Files (x86)\Debugging Tools for Windows (x86)\cdb.exe',
    r'c:\Program Files (x86)\Windows Kits\8.0\Debuggers\x86\cdb.exe',
]

# The frame containing the error info structure.
_BAD_ACCESS_INFO_FRAMES = [
    'asan_rtl!agent::asan::AsanRuntime::OnError',
    'syzyasan_rtl!agent::asan::AsanRuntime::ExceptionFilterImpl',
]

# The helper string that will be included at the beginning of the printed
# crash reports.
_ERROR_HELP_URL = 'You can go to \
https://code.google.com/p/syzygy/wiki/SyzyASanBug to get more information \
about how to treat this bug.'

# Command to print the error info structure.
_GET_BAD_ACCESS_INFO_COMMAND = 'dt -o error_info'

# Command to print the block info structure nested into the error info one.
_GET_BLOCK_INFO_COMMAND = 'dt agent::asan::AsanBlockInfo poi(error_info) -o'

# Template command to print a stack trace from an error info structure.
#
# Here's the description of the keyword to use in this template:
#     - operand: The operator to use to access the structure ('.' or '->').
#     - type: The stack trace type ('alloc' or 'free')
_GET_STACK_COMMAND_TEMPLATE = (
    'dps @@(&error_info{operand}block_info.{type}_stack) '
    'l@@(error_info{operand}block_info.{type}_stack_size);'
)

# Template command to print the stack trace of a corrupt block from an error
# info structure.
#
# Here's the description of the keyword to use in this template:
#     - operand: The operator to use to access the structure ('.' or '->').
#     - range_idx: The corrupt range index.
#     - block_idx: The block index in its range.
#     - type: The stack trace type ('alloc' or 'free')
_GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE = (
    'dps @@(((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
    '(error_info{operand}corrupt_ranges))[{range_idx}].block_info[{block_idx}].'
    '{type}_stack) '
    'L@@(((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
    '(error_info{operand}corrupt_ranges))[{range_idx}].block_info[{block_idx}].'
    '{type}_stack_size)'
)

# A named tuple that will contain an ASan crash report.
ASanReport = namedtuple('ASanReport',
                        'bad_access_info '
                        'crash_stack '
                        'crash_stack_hash '
                        'alloc_stack '
                        'alloc_stack_hash '
                        'free_stack '
                        'free_stack_hash '
                        'corrupt_heap_info '
                        'from_uef')
# NOTE: all regex patterns below are raw strings. The originals were plain
# strings relying on unknown escape sequences ('\s', '\(', '\d', ...)
# surviving verbatim, which Python 3 flags as invalid escapes and will
# eventually reject. The compiled patterns are byte-for-byte identical.

# Match a stack frame as printed by cdb.exe (or windbg.exe).
#
# Here's some examples of stack frames that this regex will match:
#     - 003cd6b8 0ff3a36b 007bff00 00004e84 003cd760 foo!bar+0x18
#     - 003cd6b8 0ff3a36b 007bff00 00004e84 003cd760 0xcafebabe
#     - (Inline) -------- -------- -------- -------- foo!bar+0x42
#
# Here's a description of the different groups in this regex:
#     - args: The arguments in front of the module name.
#     - module: The module's name.
#     - location: The location in the module.
#     - address: If the module name is not available then we'll get its
#       address.
_STACK_FRAME_RE = re.compile(r"""
    ^
    (\(Inline\)\s)?
    (?P<args>([0-9A-F\-]+\ +)+)
    (?:
        (?P<module>[^ ]+)(!(?P<location>.*))? |
        (?P<address>0x[0-9a-f]+)
    )
    $
""", re.VERBOSE | re.IGNORECASE)

# Match a list of modules as printed by cdb.exe when running the 'lm n'
# command.
#
# Here's a description of the different groups in this regex:
#     - start: Module's start address.
#     - end: Module's end address.
#     - module_name: Module's name.
#     - image_name: Image's name.
_MODULE_MATCH_RE = re.compile(r"""
    (?P<start>\w+)\s+
    (?P<end>\w+)\s+
    (?P<module_name>\w+)\s+
    (?P<image_name>.*)
""", re.VERBOSE | re.IGNORECASE)

# Match a Chrome frame in a stack trace.
_CHROME_RE = re.compile(r'^(chrome[_0-9A-F]+)$', re.VERBOSE | re.IGNORECASE)

# Match a frame pointer in a stack frame as it is printed by a debugger.
_FRAME_POINTER_RE = re.compile(
    r'\s*[a-z0-9]+\s+(?P<address>[a-z0-9]+)\s+.*', re.VERBOSE | re.IGNORECASE)

# Match an enum value as it is printed by a debugger. They're usually
# represented as 'NUMERIC_VALUE ( LITERAL_VALUE )'.
_ENUM_VAL_RE = re.compile(
    r'\s*(?P<num_value>\d+)\s*\(\s*(?P<literal_value>[a-zA-Z0-9_]+)\s*\)',
    re.VERBOSE | re.IGNORECASE)
def NormalizeChromeSymbol(symbol):
    """Rewrite versioned Chrome module names to the canonical 'chrome_dll'."""
    normalized = _CHROME_RE.sub('chrome_dll', symbol)
    return normalized
def NormalizeStackTrace(stack_trace):
    """Normalize a given stack trace.

    Args:
        stack_trace: The stack trace to normalize (iterable of raw debugger
            frame lines).

    Returns:
        The normalized stack trace and its hash (a tuple). The hash is the
        XOR of the frame-pointer addresses extracted from each frame.
    """
    frame_hash = 0
    normalized = []
    for raw_line in stack_trace:
        frame = _STACK_FRAME_RE.match(raw_line)
        if frame is None:
            # Not a stack-frame line; ignore it.
            continue
        args = frame.group('args')
        if args:
            # Extract the frame pointer from the 'args' group and fold it
            # into the hash.
            pointer = _FRAME_POINTER_RE.match(args)
            if pointer and pointer.group('address'):
                frame_hash ^= int(pointer.group('address'), 16)
        if frame.group('address'):
            normalized.append(frame.group('address'))
        else:
            module = NormalizeChromeSymbol(frame.group('module'))
            location = frame.group('location')
            if location:
                location = NormalizeChromeSymbol(location)
            else:
                location = 'unknown'
            normalized.append('%s!%s' % (module, location))
    return (normalized, frame_hash)
def DebugStructToDict(structure):
    """Converts a structure as printed by the debugger into a dictionary.

    The structure should have the following format:
        field1 : value1
        field2 : value2
        ...
    Lines containing no ':' separator are ignored.

    Args:
        structure: The structure to convert (iterable of lines).

    Returns:
        A dict containing the values stored in the structure, with keys and
        values stripped of surrounding whitespace.
    """
    ret = dict()
    for entry in structure:
        # BUG FIX: the original used "if not entry.find(':')", but str.find
        # returns -1 (truthy) when ':' is absent, so separator-less lines
        # were NOT skipped and got mangled into bogus entries (while lines
        # *starting* with ':' were skipped instead). Test containment.
        if ':' not in entry:
            continue
        key, _, value = entry.partition(':')
        ret[key.strip()] = value.strip()
    return ret
def GetCorruptHeapInfo(debugger, bad_access_info_vals, bad_access_info_frame,
                       from_uef):
    """Extract the information stored in the minidump about the heap
    corruption.

    Args:
        debugger: A handle to a cdb debugging session.
        bad_access_info_vals: A dictionary containing the information about
            the invalid access.
        bad_access_info_frame: The number of the frame containing the
            error_info structure.
        from_uef: Indicates if the error has been caught by the unhandled
            exception filter.

    Returns:
        A list of corrupt ranges, each of them containing the information
        about the corrupt blocks in it.
    """
    # Reset the debugger context and jump to the frame containing the
    # information.
    corrupt_range_count = int(bad_access_info_vals['corrupt_range_count'], 16)
    debugger.Command('.cxr; .frame %X' % bad_access_info_frame)
    corrupt_ranges = []
    # Iterates over the corrupt ranges.
    for corrupt_range_idx in range(0, corrupt_range_count):
        corrupt_range_info = []
        # When using the '??' operator in a debugging session to evaluate a
        # structure the offsets gets printed, this regex allows their removal.
        struct_field_re = re.compile('\s+\+0x[0-9a-f]+\s*(.*)')
        # The error_info variable is a value (not a pointer) when coming from
        # the unhandled exception filter, hence the operator difference.
        operand = '.' if from_uef else '->'
        # Get the information about this corrupt range.
        for line in debugger.Command(
                '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
                '(error_info%scorrupt_ranges))[0x%x]' % (
                    operand, corrupt_range_idx)):
            m = struct_field_re.match(line)
            if m:
                corrupt_range_info.append(m.group(1))
        corrupt_range_info_vals = DebugStructToDict(corrupt_range_info)
        block_info_count = int(corrupt_range_info_vals['block_info_count'])
        corrupt_range_info_vals['block_info'] = []
        # Iterates over the block info structure available for this range.
        for block_info_idx in range(0, block_info_count):
            # Retrieves the information about the current block info
            # structure.
            block_info = []
            for line in debugger.Command(
                    '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
                    '(error_info%scorrupt_ranges))[%d].block_info[%d]' % (
                        operand, corrupt_range_idx, block_info_idx)):
                m = struct_field_re.match(line)
                if m:
                    block_info.append(m.group(1))
            # The nested 'analysis' sub-structure is dumped separately so its
            # fields can be flattened into the block info dict below.
            block_info_corruption_state = []
            for line in debugger.Command(
                    '?? ((syzyasan_rtl!agent::asan::AsanCorruptBlockRange*)'
                    '(error_info%scorrupt_ranges))[%d].block_info[%d].analysis' % (
                        operand, corrupt_range_idx, block_info_idx)):
                m = struct_field_re.match(line)
                if m:
                    block_info_corruption_state.append(m.group(1))
            block_info_vals = DebugStructToDict(block_info)
            block_info_corruption_state_vals = DebugStructToDict(
                block_info_corruption_state)
            # Replace the raw 'analysis' entry with flattened
            # 'analysis.<field>' entries.
            block_info_vals.pop('analysis', None)
            for e in block_info_corruption_state_vals:
                block_info_vals['analysis.%s' % e] = \
                    block_info_corruption_state_vals[e]
            # Get the allocation stack trace for this block info structure.
            block_info_vals['alloc_stack'], _ = NormalizeStackTrace(
                debugger.Command(
                    _GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE.format(
                        type='alloc', operand=operand,
                        range_idx=corrupt_range_idx,
                        block_idx=block_info_idx)))
            # Get the free stack trace for this block info structure.
            block_info_vals['free_stack'], _ = NormalizeStackTrace(
                debugger.Command(
                    _GET_CORRUPT_BLOCK_STACK_TRACE_TEMPLATE.format(
                        type='free', operand=operand,
                        range_idx=corrupt_range_idx,
                        block_idx=block_info_idx)))
            # Get the block content.
            block_address = block_info_vals['header'].split(' ')[0]
            block_info_vals['block_content'] = []
            block_content = debugger.Command('db %s+0x10 L0x80' % block_address)
            # Match a block data line as printed by Windbg. This helps to get
            # rid of the extra characters that we sometime see at the
            # beginning of the lines ('0:000>').
            line_cleanup_re = re.compile('^\d\:\d+>\s*(.*)')
            for line in block_content:
                m = line_cleanup_re.match(line)
                if m:
                    line = m.group(1)
                block_info_vals['block_content'].append(line)
            corrupt_range_info_vals['block_info'].append(block_info_vals)
        # Append the information about the current range to the list of
        # corrupt ranges.
        corrupt_ranges.append(corrupt_range_info_vals)
    return corrupt_ranges
class ScopedDebugger(subprocess.Popen):
    """A scoped debugger instance.

    Wraps a cdb.exe process attached to a minidump, with all three standard
    streams piped; intended to be used as a context manager so the debugger
    process is always terminated.
    """

    def __init__(self, debugger_path, minidump_filename):
        """Initialize the debugger instance.

        Args:
            debugger_path: The debugger's path.
            minidump_filename: The minidump filename.
        """
        super(ScopedDebugger, self).__init__([debugger_path,
                                              '-z', minidump_filename],
                                             stdin=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)

    def __enter__(self):
        """This debugger should be instantiated via a 'with' statement to
        ensure that its resources are correctly closed.
        """
        return self

    def __exit__(self, e_type, value, traceback):
        """Terminate the debugger process. This is executed when the instance
        of this debugger is created with a 'with' statement.
        """
        self.StopDebugger()

    def StopDebugger(self):
        """Terminate the debugger process.

        We could send the terminate command ('q') to the debugger directly
        but at this point the debugger might be stuck because of a previous
        command and it's just faster to kill the process anyway.
        """
        self.terminate()

    def Command(self, command):
        """Execute a command in the debugger instance.

        A sentinel echo is appended to the command so we know where the
        output for this command ends.

        Args:
            command: The command to execute.

        Returns:
            The output of the debugger after running this command (list of
            lines, sentinel line excluded).
        """
        self.stdin.write(command + '; .echo %s\n' % _SENTINEL)
        lines = []
        while True:
            line = self.stdout.readline().rstrip()
            # Sometimes the sentinel value is preceded by something like
            # '0:000> '.
            if line.endswith(_SENTINEL):
                break
            lines.append(line)
        return lines

    def LoadSymbols(self, pdb_path):
        """Loads the pdbs for the loaded modules if they are present in
        |pdb_path|.

        Args:
            pdb_path: The path containing the pdbs.
        """
        pdbs = [f for f in os.listdir(pdb_path) if f.endswith('.pdb')]
        # The path needs to be quoted to avoid including the sentinel value
        # in cdb's symbol search path.
        self.Command('.sympath \"%s\"' % pdb_path)
        for line in self.Command('lm n'):
            m = _MODULE_MATCH_RE.match(line)
            if m is None:
                continue
            image_name = m.group('image_name')
            if image_name is None:
                continue
            pdb_name = image_name + '.pdb'
            if pdb_name in pdbs:
                self.Command('.reload /fi %s' % image_name)
        self.Command('.symfix')
def ProcessMinidump(minidump_filename, cdb_path, pdb_path):
    """Process a minidump.

    This analyzes the error contained in the minidump and returns the crash
    report for it.

    Args:
        minidump_filename: The minidump filename.
        cdb_path: The path to cdb.exe.
        pdb_path: (Optional) The path to the pdbs for the loaded modules.

    Returns:
        The crash report to be printed, or None if the minidump doesn't exist
        or the ASan error frame can't be located.
    """
    if not os.path.exists(minidump_filename):
        return
    with ScopedDebugger(cdb_path, minidump_filename) as debugger:
        if pdb_path is not None:
            debugger.LoadSymbols(pdb_path)
        # Enable the line number information.
        debugger.Command('.lines')

        # Get the SyzyASan crash stack and try to find the frame containing
        # the bad access info structure.
        asan_crash_stack = debugger.Command('kv')

        bad_access_info_frame = 0
        crash_lines, _ = NormalizeStackTrace(asan_crash_stack)
        # Indicates if this bug has been caught by the unhandled exception
        # filter.
        from_uef = False
        # BUG FIX: the original checked 'bad_access_info_frame == -1' after
        # this loop, a value the counter (starting at 0 and only ever
        # incremented) can never hold, so a missing ASan frame was never
        # detected. Track the hit explicitly instead.
        frame_found = False
        for line in crash_lines:
            if not any(line.find(b) != -1 for b in _BAD_ACCESS_INFO_FRAMES):
                bad_access_info_frame += 1
            else:
                frame_found = True
                if line.find('ExceptionFilter') != -1:
                    from_uef = True
                break

        if not frame_found:
            # BUG FIX: the original formatted the filename (a string) with
            # '%d', raising TypeError instead of printing; it also lacked the
            # space between the two concatenated string parts.
            print ('Unable to find the frame containing the invalid access '
                   'informations for %s.' % minidump_filename)
            return

        # Get the information about this bad access.
        debugger.Command('.frame %X' % bad_access_info_frame)
        debugger.Command('kv')
        bad_access_info = debugger.Command(_GET_BAD_ACCESS_INFO_COMMAND)
        bad_access_block_info = debugger.Command(_GET_BLOCK_INFO_COMMAND)
        # The first two lines contain no useful information, remove them.
        bad_access_info.pop(0)
        bad_access_info.pop(0)
        bad_access_block_info.pop(0)
        bad_access_block_info.pop(0)
        bad_access_info_vals = DebugStructToDict(bad_access_info)
        bad_access_info_vals.update(DebugStructToDict(bad_access_block_info))

        # Checks if the heap is corrupt.
        heap_is_corrupt = bad_access_info_vals['heap_is_corrupt'] == '1'

        # Cleans the enum values stored in the dictionary, keeping only the
        # literal names.
        for key in bad_access_info_vals:
            m = _ENUM_VAL_RE.match(bad_access_info_vals[key])
            if m:
                bad_access_info_vals[key] = m.group('literal_value')

        debugger.Command('.ecxr')
        crash_stack, crash_stack_hash = NormalizeStackTrace(
            debugger.Command('kv'))

        # If the heap is not corrupt and the error type indicates an invalid
        # or wild address then there's no useful information that we can
        # report.
        if not heap_is_corrupt and (
                bad_access_info_vals['error_type'] == 'INVALID_ADDRESS' or
                bad_access_info_vals['error_type'] == 'WILD_ACCESS'):
            report = ASanReport(bad_access_info=bad_access_info_vals,
                                crash_stack=crash_stack,
                                crash_stack_hash=crash_stack_hash,
                                alloc_stack=None,
                                alloc_stack_hash=None,
                                free_stack=None,
                                free_stack_hash=None,
                                corrupt_heap_info=None,
                                from_uef=None)
            return report

        def GetStackAndStackHashFromErrorInfoStruct(debugger, stack_type,
                                                    is_ptr):
            # Fetch and normalize the alloc/free stack stored in error_info.
            assert stack_type in ['alloc', 'free']
            command = _GET_STACK_COMMAND_TEMPLATE.format(
                type=stack_type, operand='->' if is_ptr else '.')
            return NormalizeStackTrace(debugger.Command(command))

        debugger.Command('.cxr; .frame %X' % bad_access_info_frame)
        alloc_stack, alloc_stack_hash = \
            GetStackAndStackHashFromErrorInfoStruct(
                debugger, 'alloc', is_ptr=not from_uef)
        free_stack, free_stack_hash = \
            GetStackAndStackHashFromErrorInfoStruct(
                debugger, 'free', is_ptr=not from_uef)

        corrupt_heap_info = None
        if heap_is_corrupt:
            corrupt_heap_info = GetCorruptHeapInfo(debugger,
                                                   bad_access_info_vals,
                                                   bad_access_info_frame,
                                                   from_uef)

        report = ASanReport(bad_access_info=bad_access_info_vals,
                            crash_stack=crash_stack,
                            crash_stack_hash=crash_stack_hash,
                            alloc_stack=alloc_stack,
                            alloc_stack_hash=alloc_stack_hash,
                            free_stack=free_stack,
                            free_stack_hash=free_stack_hash,
                            corrupt_heap_info=corrupt_heap_info,
                            from_uef=from_uef)
        return report
def PrintASanReport(report, file_handle=sys.stdout):
    """Print a crash report.

    Args:
        report: The report to print (an ASanReport).
        file_handle: A handle to the out stream, by default we print the
            report to stdout.
    """
    file_handle.write('Bad access information:\n')
    for key in report.bad_access_info:
        file_handle.write(' %s: %s\n' % (key, report.bad_access_info[key]))
    file_handle.write('\nCrash stack:\n')
    if report.crash_stack and len(report.crash_stack) != 0:
        for line in report.crash_stack:
            file_handle.write('%s\n' % line)
    if report.alloc_stack and len(report.alloc_stack) != 0:
        file_handle.write('\nAllocation stack:\n')
        for line in report.alloc_stack:
            file_handle.write('%s\n' % line)
    if report.free_stack and len(report.free_stack) != 0:
        file_handle.write('\nFree stack:\n')
        for line in report.free_stack:
            file_handle.write('%s\n' % line)
    if report.corrupt_heap_info:
        file_handle.write('\n\nHeap is corrupt, here\'s some information '
                          'about the corrupt ranges.\n\n')
        for corrupt_range_idx, corrupt_heap_range in enumerate(
                report.corrupt_heap_info):
            file_handle.write('Corrupt range #%d\n' % corrupt_range_idx)
            file_handle.write(' Address : %s\n' % corrupt_heap_range['address'])
            file_handle.write(' Length : %s\n' % corrupt_heap_range['length'])
            file_handle.write(' Block count : %s\n' %
                              corrupt_heap_range['block_count'])
            file_handle.write(' Block info count : %s\n' %
                              corrupt_heap_range['block_info_count'])
            file_handle.write(' Block infos:\n')
            # BUG FIX: the original initialized block_info_idx to 0 and never
            # incremented it, so every block printed as 'Block info #0'.
            # enumerate() gives the intended running index.
            for block_info_idx, block_info in enumerate(
                    corrupt_heap_range['block_info']):
                file_handle.write(' Block info #%d\n' % block_info_idx)
                for field in sorted(block_info):
                    # Stacks and raw content are written separately below.
                    if not field.endswith('stack') and field != 'block_content':
                        file_handle.write(' %s : %s\n' % (field,
                                                          block_info[field]))
                file_handle.write(' Alloc stack:\n')
                for frame in block_info['alloc_stack']:
                    file_handle.write(' %s\n' % frame)
                if block_info['free_stack']:
                    file_handle.write(' Free stack:\n')
                    for frame in block_info['free_stack']:
                        file_handle.write(' %s\n' % frame)
                file_handle.write(' Block content:\n')
                for line in block_info['block_content']:
                    file_handle.write(' %s\n' % line)
    file_handle.write('\n\n%s\n' % _ERROR_HELP_URL)
_USAGE = """\
%prog [options] <minidumps>
Symbolizes a list of minidumps that has been generated by SyzyASan. For each of
them this prints the crash, alloc and free stack traces and gives more
information about the crash.
"""
def _ParseArguments():
    """Parse the command line arguments.

    Returns:
        The options on the command line and the list of minidumps to process.
    """
    parser = optparse.OptionParser(usage=_USAGE)
    parser.add_option('--cdb-path', help='(Optional) The path to cdb.exe.')
    parser.add_option('--pdb-path',
                      help='(Optional) The path to the folder containing the'
                           ' PDBs.')
    opts, args = parser.parse_args()
    if not opts.cdb_path:
        # Fall back to the first default cdb.exe location that exists.
        opts.cdb_path = next(
            (candidate for candidate in _DEFAULT_CDB_PATHS
             if os.path.isfile(candidate)),
            None)
    if not opts.cdb_path:
        parser.error('Unable to find cdb.exe.')
    return opts, args
def main():
  """Parse arguments and do the symbolization."""
  opts, minidumps = _ParseArguments()
  # Process each minidump independently: a failure on one dump is reported
  # but does not stop processing of the remaining ones.
  for minidump in minidumps:
    report = ProcessMinidump(minidump, opts.cdb_path, opts.pdb_path)
    if report:
      print 'Report for %s' % minidump
      PrintASanReport(report)
      print '\n'
    else:
      print 'Error while processing %s' % minidump
  return 0
# Script entry point; exit code is main()'s return value (always 0 here).
if __name__ == '__main__':
  sys.exit(main())
| |
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# General imports
import copy
from numbers import Number
from math import sqrt
# imports from pycast
from pycastobject import PyCastObject
from decorators import optimized
from timeseries import MultiDimensionalTimeSeries
def sign(a, b):
    """Return *a* carrying the algebraic sign of *b*.

    Mirrors FORTRAN's SIGN intrinsic as used by :py:meth:`Matrix.svd`.

    :param a: The value whose magnitude is returned.
    :param b: The value whose sign is applied; b == 0 is treated as positive.
    :return: a if b >= 0, -a if b < 0.
    """
    # The original expression (b / abs(b)) * a raised ZeroDivisionError for
    # b == 0; treat zero as positive instead, matching FORTRAN's SIGN(a, 0).
    if b == 0:
        return a
    return -a if b < 0 else a
def pythag(a, b):
    """Compute c = (a^2 + b^2)^0.5 without destructive underflow or overflow.

    It solves the Pythagorean theorem a^2 + b^2 = c^2 by factoring out the
    larger magnitude before squaring the ratio.
    """
    big = max(abs(a), abs(b))
    small = min(abs(a), abs(b))
    if big == 0.0:
        return 0.0
    ratio = small / float(big)
    return big * sqrt(1.0 + ratio * ratio)
class Matrix(PyCastObject):
    """A Matrix instance stores all relevant data of a matrix.

    It provides a number of Matrix operations, such as multiplication,
    transformation and inversion.

    Values are stored column-major internally: ``self.matrix[column][row]``
    (see :py:meth:`get_value`).
    """

    # default number of digits after decimal point which are printed
    defaultStringPrecision = 3
    def __init__(self, columns, rows, oneDimArray=None, rowBased=True, isOneDimArray=True):
        """Initialize the Matrix with the given number of columns and rows.

        :param integer columns: The number of columns for the Matrix.
        :param integer rows: The number of rows for the Matrix.
        :param list oneDimArray: The values for the Matrix in a one
            dimensional list. Depending on the rowBased parameter, the
            first n values (n = the number of rows) represent either the
            first row or the first column. The length of oneDimArray has
            to be columns * rows. If isOneDimArray is False this should
            be a two dimensional list.
        :param boolean rowBased: Only necessary if the oneDimArray is given.
            Indicates whether the oneDimArray combines rows together
            (rowBased=True) or columns (rowBased=False).
        :param boolean isOneDimArray: Indicates whether the parameter
            oneDimArray is a one dimensional or a two dimensional array.
        :raise: Raises a :py:exc:`ValueError` if:
            - columns < 1 or
            - rows < 1
            - len(oneDimArray) != columns * rows
        """
        if columns < 1 or rows < 1:
            raise ValueError("At least one row and one column is necessary")
        super(Matrix, self).__init__()
        self._columns = columns
        self._rows = rows
        if oneDimArray is None:
            # No data given: zero-filled, stored column-major
            # (matrix[column][row]).
            self.matrix = [[0.0 for i in xrange(rows)] for j in xrange(columns)]
        elif isOneDimArray:
            if len(oneDimArray) != columns * rows:
                raise ValueError("""Size of array does not fit in Matrix
                    with %d rows and %d columns""" % (rows, columns))
            if rowBased:
                # Row-based flat input: entry (row i, col j) lives at
                # index i * columns + j; transpose into column-major storage.
                self.matrix = []
                for j in xrange(columns):
                    self.matrix.append([])
                    for i in xrange(rows):
                        self.matrix[j].append(oneDimArray[i * columns + j])
            else:
                # Column-based flat input is already in storage order.
                self.matrix = [[oneDimArray[j * rows + i] for i in xrange(rows)] for j in xrange(columns)]
        else:
            # Two dimensional input list: delegate validation and copying.
            self._initialize_with_array(oneDimArray, rowBased)
        self._stringPrecision = Matrix.defaultStringPrecision
    def __str__(self):
        """Return a String representation of :py:obj:`self`.

        The number of digits after the decimal point can be specified using
        :py:meth:`set_string_precision`.
        """
        rep = "%d x %d Matrix\n" % (self.get_height(), self.get_width())
        # get value with the most digits before the decimal point.
        max_val = max(max(abs(min(row)), max(row)) for row in self.matrix)
        # set width for each entry:
        # places before decimal place, places after decimal place +
        # decimal point, sign and one empty space.
        width = len(str(int(max_val))) + self._stringPrecision + 3
        for row in xrange(self.get_height()):
            for col in xrange(self.get_width()):
                val = float(self.get_value(col, row))
                rep += "{num: {width}.{prec}f}".format(num=val, width=width, prec=self._stringPrecision)
            rep += "\n"
        return rep
def __eq__(self, otherMatrix):
"""Return if :py:obj:`self` and the other Matrix are equal
Matrices are equal to each other if:
- the values are equal at all positions.
:return: :py:const:`True` if Matrix objects are equal,
:py:const:`False` mulotherwise.
:rtype: boolean
"""
if self.matrix != otherMatrix.matrix:
return False
return True
def __ne__(self, otherMatrix):
"""Return if :py:obj:`self` and the other Matrix are not equal"""
return not self == otherMatrix
    def _initialize_with_array(self, data, rowBased=True):
        """Set the matrix values from a two dimensional list.

        :param list data: Two dimensional list of values.
        :param boolean rowBased: True if data[i] is the i'th row, False if
            data[i] is the i'th column.
        :raise: Raises a :py:exc:`ValueError` if the dimensions of data do
            not match self._rows x self._columns.
        """
        if rowBased:
            # Row-based input must be transposed into the column-major
            # internal storage.
            self.matrix = []
            if len(data) != self._rows:
                raise ValueError("Size of Matrix does not match")
            for col in xrange(self._columns):
                self.matrix.append([])
                for row in xrange(self._rows):
                    if len(data[row]) != self._columns:
                        raise ValueError("Size of Matrix does not match")
                    self.matrix[col].append(data[row][col])
        else:
            # Column-based input already matches storage order; validate
            # then deep-copy so later mutations don't alias the caller's list.
            if len(data) != self._columns:
                raise ValueError("Size of Matrix does not match")
            for col in data:
                if len(col) != self._rows:
                    raise ValueError("Size of Matrix does not match")
            self.matrix = copy.deepcopy(data)
    @classmethod
    def from_timeseries(cls, timeSeries):
        """Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries.

        :param TimeSeries timeSeries: The TimeSeries, which should be used to
            create a new Matrix.
        :return: A Matrix with the values of the timeSeries. Each row of
            the Matrix represents one entry of the timeSeries.
            The time of an entry is ignored in the matrix.
        :rtype: Matrix
        :raise: Raises a :py:exc:`ValueError` if the timeSeries is empty.
        """
        width = 1
        if isinstance(timeSeries, MultiDimensionalTimeSeries):
            width = timeSeries.dimension_count()
        matrixData = [[] for dummy in xrange(width)]
        for entry in timeSeries:
            # entry[0] is skipped here — presumably the timestamp, matching
            # the "time of an entry is ignored" contract above.
            for col in xrange(1, len(entry)):
                matrixData[col - 1].append(entry[col])
        if not matrixData[0]:
            raise ValueError("Cannot create Matrix from empty Timeseries")
        mtrx = Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
        # mtrx.initialize(matrixData, rowBased=False)
        return mtrx
@classmethod
def from_two_dim_array(cls, cols, rows, twoDimArray):
"""Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix.
"""
return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False)
    def initialize(self, datalist, rowBased=True):
        """Initialize :py:obj:`self` with the values stored in the two dimensional list.

        :param list datalist: A list representing the matrix rows
            containing lists representing the columns for each row.
            The values in the list must be numeric.
        :param boolean rowBased: Indicates whether the datalist is row or
            column based. Has to be True if datalist[i] is the i'th row,
            or False if datalist[i] is the i'th column.
        :raise: Raises a :py:exc:`ValueError` if the size of the parameter
            does not match with the size of the Matrix.
        :note: The values in the list are not checked for the correct type.
        """
        # Thin public wrapper around the private initializer.
        self._initialize_with_array(datalist, rowBased)
    def to_multi_dim_timeseries(self):
        """Return a TimeSeries with the values of :py:obj:`self`.

        The index of the row is used for the timestamp.

        :return: A new MultiDimensionalTimeSeries with the values
            of the Matrix.
        :rtype: MultiDimensionalTimeSeries
        """
        ts = MultiDimensionalTimeSeries(dimensions=self.get_width())
        for row in xrange(self.get_height()):
            newEntry = []
            for col in xrange(self.get_width()):
                newEntry.append(self.get_value(col, row))
            # Row index doubles as the (artificial) timestamp.
            ts.add_entry(row, newEntry)
        return ts
def get_array(self, rowBased=True):
"""Return a two dimensional list with the values of the :py:obj:`self`.
:param boolean rowBased: Indicates wether the returned list should be
row or column based. Has to be True if list[i] should be the i'th
row, False if list[i] should be the i'th column.
:return: Returns a list representing the matrix rows
containing lists representing the columns for each row.
:rtype: list
"""
if rowBased:
array = []
for row in xrange(self._rows):
newRow = []
for col in xrange(self._columns):
newRow.append(self.get_value(col, row))
array.append(newRow)
return array
return copy.deepcopy(self.matrix)
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix
    def set_value(self, column, row, value):
        """Set the value of the Matrix at the specified column and row.

        :param integer column: The index for the column (starting at 0)
        :param integer row: The index for the row (starting at 0)
        :param numeric value: The new value at the given column/row
        :raise: Raises an :py:exc:`IndexError` if the index is out of range.
        """
        # Internal storage is column-major: matrix[column][row].
        self.matrix[column][row] = value

    def get_value(self, column, row):
        """Return the value of :py:obj:`self` at the specified column and row.

        :param integer column: The index for the column (starting at 0)
        :param integer row: The index for the row (starting at 0)
        :raise: Raises an :py:exc:`IndexError` if the index is out of range.
        """
        return self.matrix[column][row]

    def get_height(self):
        """Return the number of rows of the Matrix."""
        return self._rows

    def get_width(self):
        """Return the number of columns of the Matrix."""
        return self._columns
def set_string_precision(self, precision):
"""Set the number of digits after the decimal point used to print the Matrix
:param integer precision: The number of digits to which the values
should be rounded when the Matrix is printed.
:raise: Raises an :py:exc:`ValueError` if precision is negative.
"""
if precision < 0:
raise ValueError("precision cannot be negative")
self._stringPrecision = precision
    def invers(self):
        """Return the inverse matrix, if it can be calculated.

        :return: Returns a new Matrix containing the inverse.
        :rtype: Matrix
        :raise: Raises a :py:exc:`ValueError` if the matrix is not invertible.
        :note: Only a squared matrix with a determinant != 0 can be inverted.
        :todo: Reduce amount of create and copy operations.
        """
        if self._columns != self._rows:
            raise ValueError("A square matrix is needed")
        mArray = self.get_array(False)
        appList = [0] * self._columns
        # add identity matrix to array in order to use gauss jordan algorithm
        # (appList[:] copies the row so the columns don't share one list)
        for col in xrange(self._columns):
            mArray.append(appList[:])
            mArray[self._columns + col][col] = 1
        # create new Matrix and execute gauss jordan algorithm
        exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
        gjResult = exMatrix.gauss_jordan()
        # remove identity matrix from left side
        # TODO Implement slicing directly for Matrix
        gjResult.matrix = gjResult.matrix[self._columns:]
        gjResult._columns = len(gjResult.matrix)
        return gjResult
    def __copy__(self):
        """Return a new clone of the Matrix.

        :return: Returns a Matrix containing the same data and
            configuration as self. It does not copy super classes, but the
            optimization status (True/False) is copied.
        :rtype: Matrix
        """
        # from_two_dim_array deep-copies self.matrix via
        # _initialize_with_array, so the clone does not alias our storage.
        mtrx = Matrix.from_two_dim_array(self._columns, self._rows, self.matrix)
        # copy of immutable Boolean.
        mtrx.optimizationEnabled = self.optimizationEnabled
        return mtrx
def __mul__(self, other):
"""Return the result of the matrixmultiplication or a multiple of the matrix
:param Matrix or Number other: The matrix, which should be multiplied.
:return: Returns a new Matrix with the result of the multiplication
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if
- the number of columns of the Matrix does not match witch
- the number of rows of the given matrix.
:raise: Raises an :py:exc:`TypeError` if the input parameter is not
a Matrix or a number
"""
if isinstance(other, Matrix):
return self.matrix_multiplication(other)
elif isinstance(other, Number):
return self.multiply(other)
else:
raise TypeError("Can't multiply Matrix with type %s" % type(other).__name__)
def __rmul__(self, other):
"""Return multiple of Matrix
:return: Returns a
:raise: Raises an :py:exc:`ValueError` if the input parameter is not a number
"""
if isinstance(other, Number):
return self.multiply(other)
else:
raise TypeError("Can't multiply Matrix with type %s" % type(other).__name__)
def is_matrix_mult_possible(self, matrix):
"""Return True if :py:obj:`self` can be multiplied with the other matrix, False otherwise"""
if self._columns != matrix.get_height():
return False
return True
    @optimized
    def matrix_multiplication(self, matrix):
        """Multiply :py:obj:`self` with the given matrix and return the result matrix.

        :param Matrix matrix: The matrix, which should be multiplied.
        :return: Returns a new Matrix with the result of the multiplication.
        :rtype: Matrix
        :note: Make sure, that the matrices can be multiplied.
            The number of columns of the Matrix instance must match with
            the number of rows of the Matrix given as parameter.
            Use is_matrix_mult_possible(matrix) to test.
        """
        resultMatrix = Matrix(matrix.get_width(), self.get_height())
        for r_row in xrange(self._rows):
            for r_col in xrange(matrix.get_width()):
                # blockwise matrix multiplication hack: entries may
                # themselves be Matrix blocks, so the accumulator must start
                # as a zero block of matching size instead of the scalar 0.
                # NOTE(review): self.get_array() rebuilds the full row list on
                # every iteration here — hoisting it out of the loops looks
                # safe and much cheaper; confirm against @optimized path.
                if isinstance(self.get_array()[0][0], Matrix):
                    blocksize = self.get_array()[0][0].get_width()
                    valueT = Matrix(blocksize, blocksize)
                else:
                    valueT = 0
                for column in xrange(matrix.get_height()):
                    valueT += self.get_value(column, r_row) * matrix.get_value(r_col, column)
                resultMatrix.set_value(r_col, r_row, valueT)
        return resultMatrix
    def matrix_multiplication_blockwise(self, matrix, blocksize):
        """Multiply two matrices by splitting them into square blocks.

        See http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication

        :param Matrix matrix: The right-hand factor.
        :param integer blocksize: Edge length of the square blocks; both
            matrices' dimensions must be evenly divisible by it
            (enforced by matrix_to_blockmatrix).
        :return: A new flat Matrix containing the product.
        :rtype: Matrix
        """
        # Create the blockwise version of self and matrix, multiply the
        # block matrices (each entry is itself a Matrix), then flatten back.
        selfBlockwise = self.matrix_to_blockmatrix(blocksize)
        matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)
        return (selfBlockwise * matrixBlockwise).flatten()
    def flatten(self):
        """Flatten a block matrix into one plain Matrix.

        If the current Matrix consists of Blockmatrixes as elements the method
        flattens the Matrix into one Matrix only consisting of the 2nd level
        elements::

            [[[1 2]  [[3 4]      [[1 2 3 4]
              [5 6]]  [7 8]]] to  [5 6 7 8]]
        """
        # All blocks are assumed to share the size of the first block.
        blocksize = self.get_array()[0][0].get_width()
        width = self.get_width() * blocksize
        columnsNew = [[] for dummy in xrange(width)]
        for row in self.get_array():
            # index restarts per block-row: each block-row extends the same
            # result columns downward.
            index = 0
            for submatrix in row:
                for column in submatrix.get_array(False):
                    columnsNew[index] += column
                    index += 1
        columnsFlat = sum(columnsNew, [])
        return Matrix(width, len(columnsNew[0]), columnsFlat, rowBased=False)
    def matrix_to_blockmatrix(self, blocksize):
        """Turn an n*m Matrix into a (n/blocksize)*(m/blocksize) matrix.

        Each element is another blocksize*blocksize matrix.

        :param integer blocksize: Edge length of the square blocks.
        :raise: Raises a :py:exc:`ValueError` if width or height is not
            evenly divisible by blocksize.
        """
        if self.get_width() % blocksize or self.get_height() % blocksize:
            raise ValueError("Number of rows and columns have to be evenly dividable by blocksize")
        selfBlocks = []
        for columnIndex in range(0, self.get_width() - 1, blocksize):
            for rowIndex in range(0, self.get_height() - 1, blocksize):
                currentBlock = []
                # Collect the blocksize x blocksize sub-square, column based.
                for blockRows in self.get_array(False)[columnIndex:columnIndex + blocksize]:
                    currentBlock += blockRows[rowIndex:rowIndex + blocksize]
                selfBlocks.append(Matrix(blocksize, blocksize, currentBlock, rowBased=False))
        # NOTE(review): '/' is Python 2 integer division here; a Python 3
        # port would pass floats to Matrix — use '//' when porting.
        return Matrix(self.get_width() / blocksize, self.get_height() / blocksize, selfBlocks, rowBased=False)
# def matrix_multiplication_scipy(self, matrix):
# a = np.matrix(self.get_array())
# b = np.matrix(matrix.get_array())
# c = (a*b)
# c_list = c.tolist()
# result = Matrix(len(c_list[0]), len(c_list), None)
# result.initialize(c_list)
# return result
def multiply(self, multiplicator):
"""Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
:return: The Matrix with the the multiple.
:rtype: Matrix
"""
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) * multiplicator)
return result
def transform(self):
"""Return a new transformed matrix.
:return: Returns a new transformed Matrix
:rtype: Matrix
"""
t_matrix = Matrix(self._rows, self._columns)
for col_i, col in enumerate(self.matrix):
for row_i, entry in enumerate(col):
t_matrix.set_value(row_i, col_i, entry)
return t_matrix
    def gauss_jordan(self):
        """Reduce :py:obj:`self` to row echelon form.

        :return: Returns :py:obj:`self` in row echelon form for convenience.
        :rtype: Matrix
        :raise: Raises a :py:exc:`ValueError` if:
            - the matrix rows >= columns (an augmented matrix with more
              columns than rows is required)
            - the matrix is not invertible
            In this case :py:obj:`self` is not changed.
        """
        # mArray is column based: mArray[column][row].
        mArray = self.get_array(rowBased=False)
        width = self.get_width()
        height = self.get_height()
        if not height < width:
            raise ValueError("""Not enough rows""")
        # Start with complete matrix and remove in each iteration
        # the first row and the first column
        for offset in xrange(height):
            # Switch lines, if current first value is 0
            if mArray[offset][offset] == 0:
                for i in xrange(offset + 1, height):
                    if mArray[offset][i] != 0:
                        tmp = []
                        for j in xrange(offset, width):
                            tmp.append(mArray[j][offset])
                        # tmp = mArray[offset][offset:]
                        for j in xrange(offset, width):
                            mArray[j][offset] = mArray[j][i]
                            # NOTE(review): tmp was filled starting at column
                            # 'offset', so indexing it with the absolute
                            # column j looks wrong for offset > 0 (expected
                            # tmp[j - offset]) — confirm with a pivot-swap
                            # test case.
                            mArray[j][i] = tmp[j]
                        # mArray[offset][offset:] = mArray[i][offset:]
                        # mArray[i] = tmp
                        break
            currentRow = [mArray[j][offset] for j in xrange(offset, width)]
            devider = float(currentRow[0])
            # If no line is found with a value != 0
            # the matrix is not invertible
            if devider == 0:
                raise ValueError("Matrix is not invertible")
            transformedRow = []
            # Divide current row by first element of current row
            for value in currentRow:
                transformedRow.append(value / devider)
            # put transformed row back into matrix
            for j in xrange(offset, width):
                mArray[j][offset] = transformedRow[j - offset]
            # subtract multiples of the current row, from all remaining rows
            # in order to become a 0 at the current first column
            for i in xrange(offset + 1, height):
                multi = mArray[offset][i]
                for j in xrange(offset, width):
                    mArray[j][i] = mArray[j][i] - mArray[j][offset] * multi
        # Back substitution: eliminate entries above each pivot.
        for i in xrange(1, height):
            # subtract multiples of the i-th row from all above rows
            for j in xrange(0, i):
                multi = mArray[i][j]
                for col in xrange(i, width):
                    mArray[col][j] = mArray[col][j] - mArray[col][i] * multi
        self.matrix = mArray
        return self
def __add__(self, matrix):
"""Return a new Matrix instance with the result of the addition
:param Matrix matrix: The matrix, which should be added to the instance
:return: A new Matrix with the same size with the result of the addition
:rtype: Matrix
:raise: Raises a :py:exc:`ValueError` if the size of the instance does
not match with the size of the parameter matrix
"""
if self.get_height() != matrix.get_height() or self.get_width() != matrix.get_width():
raise ValueError("Size of matrix does not match")
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) + matrix.get_value(col, row))
return result
def __sub__(self, matrix):
"""Return a new Matrix instance with the result of the subtraction
:param Matrix matrix: The matrix, which should be subtracted from the instance
:return: A new Matrix with the same size with the result of the subtraction
:rtype: Matrix
:raise: Raises a :py:exc:`ValueError` if the size of the instance does
not match with the size of the parameter matrix
"""
if self.get_height() != matrix.get_height() or self.get_width() != matrix.get_width():
raise ValueError("Size of matrix does not match")
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) - matrix.get_value(col, row))
return result
    def __div__(self, divider):
        """Return a new Matrix, where all values are divided by the divider.

        :param integer divider: The divider to divide all values of the
            matrix.
        :return: A new Matrix, where all values are divided by the divider.
        :rtype: Matrix

        :note: ``__div__`` is only invoked by Python 2's ``/`` operator;
            a Python 3 port would also need ``__truediv__``.
        """
        result = Matrix(self.get_width(), self.get_height())
        for row in xrange(self.get_height()):
            for col in xrange(self.get_width()):
                # float() forces true division regardless of operand types.
                result.set_value(col, row, self.get_value(col, row) / float(divider))
        return result
    def householder(self):
        """Return Matrices u, b, v with self = ubv and b in bidiagonal form.

        The algorithm uses householder transformations.

        :return tuple (u,b,v): A tuple with the Matrix u, b and v,
            with self = ubv (except some rounding errors).
            u is a unitary matrix.
            b is a bidiagonal matrix.
            v is a unitary matrix.
        :note: Currently the algorithm only works for squared matrices.
        :todo: Make sure, that the bidiagonal matrix is 0.0 except for the
            bidiagonal. Due to rounding errors, this is currently not ensured.

        NOTE(review): u is only bound inside the k-loop (needs width >= 2)
        and v only when k < width - 2 ever holds (needs width >= 3); for
        smaller matrices the final return raises NameError — confirm the
        minimum supported size.
        """
        # copy instance to transform it to bidiagonal form.
        bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
        # build identity matrix, which is used to calculate householder transformations
        identityMatrixRow = Matrix(self.get_height(), self.get_height())
        for i in xrange(self.get_height()):
            identityMatrixRow.set_value(i, i, 1.0)
        identityMatrixCol = Matrix(self.get_width(), self.get_width())
        for i in xrange(self.get_width()):
            identityMatrixCol.set_value(i, i, 1.0)
        # zero out the k'th column and row
        for k in xrange(self.get_width() - 1):
            # vector with the values of the k'th column (first k-1 rows are 0)
            x = Vector(self.get_height())
            y = Vector(self.get_height())
            if k > 0:
                x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
                y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
            s = 0.0
            for i in xrange(k, self.get_height()):
                val = bidiagMatrix.get_value(k, i)
                x.set_value(0, i, val)
                s += (val ** 2)
            s = sqrt(s)
            # y must have same length as x
            y.set_value(0, k, s)
            tmp = x - y
            norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
            # calculate w = (x-y)/(|x-y|)
            w = tmp / norm
            # uk is the k'th householder matrix for the column
            uk = identityMatrixRow - 2 * (w * w.transform())
            bidiagMatrix = uk * bidiagMatrix
            if k == 0:
                # set u in first iteration.
                u = uk
            else:
                u = u * uk
            # zero out the row
            if k < self.get_width() - 2:
                x = Vector(self.get_width())
                y = Vector(self.get_width())
                x.set_value(0, k, bidiagMatrix.get_value(k, k))
                y.set_value(0, k, bidiagMatrix.get_value(k, k))
                s = 0.0
                for i in xrange(k + 1, bidiagMatrix.get_width()):
                    val = bidiagMatrix.get_value(i, k)
                    x.set_value(0, i, val)
                    s += (val ** 2)
                # length of vector x ignoring the k'th value
                s = sqrt(s)
                # y must have same length as x, since k'th value is equal;
                # set k+1 value to s
                y.set_value(0, k + 1, s)
                tmp = x - y
                norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
                w = tmp / norm
                # vk is the k'th householder matrix for the row
                vk = identityMatrixCol - (2 * (w * w.transform()))
                bidiagMatrix = bidiagMatrix * vk
                if k == 0:
                    # set v in first iteration
                    v = vk
                else:
                    v = vk * v
        return (u, bidiagMatrix, v)
    def svd(self, maxIteration=50):
        """Return the singular value decomposition of the Matrix instance.

        :param integer maxIteration: The maximum number of iterations,
            which are executed in the qr decomposition.
        :return: A tuple with Matrices u, sigma, v so that
            u * sigma * v^T = self.
        :rtype: tuple
        :raise: Raises a :py:exc:`ValueError` if the Matrix object has
            more columns than rows.
        :note: Translation of the FORTRAN implementation of the SVD given
            in NUMERICAL RECIPES IN FORTRAN 77. THE ART OF SCIENTIFIC
            COMPUTING. The algorithm is not yet numerically stable, so the
            results may not be in all cases as expected.

        NOTE(review): as in the FORTRAN original, the working variables
        (l, g, s, f, ...) deliberately carry values across phases — do not
        reorder statements.
        """
        if(self.get_width() > self.get_height()):
            raise ValueError("Matrix has more columns than rows.")
        eps = 1.e-15
        tol = 1.e-64 / eps
        # a is column based: a[column][row] (indices are swapped relative to
        # the row-based FORTRAN original).
        a = self.get_array(False)
        m = len(a[0])
        n = len(a)
        v = []
        for k in xrange(n):
            v.append([0.0] * n)
        # output diagonal
        w = [0.0] * n
        # upper diagonal (for bidiagonal form)
        rv1 = [0.0] * n
        # Householder Reduction to bidiagonal form
        g = 0.0
        anorm = 0.0
        for i in xrange(n):
            l = i + 1
            rv1[i] = g
            s = 0.0
            # calculate length of relevant row vector in matrix (part of i'th column)
            s = sum(a[i][k] ** 2 for k in xrange(i, m))
            if s <= tol:
                g = 0.0
            else:
                f = a[i][i]
                # square root to get actual length of vector
                g = sqrt(s) if f < 0 else -sqrt(s)
                h = f * g - s
                a[i][i] = f - g
                for j in xrange(l, n):
                    s = sum(a[i][k] * a[j][k] for k in xrange(i, m))
                    f = s / h
                    for k in xrange(i, m):
                        a[j][k] += (f * a[i][k])
            w[i] = g
            # calculate length of relevant column vector in matrix (part of i'th row)
            s = 0.0
            s = sum(a[k][i] ** 2 for k in xrange(l, n))
            if s <= tol:
                g = 0.0
            else:
                f = a[l][i]
                g = sqrt(s) if f < 0 else -sqrt(s)
                h = f * g - s
                a[l][i] = f - g
                for k in xrange(l, n):
                    rv1[k] = a[k][i] / h
                for j in xrange(l, m):
                    s = sum(a[k][j] * a[k][i] for k in xrange(l, n))
                    for k in xrange(l, n):
                        a[k][j] += (s * rv1[k])
            anorm = max(anorm, (abs(w[i]) + abs(rv1[i])))
        # Accumulation of right hand transformations
        for i in xrange(n - 1, -1, -1):
            if g != 0.0:
                for j in xrange(l, n):
                    v[i][j] = a[j][i] / (g * a[i + 1][i])
                for j in xrange(l, n):
                    s = sum(a[k][i] * v[j][k] for k in xrange(l, n))
                    for k in xrange(l, n):
                        v[j][k] += (s * v[i][k])
            for j in xrange(l, n):
                v[j][i] = 0.0
                v[i][j] = 0.0
            v[i][i] = 1.0
            g = rv1[i]
            l = i
        # Accumulation of left hand transformations
        for i in xrange(n - 1, -1, -1):
            l = i + 1
            g = w[i]
            for j in xrange(l, n):
                a[j][i] = 0.0
            if g != 0.0:
                for j in xrange(l, n):
                    s = sum(a[i][k] * a[j][k] for k in xrange(l, m))
                    f = s / (a[i][i] * g)
                    for k in xrange(i, m):
                        a[j][k] += f * a[i][k]
                for j in xrange(i, m):
                    a[i][j] /= g
            else:
                for j in xrange(i, m):
                    a[i][j] = 0.0
            a[i][i] += 1.0
        eps *= anorm
        # Diagonalization of the bidiagonal form.
        # Loop over singular values and over allowed iterations
        for k in xrange(n - 1, -1, -1):
            for dummy in xrange(maxIteration):
                # Test for splitting: find the largest l with a negligible
                # off-diagonal element.
                for l in xrange(k, -1, -1):
                    convergenceTest = False
                    if abs(rv1[l]) <= eps:
                        convergenceTest = True
                        break
                    if abs(w[l - 1]) <= eps:
                        # convergenceTest = False (already default)
                        break
                if not convergenceTest:
                    # Cancellation of rv1[l] if l > 0.
                    c = 0.0
                    s = 1.0
                    nm = l - 1
                    for i in xrange(l, k + 1):
                        f = s * rv1[i]
                        rv1[i] = c * rv1[i]
                        if abs(f) <= eps:
                            break
                        g = w[i]
                        h = pythag(f, g)
                        w[i] = h
                        c = g / h
                        s = -f / h
                        for j in xrange(m):
                            y = a[nm][j]
                            z = a[i][j]
                            a[nm][j] = (y * c) + (z * s)
                            a[i][j] = -(y * s) + (z * c)
                z = w[k]
                if l == k:
                    # convergence
                    if z < 0.0:
                        # singular value is made nonnegative
                        w[k] = -z
                        for j in xrange(n):
                            v[k][j] = -v[k][j]
                    break
                # Shift from bottom 2x2 minor, then QR transformation.
                x = w[l]
                y = w[k - 1]
                g = rv1[k - 1]
                h = rv1[k]
                f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y)
                g = pythag(f, 1.0)
                f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x
                c = 1.0
                s = 1.0
                for i in xrange(l + 1, k + 1):
                    g = rv1[i]
                    y = w[i]
                    h = s * g
                    g = c * g
                    z = pythag(f, h)
                    rv1[i - 1] = z
                    c = f / z
                    s = h / z
                    f = (x * c) + (g * s)
                    g = -x * s + g * c
                    h = y * s
                    y = y * c
                    for jj in xrange(n):
                        x = v[i - 1][jj]
                        z = v[i][jj]
                        v[i - 1][jj] = (x * c) + (z * s)
                        v[i][jj] = -(x * s) + (z * c)
                    z = pythag(f, h)
                    w[i - 1] = z
                    if z != 0.0:
                        # rotation can be arbitrary if z is zero
                        z = 1.0 / z
                        c = f * z
                        s = h * z
                    f = (c * g) + (s * y)
                    x = -s * g + c * y
                    for jj in xrange(m):
                        y = a[i - 1][jj]
                        z = a[i][jj]
                        a[i - 1][jj] = (y * c) + (z * s)
                        a[i][jj] = -(y * s) + (z * c)
                rv1[l] = 0.0
                rv1[k] = f
                w[k] = x
        # Build Matrix instances for the result
        uM = Matrix.from_two_dim_array(len(a), len(a[0]), a)
        diagMatrix = Matrix(len(w), len(w))
        for i in xrange(len(w)):
            diagMatrix.set_value(i, i, w[i])
        vM = Matrix.from_two_dim_array(len(v), len(v[0]), v)
        return uM, diagMatrix, vM
    def pseudoinverse(self):
        """Return the pseudoinverse (Moore-Penrose-Inverse).

        The singular value decomposition is used to calculate the
        pseudoinverse.

        :return: A new Matrix with the pseudoinverse of :py:obj:`self`.
        :rtype: Matrix
        """
        transform = False
        if self.get_width() > self.get_height():
            # svd() requires rows >= columns, so decompose the transpose
            # and transpose the result back at the end.
            transform = True
            u, sigma, v = self.transform().svd()
        else:
            u, sigma, v = self.svd()
        # calculate inverse of sigma (reciprocal of each singular value)
        for i in xrange(min(sigma.get_height(), sigma.get_width())):
            val = sigma.get_value(i, i)
            # divide only if the value is not 0 or close to zero (rounding errors)
            eps = 1.e-15
            if eps < val or val < -eps:
                sigma.set_value(i, i, 1 / val)
        if transform:
            return (v * sigma * u.transform()).transform()
        else:
            return v * sigma * u.transform()
class Vector(Matrix):
    """A vector instance is a Matrix, which only has 1 column."""

    def __init__(self, rows):
        """Initialize a vector with the given number of rows.

        All values of this vector are 0.0.
        """
        super(Vector, self).__init__(1, rows)

    @classmethod
    def initialize_from_matrix(cls, matrix, column):
        """Create a vector from one column of a matrix.

        :param Matrix matrix: The Matrix, which should be used to create the
            vector.
        :param integer column: The column of the matrix, which should be used
            to create the new vector.
        :raise: Raises an :py:exc:`IndexError` if the matrix does not have
            the specified column.
        """
        result = Vector(matrix.get_height())
        for rowIdx in xrange(matrix.get_height()):
            result.set_value(0, rowIdx, matrix.get_value(column, rowIdx))
        return result

    def norm(self):
        """Calculate the norm (length) of the vector.

        :return: Return the length of the vector.
        :rtype: float
        """
        squaredSum = sum(entry[0] ** 2 for entry in self.get_array())
        return sqrt(squaredSum)

    def unify(self):
        """Unify the vector so that its length becomes 1.

        :return: Return the instance itself.
        :rtype: Vector
        """
        length = float(self.norm())
        for rowIdx in xrange(self.get_height()):
            self.set_value(0, rowIdx, self.get_value(0, rowIdx) / length)
        return self
| |
from __future__ import absolute_import
from __future__ import unicode_literals
from functools import reduce
import six
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_SERVICE
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        # client: Docker API client used for the inspect/start/stop calls.
        # dictionary: raw container JSON; has_been_inspected records whether
        # it came from a full inspect (True) or a /containers/json row.
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected
@classmethod
def from_ps(cls, client, dictionary, **kwargs):
"""
Construct a container object from the output of GET /containers/json.
"""
name = get_container_name(dictionary)
if name is None:
return None
new_dictionary = {
'Id': dictionary['Id'],
'Image': dictionary['Image'],
'Name': '/' + name,
}
return cls(client, new_dictionary, **kwargs)
    @classmethod
    def from_id(cls, client, id):
        # Build a Container from the full inspect payload for the given id.
        return cls(client, client.inspect_container(id))

    @classmethod
    def create(cls, client, **options):
        # Create the container server-side, then re-fetch it by id so the
        # instance starts from an inspect payload.
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])
    @property
    def id(self):
        # Full container id as reported by the API.
        return self.dictionary['Id']

    @property
    def image(self):
        return self.dictionary['Image']

    @property
    def image_config(self):
        # Full image metadata; requires an API round-trip.
        return self.client.inspect_image(self.image)

    @property
    def short_id(self):
        # Abbreviated id (first 10 characters).
        return self.id[:10]

    @property
    def name(self):
        # API names carry a leading '/', which is stripped here.
        return self.dictionary['Name'][1:]

    @property
    def service(self):
        return self.labels.get(LABEL_SERVICE)

    @property
    def name_without_project(self):
        return '{0}_{1}'.format(self.service, self.number)

    @property
    def number(self):
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)

    @property
    def ports(self):
        # Port mappings come from the inspect payload, so force one first.
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}
@property
def human_readable_ports(self):
def format_port(private, public):
if not public:
return private
return '{HostIp}:{HostPort}->{private}'.format(
private=private, **public[0])
return ', '.join(format_port(*item)
for item in sorted(six.iteritems(self.ports)))
@property
def labels(self):
return self.get('Config.Labels') or {}
@property
def log_config(self):
return self.get('HostConfig.LogConfig') or None
@property
def human_readable_state(self):
if self.is_paused:
return 'Paused'
if self.is_running:
return 'Ghost' if self.get('State.Ghost') else 'Up'
else:
return 'Exit %s' % self.get('State.ExitCode')
@property
def human_readable_command(self):
entrypoint = self.get('Config.Entrypoint') or []
cmd = self.get('Config.Cmd') or []
return ' '.join(entrypoint + cmd)
@property
def environment(self):
return dict(var.split("=", 1) for var in self.get('Config.Env') or [])
@property
def is_running(self):
return self.get('State.Running')
@property
def is_paused(self):
return self.get('State.Paused')
def get(self, key):
"""Return a value from the container or None if the value is not set.
:param key: a string using dotted notation for nested dictionary
lookups
"""
self.inspect_if_not_inspected()
def get_value(dictionary, key):
return (dictionary or {}).get(key)
return reduce(get_value, key.split('.'), self.dictionary)
def get_local_port(self, port, protocol='tcp'):
port = self.ports.get("%s/%s" % (port, protocol))
return "{HostIp}:{HostPort}".format(**port[0]) if port else None
def start(self, **options):
return self.client.start(self.id, **options)
def stop(self, **options):
return self.client.stop(self.id, **options)
def pause(self, **options):
return self.client.pause(self.id, **options)
def unpause(self, **options):
return self.client.unpause(self.id, **options)
def kill(self, **options):
return self.client.kill(self.id, **options)
def restart(self, **options):
return self.client.restart(self.id, **options)
def remove(self, **options):
return self.client.remove_container(self.id, **options)
def inspect_if_not_inspected(self):
if not self.has_been_inspected:
self.inspect()
def wait(self):
return self.client.wait(self.id)
def logs(self, *args, **kwargs):
return self.client.logs(self.id, *args, **kwargs)
def inspect(self):
self.dictionary = self.client.inspect_container(self.id)
self.has_been_inspected = True
return self.dictionary
# TODO: only used by tests, move to test module
def links(self):
links = []
for container in self.client.containers():
for name in container['Names']:
bits = name.split('/')
if len(bits) > 2 and bits[1] == self.name:
links.append(bits[2])
return links
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
def attach_socket(self, **kwargs):
return self.client.attach_socket(self.id, **kwargs)
def __repr__(self):
return '<Container: %s (%s)>' % (self.name, self.id[:6])
def __eq__(self, other):
if type(self) != type(other):
return False
return self.id == other.id
def __hash__(self):
return self.id.__hash__()
def get_container_name(container):
    """Extract a container's name from an inspect or ps API payload.

    Inspect payloads carry a single 'Name' (returned verbatim, including
    its leading '/'); ps payloads carry a 'Names' list of aliases, from
    which the least-nested one is returned without any '/' prefix.
    Returns None when neither field yields a name.
    """
    if not container.get('Name') and not container.get('Names'):
        return None
    # inspect
    if 'Name' in container:
        return container['Name']
    # ps: pick the alias with the fewest path segments
    candidates = container['Names']
    best = min(candidates, key=lambda entry: len(entry.split('/')))
    return best.split('/')[-1]
| |
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)
    Z : array-like, shape (n_samples, n_neighbors, n_dim)
    reg : float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)

    Notes
    -----
    See developers note for more information.
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)
    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        C = A.T - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        # Tikhonov regularization scaled by the trace keeps the Gram
        # system well-posed when n_neighbors > n_dim.
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        G.flat[::n_neighbors + 1] += R
        # `assume_a='pos'` replaces `sym_pos=True`, which was deprecated
        # and removed in SciPy >= 1.11; G is symmetric positive definite
        # after regularization.
        w = solve(G, v, assume_a='pos')
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
    """Build the weighted k-neighbors graph of X with barycenter weights.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.
    n_neighbors : int
        Number of neighbors for each sample.
    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    # Query one extra neighbor: each sample's nearest hit is itself and is
    # dropped below.
    estimator = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
    X = estimator._fit_X
    n_samples = X.shape[0]
    neighbor_ind = estimator.kneighbors(X, return_distance=False)[:, 1:]
    weights = barycenter_weights(X, X[neighbor_ind], reg=reg)
    # Every row holds exactly n_neighbors entries.
    row_ptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((weights.ravel(), neighbor_ind.ravel(), row_ptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite
    k : integer
        Number of eigenvalues/vectors to return
    k_skip : integer, optional
        Number of low eigenvalues to skip.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.
    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK pays off only on larger matrices
        # when few eigenvectors are requested.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'
    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # initialize with [-1,1] as in ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            # sigma=0.0: shift-invert around zero targets the near-null space.
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved. method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)
        # Drop the k_skip smallest eigenpairs (the trivial null directions).
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            M = M.toarray()
        # NOTE(review): the `eigvals` keyword was deprecated in SciPy 1.5 in
        # favour of `subset_by_index` and later removed — confirm against the
        # SciPy version in use.
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        # Order the selected eigenpairs by eigenvalue magnitude.
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None, n_jobs=1):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold.
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
            see reference [1]_
        hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2.
            see reference [2]_
        modified : use the modified locally linear embedding algorithm.
            see reference [3]_
        ltsa : use local tangent space alignment algorithm
            see reference [4]_
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.
    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding. Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A. 100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ. 8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)
    # +1 neighbor because each sample's nearest neighbor is itself and is
    # discarded by the per-method queries below.
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X
    N, d_in = X.shape
    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")
    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")
    # ARPACK accepts sparse M; the dense solver needs a plain array.
    M_sparse = (eigen_solver != 'dense')
    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # M = W'W - W' - W + I
    elif method == 'hessian':
        # dp = number of independent entries of a symmetric Hessian.
        dp = n_components * (n_components + 1) // 2
        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1
        M = np.zeros((N, N), dtype=np.float64)
        # SVD of the (n_neighbors x d_in) block is cheaper than eigh of the
        # (n_neighbors x n_neighbors) Gram matrix only when n_neighbors > d_in.
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)
            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]
            Yi[:, 1:1 + n_components] = U[:, :n_components]
            j = 1 + n_components
            # Fill the remaining dp columns with pairwise products of the
            # leading tangent directions.
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
                                                 U[:, k:n_components])
                j += n_components - k
            Q, R = qr(Yi)
            w = Q[:, n_components + 1:]
            S = w.sum(0)
            # Guard against division by (near-)zero column sums.
            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])
        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)
        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            # singular values squared == covariance eigenvalues
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                # eigh returns ascending order; flip to descending.
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]
        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)
        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]
        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]
        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)
        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = np.cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues
        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]
            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h
            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
                  (1 - alpha_i) * w_reg[i, :, None])
            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        M = np.zeros((N, N))
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)
            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]
            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)
            GiGiT = np.dot(Gi, Gi.T)
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1
    # The embedding is the near-null space of M (skipping the constant
    # eigenvector).
    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition. For this method, M must be an array
            or matrix type. This method should be avoided for
            large problems.
    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.
    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        standard : use the standard locally linear embedding algorithm. see
            reference [1]
        hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2``
            see reference [2]
        modified : use the modified locally linear embedding algorithm.
            see reference [3]
        ltsa : use local tangent space alignment algorithm
            see reference [4]
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if ``method == 'hessian'``
    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if ``method == 'modified'``
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    embedding_vectors_ : array-like, shape [n_components, n_samples]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_vectors_`
    nbrs_ : NearestNeighbors object
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding. Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A. 100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ. 8:406 (2004)`
    """
    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None, n_jobs=1):
        # Only store parameters here, per scikit-learn convention; all
        # computation happens in fit/_fit_transform.
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.random_state = random_state
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs
    def _fit_transform(self, X):
        # Shared implementation for fit/fit_transform: fits the neighbor
        # search structure, then delegates to locally_linear_embedding
        # (which accepts the fitted NearestNeighbors object directly).
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        self.nbrs_.fit(X)
        self.embedding_, self.reconstruction_error_ = \
            locally_linear_embedding(
                self.nbrs_, self.n_neighbors, self.n_components,
                eigen_solver=self.eigen_solver, tol=self.tol,
                max_iter=self.max_iter, method=self.method,
                hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
                random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
    def fit(self, X, y=None):
        """Compute the embedding vectors for data X

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_
    def transform(self, X):
        """
        Transform new points into embedding space.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        check_is_fitted(self, "nbrs_")
        X = check_array(X)
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        # Reconstruct each new point as the barycenter of its training-set
        # neighbors, then map it with the training embedding.
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        for i in range(X.shape[0]):
            X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
        return X_new
| |
#!/usr/local/bin/python
import sys, os, getopt, signal, time, re, sqlite3
import distutils.core
import xml.etree.cElementTree as ET
from bs4 import BeautifulSoup, NavigableString, Tag
# The categories that can be found in the ClassHierarchy/index.html file.
# Maps a Dash index type to the regexes matched against a page's syntax
# block (UCLASS/USTRUCT are presumably UE4 reflection macros — confirm).
maincategories = {
    "Class": [
        re.compile("^.*class .+$", re.MULTILINE),
        re.compile("^.*UCLASS.*$", re.MULTILINE)],
    "Struct": [
        re.compile("^.*struct .+$", re.MULTILINE),
        re.compile("^.*USTRUCT.*$", re.MULTILINE)],
    "Union": [
        re.compile("^.*union .+$", re.MULTILINE)]
}
class detail:
    """One detail section of a class doc page (methods, variables, ...)."""
    def __init__(self, htmlname, indexname, regexps):
        # htmlname: HTML element id of the section within a doc page.
        # indexname: Dash entry type recorded in the search index.
        # regexps: extra patterns; unused by the parsing code below.
        self.htmlname = htmlname
        self.indexname = indexname
        self.regexps = regexps
# Additional detail categories, that can be found in each of the referenced files.
detailcategories = [
    detail("constructor", "Constructor", []),
    detail("constants", "Constant", []),
    detail("variables", "Variable", []),
    detail("methods", "Method", []),
    detail("operators", "Operator", [])
]
# Module-wide state, populated by main().
htmlroot = None
docsetpath = None
docpath = None
dbpath = None
db = None
cur = None
# Number of index rows inserted so far (for verbose logging).
count = 0
# Non-zero after -v: per-entry logging instead of the progress bar.
verbose = 0
def usage():
    """Print command-line usage help (with ANSI underlining) to stdout."""
    print 'Usage: ue4docset.py [options] <htmlroot> <docsetpath>\n'
    print ('\tParses the extracted chm documentation at ' + '\033[4m' + 'htmlroot' + '\033[0m' +
           ' and generates a docset at ' + '\033[4m' + 'docsetpath' + '\033[0m' + '.')
    print '\nOptions:'
    print '\t-i\tDocumentation identifier.'
    print '\t-n\tDocumentation display name.'
    print '\t-s\tDocumentation version.'
    print '\t-f\tFallback URL.'
    print '\t-v\tVerbose.'
    print '\nExample:'
    print '\tue4docset.py -n "Unreal Engine" -s "4.0.2" ~/Desktop/HTML ~/Desktop/UE4.docset'
def signal_handler(signal, frame):
    """Exit with status 2 when the user interrupts the run (Ctrl-C)."""
    print('\nAborted by user.')
    sys.exit(2)
# Install the handler so SIGINT aborts cleanly instead of tracebacking.
signal.signal(signal.SIGINT, signal_handler)
# Generate an Info.plist file from the passed, optional parameters.
def generate_plist(opts):
    """Write Contents/Info.plist for the docset from command-line options."""
    print "Generating Info.plist"
    # Defaults, overridable via -i/-n/-s/-f.
    identifier = "com.epic.unrealengine4"
    name = "UE4"
    fallbackURL = "https://docs.unrealengine.com/latest/INT/"
    version = None
    for o, a in opts:
        if o == "-i":
            identifier = a
        elif o == "-n":
            name = a
        elif o == "-s":
            version = a
        elif o == "-f":
            fallbackURL = a
    plistpath = os.path.join(docsetpath, "Contents/Info.plist")
    # Build the plist DOM: one flat <dict> of <key>/<string> pairs.
    plist = ET.Element("plist")
    plist.set("version", "1.0")
    root = ET.SubElement(plist, "dict")
    key = ET.SubElement(root, "key")
    key.text = "CFBundleIdentifier"
    string = ET.SubElement(root, "string")
    string.text = identifier
    key = ET.SubElement(root, "key")
    key.text = "CFBundleName"
    string = ET.SubElement(root, "string")
    string.text = name
    # Version keys are only emitted when -s was given.
    if not version is None:
        key = ET.SubElement(root, "key")
        key.text = "CFBundleShortVersionString"
        string = ET.SubElement(root, "string")
        string.text = version
        key = ET.SubElement(root, "key")
        key.text = "CFBundleVersion"
        string = ET.SubElement(root, "string")
        string.text = version
    key = ET.SubElement(root, "key")
    key.text = "DashDocSetFamily"
    string = ET.SubElement(root, "string")
    string.text = "appledoc"
    key = ET.SubElement(root, "key")
    key.text = "DocSetPlatformFamily"
    string = ET.SubElement(root, "string")
    string.text = name
    key = ET.SubElement(root, "key")
    key.text = "DocSetFallbackURL"
    string = ET.SubElement(root, "string")
    string.text = fallbackURL
    key = ET.SubElement(root, "key")
    key.text = "dashIndexFilePath"
    string = ET.SubElement(root, "string")
    string.text = "INT/API/index.html"
    key = ET.SubElement(root, "key")
    key.text = "isDashDocset"
    # Emits an empty <true/> element; the variable itself is never used.
    value = ET.SubElement(root, "true")
    tree = ET.ElementTree(plist)
    with open(plistpath, "w") as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">')
        # NOTE(review): writing with encoding 'utf-8' to a text-mode handle
        # works under Python 2 only — confirm if ever ported to Python 3.
        tree.write(f, 'utf-8')
# Try to find out which category (class, struct..) a doc page belongs to
# by matching its syntax block against the per-category regexes.
def guess_category(syntax):
    """Return the first main category whose regexes match, or None."""
    for category in maincategories:
        for pattern in maincategories[category]:
            if pattern.search(syntax):
                return category
    return None
# Insert an entry into the documentation index database.
def insert_index(name, category, path):
    """Add one (name, type, path) row to the Dash searchIndex table."""
    global verbose
    # Skip entries whose target page is missing from the copied docs.
    if not os.path.isfile(os.path.join(docpath, path)):
        if verbose:
            print "Documenation at path " + path + " does not exist. Skipping."
        return
    global count
    if verbose:
        print (str(count) + ": Found " + category +
               (" " * (15 - len(category))) + name +
               (" " * (40 - len(name))) + " at " + path)
    # OR IGNORE: the unique (name, type, path) index silently drops duplicates.
    cur.execute("INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)",
                (name, category, path))
    count += 1
# Find all the additional categories of a specific type (methods, variables, etc.) in a class doc page.
def parse_file_detail(abspath, soup, detail):
    """Index every linked entry of one detail section of a doc page.

    Previously a blanket `except Exception: pass` hid real failures
    (including sqlite errors raised by insert_index); only the expected
    scraping conditions are now tolerated.
    """
    section = soup.find(id=detail.htmlname)
    if section is None:
        # The page simply has no such section; nothing to index.
        return
    for namecell in section.find_all(class_="name-cell"):
        if not namecell.a:
            continue
        name = namecell.a.text
        try:
            relpath = namecell.a['href']
        except KeyError:
            # Anchor without an href; skip it quietly.
            continue
        thepath = os.path.relpath(os.path.normpath(os.path.join(os.path.dirname(abspath), relpath)), htmlroot)
        insert_index(name, detail.indexname, thepath)
# Go through the doc page of a class/struct and parse all the methods, variables etc..
def parse_file_details(abspath, soup):
    """Index every known detail section of the parsed page."""
    for section in detailcategories:
        parse_file_detail(abspath, soup, section)
# Parse a class/struct doc page. Find its name in the H1 tag, guess its category based on the syntax.
def parse_file(abspath):
    """Index one doc page: name from the H1 tag, category from the syntax.

    Fixes: the file handle was previously never closed, and a blanket
    `except Exception: pass` swallowed every error, not just the expected
    "not a class page" cases.
    """
    # Linked paths may not exist in the extracted tree; skip them as before.
    try:
        page = open(abspath)
    except IOError:
        return
    try:
        soup = BeautifulSoup(page)
    finally:
        page.close()
    try:
        name = soup.find(id="H1TitleId").text
        cattext = soup.find(class_='simplecode_api').text
    except AttributeError:
        # find() returned None: page lacks a title or a syntax block, so it
        # is not a class/struct doc page.
        return
    category = guess_category(cattext)
    if category is not None and name is not None:
        thepath = os.path.relpath(abspath, htmlroot)
        insert_index(name, category, thepath)
        parse_file_details(abspath, soup)
def print_progress(progress):
    """Redraw a 50-character progress bar on stdout (unless verbose).

    :param progress: completion ratio in [0.0, 1.0]
    """
    global verbose
    if not verbose:
        p = int(progress * 100)
        sys.stdout.write('\r')
        # `p // 2` (explicit floor division) gives the same result as the
        # Python-2-only `p / 2` but is portable to Python 3.
        sys.stdout.write("[%-50s] %d%% " % ('=' * (p // 2), p))
        sys.stdout.flush()
# Go thought all the links of an index file (ClassHierarchy/index.html) and parse all linked doc pages.
def scrape_index_file(abspath):
# print "Scraping file at " + abspath
page = open(abspath)
soup = BeautifulSoup(page)
links = soup.find_all('a')
for idx, link in enumerate(links):
print_progress(float(idx) / float(len(links)))
relpath = link['href']
if not relpath.startswith('javascript') and not relpath.startswith('http'):
foundpath = os.path.normpath(os.path.join(os.path.dirname(abspath), relpath))
parse_file(foundpath)
print_progress(1.0)
print ''
def scrape_folder(abspath):
    """Recursively parse every file found beneath *abspath*."""
    for dirpath, _subdirs, filenames in os.walk(abspath):
        for filename in filenames:
            parse_file(os.path.join(dirpath, filename))
def main():
    """Parse arguments, copy the HTML tree into the docset and index it."""
    global htmlroot
    global docsetpath, docpath, dbpath
    global db, cur
    global verbose
    try:
        opts, args = getopt.getopt(sys.argv[1:], "vi:n:s:f:")
        if len(args) < 2:
            usage()
            sys.exit(2)
        htmlroot = args[0]
        # Sanity-check: extracted CHM docs always contain an INT folder.
        if not os.path.isdir(os.path.join(htmlroot, 'INT')):
            print 'Error: Extracted CHM documentation not found. Did you specify the correct path?',
            print 'It should contain a number of files with a # prefix and a folder called INT'
            sys.exit(2)
        docsetpath = args[1]
        docsetname, docsetext = os.path.splitext(docsetpath)
        if not docsetext == '.docset':
            print 'Error: docsetpath argument should specify the path of the docset file.',
            print 'E.g. ~/Desktop/UE4.docset'
            sys.exit(2)
        for o, a in opts:
            if o == "-v":
                verbose = 1
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    docpath = os.path.join(docsetpath, "Contents/Resources/Documents")
    if not os.path.exists(docpath):
        os.makedirs(docpath)
    dbpath = os.path.join(docsetpath, "Contents/Resources/docSet.dsidx")
    print 'Copying documentation from ' + htmlroot + ' to ' + docpath + '.'
    print 'This may take a few minutes.'
    # NOTE(review): relies on `import distutils.core` having made
    # distutils.dir_util reachable as an attribute — fragile; confirm.
    distutils.dir_util.copy_tree(htmlroot, docpath)
    chmroot = os.path.join(htmlroot, 'INT/API')
    classlistpath = os.path.join(chmroot, 'ClassHierarchy/')
    classlistindex = os.path.join(classlistpath, 'index.html')
    generate_plist(opts)
    db = sqlite3.connect(dbpath)
    cur = db.cursor()
    # Drop any stale index table from a previous run (best-effort).
    try: cur.execute('DROP TABLE searchIndex;')
    except: pass
    cur.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
    cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
    print 'Indexing documentation'
    #time.sleep(3)
    scrape_index_file(classlistindex)
    #scrape_folder(chmroot)
    db.commit()
    db.close()
# Script entry point: run the generator and report elapsed wall-clock time.
start_time = time.time();
main();
print "Generation took", time.time() - start_time, "seconds."
| |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import logging
from PIL import Image
import sys
import scipy as sp
from itertools import cycle
import util
import math
from nltk.tbl import template
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
BLACK_BG_THRESHOLD = 50
BLACK_BG_VAR_THRESHOLD = 255
WHITE_BG_THRESHOLD = 225
def xcut(img, x):
    """Split *img* vertically at column *x*.

    Returns [left, right]; the cut column itself is dropped, matching the
    idea of removing a one-pixel "cut line".
    """
    height, width = img.shape[:2]
    left = img[0:height, 0:x]
    right = img[0:height, x + 1:width + 1]
    return [left, right]
def ycut(img, y):
    """Split *img* horizontally at row *y*.

    Returns [top, bottom]; the cut row itself is dropped.
    """
    height, width = img.shape[:2]
    top = img[0:y, 0:width]
    bottom = img[y + 1:height + 1, 0:width]
    return [top, bottom]
def importantregion(gray_img, path=None, index=0):
    """Bounding box (minx, miny, maxx, maxy) of SIFT keypoints in *gray_img*.

    Returns (-1, -1, -1, -1) when no keypoints are detected.  When *path*
    is given, also writes a keypoint-visualization JPEG there (debug aid).
    Uses the OpenCV 2.4-era cv2.SIFT constructor.
    """
    # nfeatures=1000, nOctaveLayers=3, contrastThreshold=0.12, edgeThreshold=12
    sift = cv2.SIFT(1000, 3, 0.12, 12)
    kp, d = sift.detectAndCompute(gray_img, None)
    if (path != None):
        img_kp_img = cv2.drawKeypoints(gray_img, kp, None, (255, 0, 0), 0)
        # NOTE(review): backslash separator assumes a Windows path — confirm.
        cv2.imwrite(path + "\\object_features" + ("%06i" % index) + ".jpg", img_kp_img)
    pointsx = []
    pointsy = []
    if len(kp) > 0:
        for point in kp:
            pointsx.append(point.pt[0])
            pointsy.append(point.pt[1])
        # Round outward so the box fully covers all keypoints.
        minx = np.floor(np.amin(pointsx))
        miny = np.floor(np.amin(pointsy))
        maxx = np.ceil(np.amax(pointsx))
        maxy = np.ceil(np.amax(pointsy))
    else:
        minx = -1
        miny = -1
        maxx = -1
        maxy = -1
    return minx, miny, maxx, maxy
# def candidateobjects(image, siftthres=3000):
# sift = cv2.SIFT(siftthres)
# kp, d = sift.detectAndCompute(image, None)
#
# points = []
# for point in kp:
# points.append(point.pt)
#
# n_points = np.array(points)
# indices, labels = cluster.dbscan(n_points)
# n_kp = np.array(kp)
# Fix: the bare ``return`` below was left behind when the function above was
# commented out.  At module level it is a SyntaxError that prevents this
# whole module from importing, so it is commented out as well.
# return
def fit_mask_to_img(image, mask, tlx, tly):
    """Embed *mask* into a zero mask sized like *image*.

    The mask's top-left corner is placed at (tlx, tly); everything outside
    the embedded region is 0.
    """
    img_h, img_w = image.shape[:2]
    canvas = np.zeros((img_h, img_w), dtype=np.uint8)
    mask_h, mask_w = mask.shape
    canvas[tly:tly + mask_h, tlx:tlx + mask_w] = mask
    return canvas
def highlight(image, mask, (r, g, b, a)=(23, 175, 251, 100)):
    """Overlay a translucent highlight (default orange) over *image* where
    *mask* is set, returning an (h, w, 4) uint8 array.

    Note: the tuple parameter ``(r, g, b, a)`` is Python 2-only syntax.
    """
    # Create a semi-transparent highlight layer the size of image
    h, w, bgra = image.shape
    layer = np.empty((h, w, 4), dtype=np.uint8)
    layer[:, :, 0] = r
    layer[:, :, 1] = g
    layer[:, :, 2] = b
    layer[:, :, 3] = a
    # Grow the mask slightly so the highlight bleeds past the object edges.
    blurmask = expandmask(mask)
    layer = maskimage(layer, blurmask)
    fg_mask = fgmask(image)
    fgimg = alphaimage(image, fg_mask)
    # Swap BGR(A) -> RGB(A) channel order for PIL compositing ...
    img = Image.fromarray(fgimg, "RGBA")
    b, g, r, a = img.split()
    img = Image.merge("RGBA", (r, g, b, a))
    highlight = Image.fromarray(layer, "RGBA")
    result = Image.alpha_composite(img, highlight)
    # ... and swap back before returning to the OpenCV world.
    r, g, b, a = result.split()
    data = np.empty((h, w, 4), dtype=np.uint8)
    data[:, :, 0] = b
    data[:, :, 1] = g
    data[:, :, 2] = r
    data[:, :, 3] = a
    return data
def removetemplate(gray_img, gray_obj, M):
    """Subtract *gray_obj* (warped into *gray_img*'s frame by homography M)
    from *gray_img*, returning the remaining content on white.

    Works in negated space so dark "ink" subtracts cleanly: the minimum
    keeps only intensity the warped object does not account for.
    """
    rows, cols = gray_img.shape[0:2]
    neg_warp_obj = cv2.warpPerspective(255 - gray_obj, M, (cols, rows))
    neg_img = 255 - gray_img
    negdiff = np.minimum(neg_img, cv2.absdiff(neg_img, neg_warp_obj))
    diff = 255 - negdiff
    # h1,w1 = neg_warp_obj.shape
    # h2,w2 = neg_img.shape
    # h3,w3 = diff.shape
    # view = sp.zeros((max(h1, h2, h3), w1+w2+w3), sp.uint8)
    # view[:h1, :w1] =neg_warp_obj
    # view[:h2, w1:w1+w2] = neg_img
    # view[:h3, w1+w2:w1+w2+w3] = negdiff
    # cv2.namedWindow("remove template", cv2.WINDOW_NORMAL)
    # cv2.imshow("remove template", view)
    # cv2.waitKey(0)
    return diff
def subtractlogo(frame, logo, is_black, bgcolor):
    """Locate *logo* inside *frame* and paint its pixels with *bgcolor*.

    Returns a copy of *frame*; the copy is unchanged when the logo is not
    detected (exact template match above 0.90 confidence).
    """
    gray_logo = util.grayimage(logo)
    wlogo, hlogo = gray_logo.shape[::-1]
    topleft = find_object_exact_inside(frame, logo, 0.90)
    frame_copy = frame.copy()
    if topleft == None:
        return frame_copy
    else:
        print 'logo detected'
        tlx = topleft[0]
        tly = topleft[1]
        # brx/bry are computed but not used below.
        brx = tlx + wlogo
        bry = tly + hlogo
        # Choose the threshold matching the logo's background polarity.
        if (is_black):
            thres = BLACK_BG_THRESHOLD
        else:
            thres = WHITE_BG_THRESHOLD
        logomask = fgmask(logo, is_black, thres)
        logomask = expandmask(logomask,3)
        logomask = fit_mask_to_img(frame_copy, logomask, tlx, tly)
        # util.showimages([logomask], "logomask")
        frame_copy[logomask != 0] = bgcolor
        return frame_copy
def fgmask(image, is_black=True, threshold=BLACK_BG_THRESHOLD, var_threshold=255):
    """Return a binary mask: background pixels 0, foreground pixels 255.

    For a dark background (is_black) pixels above *threshold* are kept;
    for a light background pixels at or below it are kept.
    """
    gray = util.grayimage(image)
    mode = cv2.THRESH_BINARY if is_black else cv2.THRESH_BINARY_INV
    _ret, mask = cv2.threshold(gray, threshold, 255, mode)
    return mask
def fgbbox(mask):
    """Bounding box (tlx, tly, brx, bry) of non-zero pixels in *mask*.

    Returns (-1, -1, -1, -1) when the mask is empty.  Note np.where yields
    row indices first, then column indices.
    """
    row_idx, col_idx = np.where(mask != 0)
    if len(row_idx) == 0 or len(col_idx) == 0:
        return (-1, -1, -1, -1)
    return (min(col_idx), min(row_idx), max(col_idx), max(row_idx))
def maskimage_white(image, mask):
    """Keep *image* where the single-channel *mask* is set; everything
    outside the mask becomes white."""
    kept = cv2.bitwise_and(image, image, mask=mask)
    outside = cv2.bitwise_not(mask)
    kept = cv2.bitwise_not(kept, kept, mask=outside)
    return kept
def maskimage(image, mask):
    """Keep *image* where the single-channel *mask* is set; rest is black."""
    return cv2.bitwise_and(image, image, mask=mask)
def alphaimage(image, mask):
    """Attach *mask* as the alpha channel of a 3-channel image.

    Returns a fresh (h, w, 4) uint8 array; inputs are not modified.
    """
    h, w = image.shape[:2]
    out = np.empty((h, w, 4), dtype=np.uint8)
    out[..., :3] = image[..., :3]
    out[..., 3] = mask
    return out
def expandmask(mask, width=10):
    """Dilate *mask* by box-blurring with a width x width kernel and keeping
    every pixel the blur touched (any non-zero response becomes 255)."""
    box = np.ones((width, width), np.float32) / (width * width)
    grown = cv2.filter2D(mask, -1, box)
    grown[grown != 0] = 255
    return grown
def subtractobject(image, objmask, M, emptycolor=0):
    """Blank out the region of *image* covered by *objmask* after warping
    the mask by homography *M*; cleared pixels become *emptycolor*."""
    rows, cols = image.shape[0:2]
    warpmask = cv2.warpPerspective(objmask, M, (cols, rows))
    warpmask = cv2.bitwise_not(warpmask)
    # Box-blur the inverted mask and drop every pixel that is not pure 255,
    # effectively dilating the removed region to swallow edge artifacts.
    kernel = np.ones((10, 10), np.float32) / 100
    blurmask = cv2.filter2D(warpmask, -1, kernel)
    blurmask[blurmask != 255] = 0
    subimage = cv2.bitwise_and(image, image, mask=blurmask)
    subimage[blurmask == 0] = emptycolor
    return subimage
def find_template_ctr(frame, template, threshold=0.8):
    """Return the center of *template*'s match inside *frame*, or None.

    Fixes: drops an unused grayscale conversion of *frame* (util.grayimage
    is a pure conversion for the template, so the frame conversion had no
    effect) and uses an identity check for the None sentinel.
    """
    graytemp = util.grayimage(template)
    wtemp, htemp = graytemp.shape[::-1]
    top_left = find_object_exact_inside(frame, template, threshold)
    if top_left is None:
        return None
    # Integer division under Python 2 — template dimensions are ints.
    return (top_left[0] + wtemp / 2, top_left[1] + htemp / 2)
def find_best_match_inside(img, obj):
    """obj must be inside img"""
    """This method does work, but is very slow"""
    # Exhaustive O(W*H) sliding-window search for the placement of obj that
    # minimizes the masked absolute difference.  Debug side effects: draws
    # the best-match rectangle onto img (mutates it!) and pops up a window.
    objh, objw = obj.shape[:2]
    imgh, imgw = img.shape[:2]
    mindiff = float("inf")
    minloc = (-1, -1)
    for x in range(0, (imgw-objw)+1):
        for y in range(0, (imgh-objh)+1):
            # min() with obj ignores differences where obj itself is dark.
            diff = cv2.absdiff(obj, img[y:y+objh, x:x+objw])
            diff = cv2.min(diff, obj)
            score = np.sum(diff)
            if (score < mindiff):
                mindiff = score
                minloc = (x,y)
    cv2.rectangle(img, (minloc[0],minloc[1]), (minloc[0]+objw, minloc[1]+objh), (255,255,255), 2)
    util.showimages([obj, img], "pf.find_best_match_inside")
    return minloc
def find_object_appx(img, obj, thres=-1):
    """Feature-match *obj* inside *img*; return the homography or None.

    Despite the variable name ``sift``, this uses SURF features.  Shows a
    debug window with the correspondences (blocks until a key is pressed).
    *thres* is currently unused.
    """
    sift = cv2.SURF(0)
    gray_img = util.grayimage(img)
    gray_obj = util.grayimage(obj)
    kp1, d1 = sift.detectAndCompute(gray_obj, None)
    kp2, d2 = sift.detectAndCompute(gray_img, None)
    # obj_kp_img = cv2.drawKeypoints(gray_obj, kp1, None, (255, 0, 0), 0)
    # img_kp_img = cv2.drawKeypoints(gray_img, kp2, None, (255, 0, 0), 0)
    logging.info("Object # features: %i", len(kp1))
    logging.info("Image # features: %i", len(kp2))
    # NOTE(review): ``d1 == None`` is elementwise for numpy arrays and only
    # behaves as intended on old numpy versions — confirm / prefer `is None`.
    if d1 == None or d2 == None:
        print 'No matches: no feature'
        return None
    bf = cv2.BFMatcher(cv2.NORM_L2, True)
    matches = bf.match(d1, d2)
    dist = [m.distance for m in matches]
    logging.info("Number of correspondences: %i", len(matches))
    # findHomography needs at least 4 correspondences.
    if (len(matches) < 4):
        print 'No matches: Not enough correspondence'
        return None
    match_img = drawMatches(gray_obj, gray_img, kp1, kp2, matches)
    util.showimages([match_img])
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches])
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches])
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    # num_in = mask.ravel().tolist().count(1)
    # logging.info("# Inliers: %i", num_in)
    #
    # if (num_in < len(kp1)*0.5):
    # print 'Warning: less than half of object features are inliers'
    #
    # (h1, w1) = gray_obj.shape[:2]
    # (h2, w2) = gray_img.shape[:2]
    # image = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    # image[:h1, :w1] = gray_obj
    # image[:h2, w1:w1+w2] = gray_img
    return M
def find_object_appx_thres(img, obj, thres=None):
"""Find approximate match using feature-match and return transformation matrix"""
gray_img = util.grayimage(img)
gray_obj = util.grayimage(obj)
"""Return 3x3 transformation matrix which transforms gray_obj to match inside gray_img
Return None if no good match"""
sift = cv2.SIFT()
kp1, d1 = sift.detectAndCompute(gray_obj, None)
kp2, d2 = sift.detectAndCompute(gray_img, None)
bf = cv2.BFMatcher(cv2.NORM_L2, True)
if d1 == None or d2 == None:
print 'no matches'
return None
logging.info("gray_obj # features: %i", len(kp1))
logging.info("gray_img # features: %i", len(kp2))
matches = bf.match(d1, d2)
dist = [m.distance for m in matches]
if (len(dist) < 0):
print "Not enough matches: ", len(dist)
return None
if (thres==None):
thres_param = 0.5
else:
thres_param = thres
thres_dist = (sum(dist) / len(dist)) * thres_param
good_matches = [m for m in matches if m.distance < thres_dist]
logging.info("good match threshold: sum(dist)/len(dist)* %f = %f", thres_param, thres_dist)
logging.info("Number of matches: %i", len(matches))
logging.info("Number of good matches: %i", len(good_matches))
if (len(good_matches) <= 4):
print 'not enough good match'
return None
good_matches = sorted(good_matches, key=lambda x:x.distance)
# match_img = drawMatches(gray_obj, gray_img, kp1, kp2, good_matches)
# util.showimages([match_img])
src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches])
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches])
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.LMEDS)
if (mask.ravel().tolist().count(1) < len(good_matches) * 0.3):
logging.info("mask count %i", mask.ravel().tolist().count(1))
logging.info("no good transform")
return None
return M
if (thres<0):
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches])
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches])
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
return M
def drawKeypointClusters(img, n_kp, labels):
    """Draw each keypoint cluster onto *img* in its own (cycling) color.

    *n_kp* is a numpy array of keypoints; *labels* assigns a cluster label
    to each keypoint.  Returns the annotated image.
    """
    palette = cycle([(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255)])
    for label, color in zip(set(labels), palette):
        members = labels == label
        img = cv2.drawKeypoints(img, n_kp[members], None, color, 4)
    return img
def isgoodmatch(M):
if M == None:
return False
unitsquare = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype='float32')
unitsquare = np.array([unitsquare])
tsquare = cv2.perspectiveTransform(unitsquare, M)
tarea = cv2.contourArea(tsquare)
print 'tarea = ', tarea
if tarea < 0.5 or tarea > 5:
print "Not a good homography, scaling factor:", tarea
return False
return True
def find_object_exact_inside(img, template, threshold=0.20):
    """Return the top left corner of the rectangle that matches exact template INSIDE img"""
    # Returns None when the normalized cross-correlation peak is below
    # *threshold*.  Matching is done on the color images; the grayscale
    # conversions below only provide the template dimensions.
    gray_img = util.grayimage(img)
    gray_template = util.grayimage(template)
    # gray_img = fgmask(img, 150, 255, True)
    # gray_template = fgmask(template, 150, 255, True)
    # util.showimages([gray_img, gray_template], "masked")
    w, h = gray_template.shape[::-1]
    # Apply template Matching
    res = cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    # bottom_right only feeds the (disabled) debug drawing below.
    bottom_right = (top_left[0] + w, top_left[1] + h)
    """threshold khan = 0.75, tecmath = 0.25 """
    # print max_val
    # img_copy = img.copy()
    # cv2.rectangle(img_copy, top_left, bottom_right, 255, 2)
    # util.showimages([img_copy], "max loc")
    if (max_val < threshold):
        logging.info("Exact match NOT found: %f", max_val)
        return None
    else:
        logging.info("Exact match found: %f", max_val)
        # img_copy = img.copy()
        # cv2.rectangle(img_copy, top_left, bottom_right, 255, 2)
        # util.showimages([img_copy], "location of frame inside panorama")
        return top_left
def find_object_exact(fgimg, obj, threshold = 2.0):
    """Return translation matrix for exact template match, including partial overlap"""
    # Pads the image with an object-sized black border on all sides so obj
    # can match even when it only partially overlaps the image, then runs
    # squared-difference template matching.  The returned (x, y) is the
    # top-left in the ORIGINAL image's coordinates and may be negative.
    # NOTE(review): despite *threshold* and the commented-out gate below,
    # this function never returns None — yet several callers test its
    # result against None.  Confirm intended behavior.
    imgh, imgw = fgimg.shape[:2]
    objh, objw = obj.shape[:2]
    if (len(fgimg.shape) == 3):
        img = np.ones((imgh + 2 * objh, imgw + 2 * objw, 3), dtype=np.uint8) * 0
        img[objh:objh + imgh, objw:objw + imgw, :] = fgimg[:, :, :3]
    else:
        img = np.ones((imgh + 2 * objh, imgw + 2 * objw), dtype=np.uint8) * 0 # use 255?
        img[objh:objh + imgh, objw:objw + imgw] = fgimg[:, :]
    # util.showimages([img, obj], "image object")
    res = cv2.matchTemplate(img, obj, cv2.TM_SQDIFF)
    # plt.imshow(res,cmap = 'gray')
    # plt.show()
    # cv2.waitKey(0)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Normalize the score by the object area (smaller is better).
    min_val = math.sqrt(min_val) / (objh * objw)
    # if (min_val > threshold):
    # logging.info("Exact match NOT found: %f", min_val)
    # return None
    # else:
    # logging.info("Exact match found: %f", min_val)
    top_left = min_loc
    h, w = obj.shape[0:2]
    bottom_right = (top_left[0] + w, top_left[1] + h)
    # Debug: draws the match rectangle on the PADDED working copy only.
    cv2.rectangle(img, top_left, bottom_right, (0,0,255), 2)
    # util.showimages([img], "object inside image")
    # print 'top left', top_left
    # print min_val
    # cv2.imshow("obj", obj)
    # cv2.imshow("img", img)
    # cv2.imshow("res", res)
    # cv2.waitKey(0)
    # Undo the border offset to translate back into image coordinates.
    return (top_left[0] - objw, top_left[1] - objh)
    # M = np.identity(3, dtype=np.float32)
    # M[0, 2] = top_left[0] - objw
    # M[1, 2] = top_left[1] - objh
    # h,w = obj.shape[0:2]
    # bottom_right = (top_left[0] + w, top_left[1] + h)
    # cv2.imshow("object", obj_gray)
    # cv2.rectangle(fgimg_gray, top_left, bottom_right, 0, 2)
    # cv2.imshow("find_object_appx exact", fgimg_gray)
    # cv2.waitKey(0)
    # return M
def get_newobj_and_mask(image_and_mask, objlist):
    """Subtract every known object in *objlist* from the image/mask pair and
    return the remaining foreground image and mask.

    Args:
        image_and_mask: (image, foreground-mask) pair.
        objlist: sequence of (obj, obj_mask) pairs, or None.

    Bug fix: the ``== None`` tests are now identity checks; for numpy
    arrays (e.g. *obj*) the elementwise comparison is ambiguous in an
    ``if`` and raises ValueError on modern numpy.
    """
    image = image_and_mask[0]
    fg_mask = image_and_mask[1]
    fgimg = maskimage_white(image, fg_mask)
    if objlist is None:
        return fgimg, fg_mask
    for obj, objmask in objlist:
        if obj is None:
            continue
        fgimg_gray = cv2.cvtColor(fgimg, cv2.COLOR_BGR2GRAY)
        obj_gray = cv2.cvtColor(obj, cv2.COLOR_BGR2GRAY)
        # Prefer the cheap exact template match; fall back to feature match.
        M0 = find_object_exact(fgimg, obj)
        if M0 is None:
            M = find_object_appx(fgimg_gray, obj_gray)
        else:
            M = M0
        if isgoodmatch(M):
            fgimg = subtractobject(fgimg, objmask, M, 255)  # TODO: subtract object_white
            fg_mask = subtractobject(fg_mask, objmask, M, 0)  # TODO: subtract object_black
    return fgimg, fg_mask
def getnewobj(image, objlist):
    """Subtract every known object in *objlist* from *image*'s foreground
    and return the remaining foreground image (white background).

    Bug fix: identity comparisons (``is None``) replace ``== None``, which
    is elementwise (and ambiguous in an ``if``) for numpy arrays such as
    *obj* and a homography matrix *M*.
    """
    fg_mask = fgmask(image)
    fgimg = maskimage_white(image, fg_mask)
    if objlist is None or len(objlist) == 0:
        return fgimg
    for obj in objlist:
        if obj is None:
            continue
        objmask = fgmask(obj)
        obj, objmask = croptofg(obj, objmask)
        if obj is None:
            continue
        fgimg_gray = cv2.cvtColor(fgimg, cv2.COLOR_BGR2GRAY)
        obj_gray = cv2.cvtColor(obj, cv2.COLOR_BGR2GRAY)
        # Try exact template match first, then feature matching.
        M = find_object_exact(fgimg, obj)
        if M is None:
            M = find_object_appx(fgimg_gray, obj_gray)
        if isgoodmatch(M):
            fgimg = subtractobject(fgimg, objmask, M, 255)
    return fgimg
def croptofg(fgimg, fgmask):
    """Crop image and mask to the bounding box of the mask's foreground.

    Returns (None, None) when either input is missing or the mask has no
    foreground pixels.

    Bug fix: ``is None`` replaces ``== None`` (elementwise for arrays).
    Note: the *fgmask* parameter shadows the module-level fgmask() function
    inside this body; the name is kept for caller compatibility.
    """
    if fgimg is None or fgmask is None:
        return None, None
    tlx, tly, brx, bry = fgbbox(fgmask)
    if tlx == -1:  # nothing new in this image
        return None, None
    newobj = cropimage(fgimg, tlx, tly, brx, bry)
    newobjmask = cropimage(fgmask, tlx, tly, brx, bry)
    return newobj, newobjmask
def drawMatches(img1, img2, k1, k2, matches, maxline=100):
    """Render up to *maxline* correspondences between two grayscale images
    placed side by side, each drawn in a random color.

    Python 2 only (uses xrange).  Returns the composite BGR canvas.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # The gray inputs fill channel 0 and are replicated to the other two.
    view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
    view[:h1, :w1, 0] = img1
    view[:h2, w1:, 0] = img2
    view[:, :, 1] = view[:, :, 0]
    view[:, :, 2] = view[:, :, 0]
    numline = 0
    for m in matches:
        color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
        # Right-hand endpoint is shifted by w1 into the second image's pane.
        cv2.line(view, (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1])) , (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1])), color)
        numline += 1
        if (numline >= maxline):
            break
    return view
def whiteout(threshimg, tlx, tly, brx, bry):
    """Return a copy of *threshimg* with the given rectangle set to white.

    The bottom-right bounds (brx, bry) are exclusive.
    """
    result = threshimg.copy()
    result[tly:bry, tlx:brx] = 255
    return result
def bgfill(img, tlx, tly, brx, bry):
    """Inpaint the given rectangle of *img* from surrounding pixels."""
    hole = np.zeros(img.shape[:2], dtype=np.uint8)
    hole[tly:bry, tlx:brx] = 255
    # radius 3, flags=0 (Navier-Stokes inpainting)
    return cv2.inpaint(img, hole, 3, 0)
def cropimage(img, tlx, tly, brx, bry):
    """Crop *img* to the rectangle with INCLUSIVE bottom-right corner."""
    return img[tly:bry + 1, tlx:brx + 1]
def numfgpix_mit(gray_frame):
    """Count pixels darker than 200 (foreground on a light background)."""
    return np.count_nonzero(gray_frame < 200)
def numfgpix_khan(gray_frame):
    """Count pixels brighter than 50 (foreground on a dark background)."""
    _ret, binary = cv2.threshold(gray_frame, 50, 255, cv2.THRESH_BINARY)
    # util.showimages([binary], "numfgpix_khan")
    return np.count_nonzero(binary)
def removebg_mit(gray_frame):
    """Invert-threshold at 200: darker pixels become white, rest black."""
    return np.where(gray_frame < 200, 255, 0).astype(gray_frame.dtype)
def removebg_khan(gray_frame):
    """Invert-threshold at 100: darker pixels become white, rest black."""
    return np.where(gray_frame < 100, 255, 0).astype(gray_frame.dtype)
def numfgpix_thresh(gray, is_black=True, fgthres=BLACK_BG_THRESHOLD):
    """Count foreground pixels in *gray* using fgmask().

    Side effect: displays the computed mask in a window via util.showimages
    (likely a debug leftover).
    """
    mask = fgmask(gray, is_black, fgthres, 255)
    util.showimages([mask], "fg")
    # ret, threshimg = cv2.threshold(gray, fgthres, 50, cv2.THRESH_BINARY) #for black background
    # ret, threshimg = cv2.threshold(gray, fgthres, 225, cv2.THRESH_BINARY_INV) #for white background
    numfg = np.count_nonzero(mask)
    logging.debug("#fg pix %i", numfg)
    # util.showimages([threshimg], "processframe::numfgpix_thres")
    return numfg
def numfgpix(img, bgcolor):
    """Count pixels whose value differs from EVERY background value.

    Args:
        img: image array.
        bgcolor: iterable of scalar background values.

    Bug fix: the original overwrote its accumulator on each iteration, so
    only the last background value had any effect.  We now accumulate the
    minimum absolute difference across all background values; a pixel
    matching ANY of them counts as background.  (The per-call debug print
    of img.shape was removed.)
    """
    diff = np.full(img.shape, np.inf)
    for value in bgcolor:
        diff = np.minimum(diff, np.abs(img - float(value)))
    return int(np.count_nonzero(diff))
def removebackground(gray_img, gray_bgsample, thres=50):
    """Remove background from *gray_img* using a sample patch of background
    pixel values; returns a binary image (0 = background, 255 = foreground).

    Bug fix: the original recomputed ``sub = min(orig, diff)`` from the
    constant white image on every iteration, so only the LAST sampled pixel
    value had any effect.  The minimum difference is now accumulated across
    all sampled values.  cv2.absdiff was replaced with the equivalent
    saturating-free numpy computation (exact for uint8 inputs).
    """
    # Deduplicate the sampled values; duplicates are redundant under min().
    bg_values = np.unique(gray_bgsample)
    sub = np.full(gray_img.shape, 255, dtype=np.uint8)
    for pix in bg_values:
        diff = np.abs(gray_img.astype(np.int16) - int(pix)).astype(np.uint8)
        sub = np.minimum(sub, diff)
    # Binarize: close to some background value -> 0, otherwise -> 255.
    sub[sub < thres] = 0
    sub[sub >= thres] = 255
    return sub
def calculate_size(size_image1, size_image2, H):
    """Compute the canvas (size, offset) needed to hold image1 plus image2
    transformed by homography *H*.

    Sizes are (col, row) pairs as supplied by the caller; the corners of
    image2 are projected through H (no homogeneous normalization, matching
    the caller's convention) and merged with image1's extent.
    """
    col1, row1 = size_image1
    col2, row2 = size_image2
    # Homogeneous corner coordinates of image2, one corner per column.
    corners = np.array([[1, 1, 1], [1, col2, 1], [row2, 1, 1], [row2, col2, 1]]).T
    projected = H.dot(corners)
    min_row = math.floor(min(1, projected[0].min()))
    max_row = math.ceil(max(0, projected[0].max()))
    min_col = math.floor(min(1, projected[1].min()))
    max_col = math.ceil(max(0, projected[1].max()))
    im_rows = max(max_row, row1) - min(min_row, row1) + 1
    im_cols = max(max_col, col1) - min(min_col, col1) + 1
    return ((im_rows, im_cols), (min_row, min_col))
def stitch_images(previmage, curimage, is_black=True):
    """Stitch *curimage* onto *previmage* using a feature-match homography;
    when no good match is found, fall back to stacking curimage directly
    below previmage.  Returns a BGR image covering both inputs.
    """
    # NOTE(review): ``previmage == None`` is elementwise for numpy arrays
    # and ambiguous in an ``if`` on modern numpy — confirm / prefer is None.
    if (previmage == None):
        return curimage
    curimage = cv2.cvtColor(curimage, cv2.COLOR_BGR2BGRA)
    curimage_gray = cv2.cvtColor(curimage, cv2.COLOR_BGR2GRAY)
    previmage = cv2.cvtColor(previmage, cv2.COLOR_BGR2BGRA)
    previmage_gray = cv2.cvtColor(previmage, cv2.COLOR_BGR2GRAY)
    (curh, curw) = curimage.shape[:2]
    (prevh, prevw) = previmage.shape[:2]
    print 'curimage shape', curimage.shape
    print 'curh, curw', curh, curw
    print 'prevh, prevw', prevh, prevw
    M = find_object_appx_thres(previmage_gray, curimage_gray, 0.9)
    print M
    if not isgoodmatch(M):
        print "M is not a good match"
        # Fallback: pure translation placing curimage just below previmage.
        tx = 0.0
        ty = prevh - 1.0
        M = np.array([[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]])
    (warpsize, offset) = calculate_size((prevh, prevw), (curh, curw), M)
    # print 'warpsize', warpsize
    # Warp curimage into the combined canvas with a transparent border.
    curimage_warp = cv2.warpPerspective(curimage, M, (int(warpsize[0]), int(warpsize[1])), borderValue=(0, 0, 0, 0), borderMode=cv2.BORDER_CONSTANT)
    # util.showimages([curimage_warp], "curimage_warp")
    xoff = int(offset[0])
    yoff = int(offset[1])
    # print 'xoff, yoff', xoff-1, yoff-1
    # Translate previmage so both images share the same canvas origin.
    M0 = np.array([[1.0, 0.0, -(xoff - 1)], [0.0, 1.0, -(yoff - 1)], [0.0, 0.0, 1.0]])
    if (is_black):
        borderValue=(0,0,0,0)
    else:
        borderValue=(255,255,255,0)
    previmage_warp = cv2.warpPerspective(previmage, M0, (int(warpsize[0]), int(warpsize[1])), borderValue, borderMode=cv2.BORDER_CONSTANT)
    # util.showimages([curimage_warp, previmage_warp])
    # Alpha-composite the warped current frame over the warped panorama.
    pil_curimage_warp = util.array_to_pil(curimage_warp, "RGBA") # Image.fromarray(curimage_warp, "RGBA")
    pil_previmage_warp = util.array_to_pil(previmage_warp, "RGBA") # Image.fromarray(previmage_warp, "RGBA")
    pil_previmage_warp.paste(pil_curimage_warp, (-(xoff - 1), -(yoff - 1)), pil_curimage_warp)
    merged = np.array(pil_previmage_warp)
    merged = cv2.cvtColor(merged, cv2.COLOR_RGB2BGR)
    # util.showimages([merged], "merged")
    return merged
def panorama(list_of_frames, is_black=True):
    """Fold stitch_images over *list_of_frames* (objects exposing a .frame
    attribute) and return the accumulated panorama image."""
    previmage = list_of_frames[0].frame
    for i in range(1, len(list_of_frames)):
        print "%i of %i" % (i, len(list_of_frames))
        curimage = list_of_frames[i].frame
        previmage = stitch_images(previmage, curimage, is_black)
        # util.showimages([previmage], "pf::panorama, previmage")
    return previmage
def writetext(img, text, bottomleft, fontscale=10.0, color=(0, 0, 0)):
    """Render *text* onto *img* in place (plain Hershey font) and return the
    image so calls can be chained."""
    cv2.putText(img, text, bottomleft, cv2.FONT_HERSHEY_PLAIN, fontscale, color)
    return img
def get_diff_objimg(start_img, end_img, x1,y1, x2,y2):
    """Absolute difference of *end_img* against *start_img* shifted by the
    motion (x2-x1, y2-y1); areas with no overlap are returned unchanged.
    Returns *end_img* itself when there is no start image or no overlap.
    """
    if (start_img is None):
        return end_img
    # Relative translation between the two captures.
    curx = x2 - x1
    cury = y2 - y1
    curh, curw = end_img.shape[:2]
    diff_img = end_img.copy()
    # Clip the shifted windows so both slices have identical shapes.
    endimg_overlap = diff_img[max(0,-cury):min(curh-cury, curh), max(0, -curx):min(curw-curx, curw)]
    startimg_overlap = start_img[max(0, cury):min(curh, curh+cury), max(0, curx):min(curw+curx, curw)]
    cur_overlaph, cur_overlapw = endimg_overlap.shape[:2]
    pre_overlaph, pre_overlapw = startimg_overlap.shape[:2]
    if (cur_overlaph == 0 or cur_overlapw == 0):
        return end_img
    diff_img[max(0,-cury):min(curh-cury, curh), max(0, -curx):min(curw-curx, curw)] = cv2.absdiff(endimg_overlap, startimg_overlap)
    # Keep whichever is darker so unchanged ink stays visible.
    diff_img = cv2.min(end_img, diff_img)
    return diff_img
if __name__ == "__main__":
    # Manual smoke test: binarize a screenshot (dark -> 0, light -> 255).
    src = cv2.imread("udacity1_capture.png", 0) # flag 0 loads GRAYSCALE, not 3-channel BGR
    dest = src.copy()
    dest[src < 200] = 0
    dest[src >= 200] = 255
    # NOTE(review): "udactiy" in the output filename looks like a typo.
    cv2.imwrite("udactiy1_bg.png", dest)
    cv2.imshow("display", dest)
    cv2.waitKey(0)
| |
# Copyright 2019 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from io import StringIO
from typing import Text # noqa: F401
from capirca.lib import naming, policy
from pybatfish.client import capirca
def _load_test_definitions(netstr, svcstr=None):
    # type: (Text, Text) -> naming.Naming
    """Build a Capirca Naming from network and (optional) service strings."""
    definitions = naming.Naming()
    if netstr:
        definitions._ParseFile(StringIO(netstr), "networks")
    if svcstr:
        definitions._ParseFile(StringIO(svcstr), "services")
    return definitions
# Capirca network-definitions fixture covering the cases the tests below
# exercise: host bits set, nested groups, mixed IPv4/IPv6, and an
# undefined member name.
TEST_DATABASE = """
HOST_BITS = 1.2.3.4/8 # some prefix with host bits present
RFC1918_10 = 10.0.0.0/8 # non-public
RFC1918_172 = 172.16.0.0/12 # non-public
RFC1918_192 = 192.168.0.0/16 # non-public
RFC1918 = RFC1918_10
RFC1918_172
RFC1918_192
LOOPBACK = 127.0.0.0/8 # loopback
::1/128 # ipv6 loopback
RFC_3330 = 169.254.0.0/16 # special use IPv4 addresses - netdeploy
RFC_6598 = 100.64.0.0/10 # Shared Address Space
MULTICAST = 224.0.0.0/4 # IP multicast
FF00::/8 # IPv6 multicast
CLASS-E = 240.0.0.0/4
DENY-EXTERNAL-SRC =
RFC1918
LOOPBACK
RFC_3330
MULTICAST
CLASS-E
UNDEFINED
"""
# Shared Naming instance used by the _entry_to_group tests below.
DEFINITIONS = _load_test_definitions(TEST_DATABASE)
def _get_group(name):
    """Convert the named network entry from DEFINITIONS into a Batfish group."""
    return capirca._entry_to_group(name, DEFINITIONS.networks[name].items, DEFINITIONS)
def test_entry_to_group_naive():
    """Leaf networks resolve to their own prefix with no child groups."""
    expected = {
        "RFC1918_10": "10.0.0.0/8",
        "RFC1918_172": "172.16.0.0/12",
        "RFC1918_192": "192.168.0.0/16",
    }
    for name, prefix in expected.items():
        group = _get_group(name)
        assert set(group.addresses) == {prefix}
        assert not group.childGroupNames
def test_entry_to_group_host_bits():
    """Host bits present in a prefix definition are masked off."""
    group = _get_group("HOST_BITS")
    assert set(group.addresses) == {"1.0.0.0/8"}
def test_entry_to_group_recursive():
    """Nested definitions become child groups rather than flat addresses."""
    group = _get_group("RFC1918")
    assert not group.addresses
    assert set(group.childGroupNames) == {"RFC1918_10", "RFC1918_172", "RFC1918_192"}
def test_entry_to_group_mixed_6_4(caplog):
    """IPv6 members are skipped with a warning; IPv4 members survive."""
    group = _get_group("LOOPBACK")
    assert set(group.addresses) == {"127.0.0.0/8"}
    assert not group.childGroupNames
    assert "Skipping IPv6 addresses in LOOPBACK" in caplog.text
def test_entry_to_group_error_undefined(caplog):
    """An undefined member yields an empty group plus an error log line."""
    group = _get_group("DENY-EXTERNAL-SRC")
    assert not group.addresses
    assert not group.childGroupNames
    assert "error converting DENY-EXTERNAL-SRC, creating empty group" in caplog.text
def test_create_reference_book():
    """A reference book gets one address group per named network."""
    simple_database = """
RFC1918_10 = 10.0.0.0/8 # non-public
RFC1918_172 = 172.16.0.0/12 # non-public
RFC1918_192 = 192.168.0.0/16 # non-public
RFC1918 = RFC1918_10
RFC1918_172
RFC1918_192
"""
    definitions = _load_test_definitions(simple_database)
    book = capirca.create_reference_book(definitions)
    assert book.name == "capirca"
    expected_names = {"RFC1918", "RFC1918_10", "RFC1918_172", "RFC1918_192"}
    assert len(book.addressGroups) == len(expected_names)
    assert {group.name for group in book.addressGroups} == expected_names
    assert not book.interfaceGroups
    # A custom book name changes only the name, not the contents.
    book_custom = capirca.create_reference_book(definitions, "testbook")
    assert book_custom.name == "testbook"
    assert book_custom.addressGroups == book.addressGroups
    assert book_custom.interfaceGroups == book.interfaceGroups
# Minimal Capirca service definitions used by the policy below.
TEST_SVCS = """
SSH = 22/tcp
DNS = 53/udp
"""
# Small four-platform policy: permit SSH and DNS, reject everything else.
TEST_POLICY = """
header {
target:: arista some_acl
target:: cisco some_acl
target:: juniper some_acl
target:: paloalto some_acl
}
term permit_ssh {
protocol:: tcp
destination-port:: SSH
action:: accept
}
term permit_dns {
protocol:: udp
destination-port:: DNS
action:: accept
}
term deny_all {
action:: reject
}
"""
def test_get_acl_text():
    """ACL text renders per platform; the platform name is normalized."""
    definitions = _load_test_definitions(TEST_DATABASE, TEST_SVCS)
    parsed = policy.ParsePolicy(TEST_POLICY, definitions)
    cisco_acl = capirca._get_acl_text(parsed, "cisco")
    for line in ("permit tcp any any eq 22",
                 "permit udp any any eq 53",
                 "deny ip any any"):
        assert line in cisco_acl
    # Platform lookup should be case- and whitespace-insensitive.
    assert capirca._get_acl_text(parsed, " CISCO ") == cisco_acl
    juniper_acl = capirca._get_acl_text(parsed, "juniper")
    assert re.search(r"from {\s*protocol tcp;\s*destination-port 22;\s*}", juniper_acl)
    assert re.search(r"from {\s*protocol udp;\s*destination-port 53;\s*}", juniper_acl)
    assert re.search(r"term deny_all {\s*then {\s*reject;\s*}\s*}", juniper_acl)
    arista_acl = capirca._get_acl_text(parsed, "arista")
    assert "permit tcp any any eq ssh" in arista_acl
    assert "permit udp any any eq domain" in arista_acl
    assert "deny ip any any" in arista_acl
    # Palo Alto is currently unsupported and should raise a helpful error.
    try:
        capirca._get_acl_text(parsed, "paloalto")
        assert False
    except ValueError as err:
        assert "Batfish" in str(err)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/usage_reporting/*"""
__author__ = 'Mike Gainer (mgainer@google.com)'
import collections
import copy
import os
import time
import urlparse
from common import crypto
from common import utils as common_utils
from controllers import sites
from models import courses
from models import models
from models import transforms
from modules.usage_reporting import config
from modules.usage_reporting import course_creation
from modules.usage_reporting import enrollment
from modules.usage_reporting import messaging
from modules.usage_reporting import usage_reporting
from tests.functional import actions
from google.appengine.api import namespace_manager
from google.appengine.api import urlfetch
ADMIN_EMAIL = 'admin@foo.com'
FAKE_COURSE_ID = 'CCCCCCCCCCCCCCCCCCCCC'
FAKE_INSTALLATION_ID = 'IIIIIIIIIIIIIIIIIIII'
FAKE_TIMESTAMP = 1234567890
class MockSender(messaging.Sender):
    """Test double for messaging.Sender that records messages in memory."""
    _messages = []  # shared across all uses; cleared via clear_sent()
    @classmethod
    def send_message(cls, the_dict):
        """Capture the message instead of transmitting it anywhere."""
        cls._messages.append(the_dict)
    @classmethod
    def get_sent(cls):
        """Return a deep copy of everything captured so far."""
        return copy.deepcopy(cls._messages)
    @classmethod
    def clear_sent(cls):
        """Forget all captured messages (mutates the shared list in place)."""
        del cls._messages[:]
class MockMessage(messaging.Message):
    """Message subclass with deterministic IDs and timestamp for tests."""
    @classmethod
    def _get_random_course_id(cls, course):
        """Always report the fixed fake course ID."""
        return FAKE_COURSE_ID
    @classmethod
    def _get_random_installation_id(cls):
        """Always report the fixed fake installation ID."""
        return FAKE_INSTALLATION_ID
    @classmethod
    def _get_time(cls):
        """Always report the fixed fake timestamp."""
        return FAKE_TIMESTAMP
class UsageReportingTestBase(actions.TestBase):
    """Base test case that swaps the messaging module's Sender/Message for
    in-memory mocks and logs in as an admin; all patches are undone in
    tearDown."""
    def setUp(self):
        super(UsageReportingTestBase, self).setUp()
        # Save the real classes so tearDown can restore module state.
        self.save_sender = messaging.Sender
        self.save_message = messaging.Message
        messaging.Sender = MockSender
        messaging.Message = MockMessage
        messaging.ENABLED_IN_DEV_FOR_TESTING = True
        actions.login(ADMIN_EMAIL, is_admin=True)
        # If the optional wipeout module is present, it will enforce some
        # requirements that we're not prepared to construct in core
        # Course Builder. Unilaterally remove its registrations.
        event_callbacks = models.StudentLifecycleObserver.EVENT_CALLBACKS
        for event_type in event_callbacks:
            if 'wipeout' in event_callbacks[event_type]:
                del event_callbacks[event_type]['wipeout']
        # NOTE(review): this second loop re-reads EVENT_CALLBACKS and seems
        # to duplicate the one above — confirm whether an ENQUEUE_CALLBACKS
        # registry was intended here.
        enqueue_callbacks = models.StudentLifecycleObserver.EVENT_CALLBACKS
        for event_type in enqueue_callbacks:
            if 'wipeout' in enqueue_callbacks[event_type]:
                del enqueue_callbacks[event_type]['wipeout']
    def tearDown(self):
        # Undo everything setUp changed, then restore the saved classes.
        MockSender.clear_sent()
        messaging.ENABLED_IN_DEV_FOR_TESTING = False
        messaging.Sender = self.save_sender
        messaging.Message = self.save_message
        sites.reset_courses()
        super(UsageReportingTestBase, self).tearDown()
class ConfigTests(UsageReportingTestBase):
    """Tests for the REPORT_ALLOWED config property and its change hook."""
    def test_set_report_allowed(self):
        """set_report_allowed() round-trips through the config property."""
        config.set_report_allowed(True)
        self.assertEquals(True, config.REPORT_ALLOWED.value)
        config.set_report_allowed(False)
        self.assertEquals(False, config.REPORT_ALLOWED.value)
        config.set_report_allowed(True)
        self.assertEquals(True, config.REPORT_ALLOWED.value)
        config.set_report_allowed(False)
        self.assertEquals(False, config.REPORT_ALLOWED.value)
    def test_on_change_report_allowed(self):
        """The change hook sends one usage message per transition, carrying
        the NEW value plus the mocked installation ID and timestamp."""
        config.set_report_allowed(True)
        config._on_change_report_allowed(config.REPORT_ALLOWED, False)
        config.set_report_allowed(False)
        config._on_change_report_allowed(config.REPORT_ALLOWED, True)
        expected = [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: True,
            messaging.Message._SOURCE: messaging.Message.ADMIN_SOURCE,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: False,
            messaging.Message._SOURCE: messaging.Message.ADMIN_SOURCE,
        }]
        self.assertEquals(expected, MockSender.get_sent())
    def test_admin_post_change_report_allowed(self):
        """Flipping the property to True then False through the admin REST
        endpoints sends one usage message per change."""
        xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
            'config_override')
        # Create the property override, then fetch it to prime the REST item.
        response = self.post(
            '/admin?action=config_override&name=%s' %
            config.REPORT_ALLOWED.name,
            {'xsrf_token': xsrf_token})
        response = self.get('/rest/config/item?key=%s' %
                            config.REPORT_ALLOWED.name)
        # Set the property to True via the REST PUT endpoint.
        payload = {
            'name': config.REPORT_ALLOWED.name,
            'value': True,
            'is_draft': False,
        }
        message = {
            'key': config.REPORT_ALLOWED.name,
            'payload': transforms.dumps(payload),
            'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
                'config-property-put'),
        }
        response = self.put('/rest/config/item',
                            {'request': transforms.dumps(message)})
        self.assertEqual(200, response.status_int)
        # And back to False.
        payload = {
            'name': config.REPORT_ALLOWED.name,
            'value': False,
            'is_draft': False,
        }
        message = {
            'key': config.REPORT_ALLOWED.name,
            'payload': transforms.dumps(payload),
            'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
                'config-property-put'),
        }
        response = self.put('/rest/config/item',
                            {'request': transforms.dumps(message)})
        self.assertEqual(200, response.status_int)
        # Each PUT should have produced exactly one usage message.
        expected = [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: True,
            messaging.Message._SOURCE: messaging.Message.ADMIN_SOURCE,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: False,
            messaging.Message._SOURCE: messaging.Message.ADMIN_SOURCE,
        }]
        self.assertEquals(expected, MockSender.get_sent())
class CourseCreationTests(UsageReportingTestBase):
    """Tests for consent collection on the first-course welcome flow."""

    def _submit_welcome(self, checkbox_value):
        """POST the welcome form with the consent checkbox set as given.

        Args:
            checkbox_value: the form value for the consent checkbox;
                the checked sentinel value, or '' for unchecked.
        """
        xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
            'add_first_course')
        self.post(
            '/admin/welcome',
            {
                'action': 'add_first_course',
                'xsrf_token': xsrf_token,
                course_creation.USAGE_REPORTING_CONSENT_CHECKBOX_NAME:
                    checkbox_value,
            })

    def _expected_welcome_messages(self, value):
        """Expected single REPORT_ALLOWED message from the welcome page."""
        return [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: value,
            messaging.Message._SOURCE: messaging.Message.WELCOME_SOURCE,
        }]

    def test_welcome_page(self):
        """The welcome page shows course options and consent language."""
        with actions.OverriddenConfig(sites.GCB_COURSES_CONFIG.name, ''):
            response = self.get('/admin/welcome')
            self.assertEqual(200, response.status_int)
            self.assertIn('Explore Sample Course', response.body)
            self.assertIn('Create Empty Course', response.body)
            self.assertIn(
                'I agree that Google may collect information about this',
                response.body)
            self.assertIn(
                'name="%s"' %
                course_creation.USAGE_REPORTING_CONSENT_CHECKBOX_NAME,
                response.body)

    def test_welcome_page_checkbox_state(self):
        """The consent checkbox reflects the current setting."""
        # Expect checkbox checked when no setting made.
        dom = self.parse_html_string(self.get('/admin/welcome').body)
        checkbox = dom.find('.//input[@type="checkbox"]')
        self.assertEqual('checked', checkbox.attrib['checked'])
        # Expect checkbox unchecked when setting is False.
        config.set_report_allowed(False)
        dom = self.parse_html_string(self.get('/admin/welcome').body)
        checkbox = dom.find('.//input[@type="checkbox"]')
        self.assertNotIn('checked', checkbox.attrib)
        # Expect checkbox checked when setting is True.
        config.set_report_allowed(True)
        dom = self.parse_html_string(self.get('/admin/welcome').body)
        checkbox = dom.find('.//input[@type="checkbox"]')
        self.assertEqual('checked', checkbox.attrib['checked'])

    def test_submit_welcome_with_accept_checkbox_checked(self):
        """Checking the consent box enables reporting and sends True."""
        self._submit_welcome(
            course_creation.USAGE_REPORTING_CONSENT_CHECKBOX_VALUE)
        self.assertEqual(True, config.REPORT_ALLOWED.value)
        self.assertEqual(
            self._expected_welcome_messages(True), MockSender.get_sent())

    def test_submit_welcome_with_accept_checkbox_unchecked(self):
        """Leaving the box unchecked disables reporting and sends False."""
        self._submit_welcome('')
        self.assertEqual(False, config.REPORT_ALLOWED.value)
        self.assertEqual(
            self._expected_welcome_messages(False), MockSender.get_sent())
class EnrollmentTests(UsageReportingTestBase):
    """Tests for student enrollment/unenrollment event reporting."""

    def test_unexpected_field_raises(self):
        """Event DTOs reject payload dicts containing unknown fields."""
        with self.assertRaises(ValueError):
            enrollment.StudentEnrollmentEventDTO(None, {'bad_field': 'x'})

    def test_enrollment_map_reduce_job(self):
        """The M/R job deletes stale events and reports the fresh ones."""
        self.maxDiff = None
        MOCK_NOW = 1427247511
        COURSE = 'xyzzy'
        NAMESPACE = 'ns_xyzzy'
        # Oldest timestamp the job retains: MAX_AGE before MOCK_NOW, with
        # MOCK_NOW first rounded down to an hour boundary.
        MIN_TIMESTAMP = (
            MOCK_NOW
            - (MOCK_NOW % enrollment.SECONDS_PER_HOUR)
            - enrollment.StudentEnrollmentEventCounter.MAX_AGE)
        # Insert some bogus StudentEnrollmentEventEntity for the M/R job
        # to count or delete.
        very_old_enroll = enrollment.StudentEnrollmentEventDTO(None, {})
        very_old_enroll.timestamp = 0
        very_old_enroll.metric = messaging.Message.METRIC_ENROLLED
        very_old_unenroll = enrollment.StudentEnrollmentEventDTO(None, {})
        very_old_unenroll.timestamp = 0
        very_old_unenroll.metric = messaging.Message.METRIC_UNENROLLED
        just_too_old_enroll = enrollment.StudentEnrollmentEventDTO(None, {})
        just_too_old_enroll.timestamp = MIN_TIMESTAMP - 1
        just_too_old_enroll.metric = messaging.Message.METRIC_ENROLLED
        just_too_old_unenroll = enrollment.StudentEnrollmentEventDTO(None, {})
        just_too_old_unenroll.timestamp = MIN_TIMESTAMP - 1
        just_too_old_unenroll.metric = messaging.Message.METRIC_UNENROLLED
        young_enough_enroll = enrollment.StudentEnrollmentEventDTO(None, {})
        young_enough_enroll.timestamp = MIN_TIMESTAMP
        young_enough_enroll.metric = messaging.Message.METRIC_ENROLLED
        young_enough_unenroll = enrollment.StudentEnrollmentEventDTO(None, {})
        young_enough_unenroll.timestamp = MIN_TIMESTAMP
        young_enough_unenroll.metric = messaging.Message.METRIC_UNENROLLED
        now_enroll = enrollment.StudentEnrollmentEventDTO(None, {})
        now_enroll.timestamp = MOCK_NOW
        now_enroll.metric = messaging.Message.METRIC_ENROLLED
        now_unenroll = enrollment.StudentEnrollmentEventDTO(None, {})
        now_unenroll.timestamp = MOCK_NOW
        now_unenroll.metric = messaging.Message.METRIC_UNENROLLED
        dtos = [
            very_old_enroll,
            very_old_unenroll,
            just_too_old_enroll,
            just_too_old_unenroll,
            young_enough_enroll,
            young_enough_unenroll,
            now_enroll,
            now_unenroll,
        ]
        app_context = actions.simple_add_course(COURSE, ADMIN_EMAIL, 'Test')
        with common_utils.Namespace(NAMESPACE):
            enrollment.StudentEnrollmentEventDAO.save_all(dtos)
        # Run map/reduce job with a setup function replaced so that it will
        # always choose the same timestamp as the start time.
        job_class = enrollment.StudentEnrollmentEventCounter
        save_b_a_m_p = job_class.build_additional_mapper_params
        try:
            # NOTE: assigned onto the job class below, so 'self' here is
            # the job instance, not this test case.
            def fixed_time_b_a_m_p(self, app_context):
                return {self.MIN_TIMESTAMP: MIN_TIMESTAMP}
            job_class.build_additional_mapper_params = fixed_time_b_a_m_p
            # Actually run the job.
            enrollment.StudentEnrollmentEventCounter(app_context).submit()
            self.execute_all_deferred_tasks(
                models.StudentLifecycleObserver.QUEUE_NAME)
            self.execute_all_deferred_tasks()
        finally:
            # Always restore the original class method.
            job_class.build_additional_mapper_params = save_b_a_m_p
        # Verify that the DTOs older than the cutoff have been removed from
        # the datastore.
        with common_utils.Namespace(NAMESPACE):
            dtos = enrollment.StudentEnrollmentEventDAO.get_all()
        dtos.sort(key=lambda dto: (dto.timestamp, dto.metric))
        self.assertEqual(
            [young_enough_enroll.dict,
             young_enough_unenroll.dict,
             now_enroll.dict,
             now_unenroll.dict],
            [d.dict for d in dtos])
        # Verify that we have messages for the new-enough items, and no
        # messages for the older items.
        messages = MockSender.get_sent()
        messages.sort(key=lambda m: (m['timestamp'], m['metric']))
        MOCK_NOW_HOUR = MOCK_NOW - (MOCK_NOW % enrollment.SECONDS_PER_HOUR)
        expected = [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: MIN_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_ENROLLED,
            messaging.Message._VALUE: 1,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: MIN_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_UNENROLLED,
            messaging.Message._VALUE: 1,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: MOCK_NOW_HOUR,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_ENROLLED,
            messaging.Message._VALUE: 1,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: MOCK_NOW_HOUR,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_UNENROLLED,
            messaging.Message._VALUE: 1,
        }]
        self.assertEqual(expected, messages)
        sites.reset_courses()

    def test_end_to_end(self):
        """Actually enroll and unenroll students; verify reporting counts."""
        COURSE_NAME_BASE = 'test'
        NUM_COURSES = 2
        NUM_STUDENTS = 3
        for course_num in range(NUM_COURSES):
            course_name = '%s_%d' % (COURSE_NAME_BASE, course_num)
            actions.simple_add_course(course_name, ADMIN_EMAIL, course_name)
            actions.update_course_config(
                course_name,
                {
                    'course': {
                        'now_available': True,
                        'browsable': True,
                    },
                })
            for student_num in range(NUM_STUDENTS):
                name = '%s_%d_%d' % (COURSE_NAME_BASE, course_num, student_num)
                actions.login(name + '@foo.com')
                actions.register(self, name, course_name)
                if student_num == 0:
                    actions.unregister(self, course_name)
                actions.logout()
        # Expect no messages yet; haven't run job.
        self.assertEqual([], MockSender.get_sent())
        # Run all counting jobs.
        with actions.OverriddenConfig(config.REPORT_ALLOWED.name, True):
            usage_reporting.StartReportingJobs._for_testing_only_get()
            self.execute_all_deferred_tasks(
                models.StudentLifecycleObserver.QUEUE_NAME)
            self.execute_all_deferred_tasks()
        # Verify counts. (Ignore dates, these are fickle and subject to
        # weirdness on hour boundaries. Also ignore course/instance IDs;
        # they are non-random and thus all the same.)
        num_enrolled_msgs = 0
        num_unenrolled_msgs = 0
        num_student_count_msgs = 0
        for message in MockSender.get_sent():
            if (message[messaging.Message._METRIC] ==
                messaging.Message.METRIC_STUDENT_COUNT):
                num_student_count_msgs += 1
                self.assertEqual(
                    NUM_STUDENTS, message[messaging.Message._VALUE])
            elif (message[messaging.Message._METRIC] ==
                  messaging.Message.METRIC_ENROLLED):
                num_enrolled_msgs += 1
                self.assertEqual(
                    NUM_STUDENTS, message[messaging.Message._VALUE])
            elif (message[messaging.Message._METRIC] ==
                  messaging.Message.METRIC_UNENROLLED):
                num_unenrolled_msgs += 1
                self.assertEqual(
                    1, message[messaging.Message._VALUE])
        self.assertEqual(NUM_COURSES, num_enrolled_msgs)
        self.assertEqual(NUM_COURSES, num_unenrolled_msgs)
        self.assertEqual(NUM_COURSES, num_student_count_msgs)
        sites.reset_courses()
class UsageReportingTests(UsageReportingTestBase):
    """Tests for the cron entry point that starts the reporting jobs."""

    def test_disallowed(self):
        """Jobs are not started when reporting is disallowed."""
        config.set_report_allowed(False)
        response = self.get(usage_reporting.StartReportingJobs.URL,
                            headers={'X-AppEngine-Cron': 'True'})
        self.assertEqual(200, response.status_int)
        self.assertEqual('Disabled.', response.body)

    def test_not_from_cron_and_not_admin(self):
        """Non-cron, non-admin requests are rejected with 403."""
        config.set_report_allowed(True)
        actions.logout()
        response = self.get(usage_reporting.StartReportingJobs.URL,
                            expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual('Forbidden.', response.body)

    def test_not_from_cron_but_is_admin(self):
        """Admins may start jobs manually, without the cron header."""
        config.set_report_allowed(True)
        response = self.get(usage_reporting.StartReportingJobs.URL,
                            expect_errors=True)
        self.assertEqual(200, response.status_int)
        self.assertEqual('OK.', response.body)

    def test_jobs_run(self):
        """Running the jobs sends student-count and enrolled messages."""
        COURSE = 'test'
        actions.simple_add_course(COURSE, ADMIN_EMAIL, 'Test')
        actions.register(self, 'Joe Admin', COURSE)
        config.set_report_allowed(True)
        response = self.get(usage_reporting.StartReportingJobs.URL,
                            headers={'X-AppEngine-Cron': 'True'})
        self.assertEqual(200, response.status_int)
        self.assertEqual('OK.', response.body)
        # Capture "now" before running tasks so the expected enrolled
        # timestamp (rounded down to the hour) matches.
        now = int(time.time())
        self.execute_all_deferred_tasks(
            models.StudentLifecycleObserver.QUEUE_NAME)
        self.execute_all_deferred_tasks()
        expected = [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_STUDENT_COUNT,
            messaging.Message._VALUE: 1,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._COURSE: FAKE_COURSE_ID,
            messaging.Message._TIMESTAMP: now - (now % 3600),
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_ENROLLED,
            messaging.Message._VALUE: 1,
        }]
        actual = MockSender.get_sent()
        actual.sort(key=lambda x: x['timestamp'])
        self.assertEqual(expected, actual)
        sites.reset_courses()
class MessageCatcher(object):
    """Test stand-in for the remote usage-reporting endpoint.

    Serves reporting configuration on GET and records every message
    POSTed to it, so tests can inspect what would have been reported.
    The response code is configurable to simulate endpoint failures.
    """

    URL = 'https://docs.google.com/a/google.com/forms/d/<IDNUMBER>/formResponse'
    FORM_FIELD = 'entry.12345'
    DEFAULT_CONFIG = transforms.dumps({
        messaging.Sender._REPORT_ENABLED: True,
        messaging.Sender._REPORT_TARGET: URL,
        messaging.Sender._REPORT_FORM_FIELD: FORM_FIELD,
    })
    _config = DEFAULT_CONFIG
    _return_code = 200
    _messages = []
    Response = collections.namedtuple('Response', ['status_code', 'content'])

    @classmethod
    def get(cls):
        """Serve the current settings blob with the configured status."""
        return cls.Response(cls._return_code, cls._config)

    @classmethod
    def post(cls, request):
        """Accept a posted report; record it unless simulating failure."""
        if cls._return_code == 200:
            # When simulating a failure, pretend not to have seen the
            # message at all, so retry logic can be exercised.
            decoded = transforms.loads(request.get(cls.FORM_FIELD)[0])
            cls._messages.append(decoded)
        return cls.Response(cls._return_code, '')

    @classmethod
    def get_sent(cls):
        """Return a deep copy of all recorded messages."""
        return copy.deepcopy(cls._messages)

    @classmethod
    def clear_sent(cls):
        """Forget all recorded messages (in place)."""
        cls._messages[:] = []

    @classmethod
    def set_return_code(cls, return_code):
        """Set the HTTP status to return from get() and post()."""
        cls._return_code = return_code

    @classmethod
    def set_config(cls, cfg):
        """Replace the settings blob served by get()."""
        cls._config = cfg
class MessagingTests(actions.TestBase):
    """Tests for message construction, sending, and retry queueing."""

    COURSE_NAME = 'test'
    NAMESPACE = 'ns_test'

    # Object to emulate response from urlfetch.fetch for our mock.
    Response = collections.namedtuple('Response', ('status_code', 'content'))

    def mock_urlfetch_fetch(self, url, method=None, payload=None,
                            follow_redirects=None):
        """Override of urlfetch.fetch method; forwards to self.get/post."""
        if not url.startswith('https://'):
            raise urlfetch.Error('Malformed URL')
        if method == 'GET':
            return MessageCatcher.get()
        elif method == 'POST':
            return MessageCatcher.post(urlparse.parse_qs(payload))

    def setUp(self):
        super(MessagingTests, self).setUp()
        messaging.ENABLED_IN_DEV_FOR_TESTING = True
        # Patch urlfetch so all reporting traffic lands on MessageCatcher.
        self.save_urlfetch_fetch = urlfetch.fetch
        urlfetch.fetch = self.mock_urlfetch_fetch
        actions.login(ADMIN_EMAIL, is_admin=True)
        self.app_config = actions.simple_add_course(
            self.COURSE_NAME, ADMIN_EMAIL, self.COURSE_NAME)

    def tearDown(self):
        messaging.ENABLED_IN_DEV_FOR_TESTING = False
        # Force a re-fetch of report settings in the next test.
        messaging.Sender._report_settings_timestamp = 0
        urlfetch.fetch = self.save_urlfetch_fetch
        MessageCatcher.clear_sent()
        MessageCatcher.set_return_code(200)
        MessageCatcher.set_config(MessageCatcher.DEFAULT_CONFIG)
        sites.reset_courses()
        super(MessagingTests, self).tearDown()

    def test_blue_sky_instance_message(self):
        """Instance-scoped messages carry all fields except a course ID."""
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        messages = MessageCatcher.get_sent()
        self.assertEqual(1, len(messages))
        message = messages[0]
        self.assertEqual(messaging.Message.METRIC_REPORT_ALLOWED,
                         message[messaging.Message._METRIC])
        self.assertEqual(True,
                         message[messaging.Message._VALUE])
        self.assertAlmostEqual(int(time.time()),
                               message[messaging.Message._TIMESTAMP],
                               delta=10)
        self.assertEqual(os.environ['GCB_PRODUCT_VERSION'],
                         message[messaging.Message._VERSION])
        self.assertNotEqual(0, len(message[messaging.Message._INSTALLATION]))
        self.assertNotIn(messaging.Message._COURSE, message)

    def test_blue_sky_course_message(self):
        """Course-scoped messages additionally carry a course ID."""
        student_count = 1453
        with common_utils.Namespace(self.NAMESPACE):
            messaging.Message.send_course_message(
                messaging.Message.METRIC_STUDENT_COUNT, student_count)
        messages = MessageCatcher.get_sent()
        self.assertEqual(1, len(messages))
        message = messages[0]
        self.assertEqual(messaging.Message.METRIC_STUDENT_COUNT,
                         message[messaging.Message._METRIC])
        self.assertEqual(student_count,
                         message[messaging.Message._VALUE])
        self.assertAlmostEqual(int(time.time()),
                               message[messaging.Message._TIMESTAMP],
                               delta=10)
        self.assertEqual(os.environ['GCB_PRODUCT_VERSION'],
                         message[messaging.Message._VERSION])
        self.assertNotEqual(0, len(message[messaging.Message._INSTALLATION]))
        self.assertNotEqual(0, len(message[messaging.Message._COURSE]))

    def test_random_ids_are_consistent(self):
        """Installation and course IDs are stable across messages."""
        num_messages = 10
        student_count = 123
        with common_utils.Namespace(self.NAMESPACE):
            for unused in range(num_messages):
                messaging.Message.send_course_message(
                    messaging.Message.METRIC_STUDENT_COUNT, student_count)
        messages = MessageCatcher.get_sent()
        self.assertEqual(num_messages, len(messages))
        for message in messages:
            self.assertEqual(
                messages[0][messaging.Message._INSTALLATION],
                message[messaging.Message._INSTALLATION])
            self.assertEqual(
                messages[0][messaging.Message._COURSE],
                message[messaging.Message._COURSE])

    def test_report_disabled_by_config(self):
        """Nothing is sent or queued when the remote config disables it."""
        MessageCatcher.set_config(
            transforms.dumps({
                messaging.Sender._REPORT_ENABLED: False,
                messaging.Sender._REPORT_TARGET: 'irrelevant',
                messaging.Sender._REPORT_FORM_FIELD: 'irrelevant',
            }))
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        # Should have no messages sent, and nothing queued.
        messages = MessageCatcher.get_sent()
        self.assertEqual(0, len(messages))
        tasks = self.taskq.GetTasks('default')
        self.assertEqual(0, len(tasks))

    def _assert_message_queued_and_succeeds(self):
        """Assert the message was deferred, then delivered on retry."""
        # Should have no messages sent, and one item queued.
        messages = MessageCatcher.get_sent()
        self.assertEqual(0, len(messages))
        # Now execute background tasks; expect one message.
        self.execute_all_deferred_tasks(
            models.StudentLifecycleObserver.QUEUE_NAME)
        self.execute_all_deferred_tasks()
        messages = MessageCatcher.get_sent()
        self.assertEqual(1, len(messages))
        message = messages[0]
        self.assertEqual(messaging.Message.METRIC_REPORT_ALLOWED,
                         message[messaging.Message._METRIC])
        self.assertEqual(True,
                         message[messaging.Message._VALUE])
        self.assertAlmostEqual(int(time.time()),
                               message[messaging.Message._TIMESTAMP],
                               delta=10)
        self.assertEqual(os.environ['GCB_PRODUCT_VERSION'],
                         message[messaging.Message._VERSION])
        self.assertNotEqual(0, len(message[messaging.Message._INSTALLATION]))
        self.assertNotIn(messaging.Message._COURSE, message)

    def test_report_queued_when_config_malformed(self):
        """Undecodable settings cause the message to queue, then retry."""
        MessageCatcher.set_config(
            'this will not properly decode as JSON')
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        MessageCatcher.set_config(MessageCatcher.DEFAULT_CONFIG)
        self._assert_message_queued_and_succeeds()

    def test_report_queued_when_config_unavailable(self):
        """A 500 from the settings fetch queues the message for retry."""
        MessageCatcher.set_return_code(500)
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        MessageCatcher.set_return_code(200)
        self._assert_message_queued_and_succeeds()

    def test_report_queued_when_config_url_malformed(self):
        """A bad target URL in settings queues the message for retry."""
        MessageCatcher.set_config(
            transforms.dumps({
                messaging.Sender._REPORT_ENABLED: True,
                messaging.Sender._REPORT_TARGET: 'a malformed url',
                messaging.Sender._REPORT_FORM_FIELD: 'entry.12345',
            }))
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        MessageCatcher.set_config(MessageCatcher.DEFAULT_CONFIG)
        self._assert_message_queued_and_succeeds()

    def test_report_queued_when_post_receives_non_200(self):
        """A failed POST queues the message for retry."""
        # Send one message through cleanly; this will get the messaging
        # module to retain its notion of the destination URL and not re-get
        # it on the next message.
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        MessageCatcher.clear_sent()
        # Set response code so that the POST fails; verify that that retries
        # using the deferred task queue.
        MessageCatcher.set_return_code(500)
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, True)
        MessageCatcher.set_return_code(200)
        self._assert_message_queued_and_succeeds()
class ConsentBannerTests(UsageReportingTestBase):
    """Tests for when and how the consent banner is displayed."""

    COURSE_NAME = 'test_course'
    SUPER_MESSAGE = 'Would you like to help improve Course Builder?'
    NOT_SUPER_MESSAGE = 'Please ask your Course Builder Administrator'
    NOT_SUPER_EMAIL = 'not-super@test.com'

    def setUp(self):
        super(ConsentBannerTests, self).setUp()
        self.base = '/' + self.COURSE_NAME
        self.app_context = actions.simple_add_course(
            self.COURSE_NAME, ADMIN_EMAIL, 'Banner Test Course')
        self.old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
        # Grant course-admin (but not super-admin) rights to this user.
        courses.Course.ENVIRON_TEST_OVERRIDES = {
            'course': {'admin_user_emails': self.NOT_SUPER_EMAIL}}

    def tearDown(self):
        del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
        namespace_manager.set_namespace(self.old_namespace)
        courses.Course.ENVIRON_TEST_OVERRIDES = {}
        super(ConsentBannerTests, self).tearDown()

    def _find_banner(self, url):
        """GET url and return the consent-banner <div>, or None."""
        dom = self.parse_html_string(self.get(url).body)
        return dom.find('.//div[@class="consent-banner"]')

    def test_banner_with_buttons_shown_to_super_user_on_dashboard(self):
        """Super users see the banner with both action buttons."""
        banner = self._find_banner('dashboard')
        self.assertIsNotNone(banner)
        self.assertIn(self.SUPER_MESSAGE, banner.find('.//h1').text)
        self.assertEqual(2, len(banner.findall('.//button')))

    def test_banner_with_buttons_shown_to_super_user_on_global_admin(self):
        """The banner also appears on the global admin page."""
        banner = self._find_banner('/admin/global')
        self.assertIsNotNone(banner)
        self.assertIn(self.SUPER_MESSAGE, banner.find('.//h1').text)
        self.assertEqual(2, len(banner.findall('.//button')))

    def test_banner_without_buttons_shown_to_instructor_on_dashboard(self):
        """Non-super course admins see the banner but no buttons."""
        actions.logout()
        actions.login(self.NOT_SUPER_EMAIL, is_admin=False)
        banner = self._find_banner('dashboard')
        self.assertIsNotNone(banner)
        self.assertIn(self.NOT_SUPER_MESSAGE, banner.findall('.//p')[1].text)
        self.assertEqual(0, len(banner.findall('.//button')))

    def test_banner_not_shown_when_choices_have_been_made(self):
        """Once consent is set, no role sees the banner anywhere."""
        config.set_report_allowed(False)
        # Check super-user role; global admin page.
        self.assertIsNone(self._find_banner('/admin/global'))
        # Check super-user role; dashboard.
        self.assertIsNone(self._find_banner('dashboard'))
        # Check non-super role; dashboard.
        actions.logout()
        actions.login(self.NOT_SUPER_EMAIL, is_admin=False)
        self.assertIsNone(self._find_banner('dashboard'))
class ConsentBannerRestHandlerTests(UsageReportingTestBase):
    """Tests for the consent-banner REST endpoint."""

    URL = '/rest/modules/usage_reporting/consent'
    XSRF_TOKEN = 'usage_reporting_consent_banner'

    def do_post(self, xsrf_token, is_allowed):
        """POST an is_allowed choice to the consent handler."""
        request = {
            'xsrf_token': xsrf_token,
            'payload': transforms.dumps({'is_allowed': is_allowed})
        }
        return self.post(self.URL, {'request': transforms.dumps(request)})

    def test_handler_rejects_bad_xsrf_token(self):
        """An invalid XSRF token yields an in-body 403 status."""
        response = self.do_post('bad_xsrf_token', False)
        self.assertEqual(200, response.status_int)
        response_dict = transforms.loads(response.body)
        self.assertEqual(403, response_dict['status'])
        self.assertIn('Bad XSRF token.', response_dict['message'])

    def test_handler_rejects_non_super_user(self):
        """Logged-out (non-super) users get an in-body 401 status."""
        actions.logout()
        xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN)
        response = self.do_post(xsrf_token, False)
        self.assertEqual(200, response.status_int)
        response_dict = transforms.loads(response.body)
        self.assertEqual(401, response_dict['status'])
        self.assertIn('Access denied.', response_dict['message'])

    def test_handler_sets_consent_and_sends_message(self):
        """Valid posts record consent and emit BANNER_SOURCE messages."""
        self.assertFalse(config.is_consent_set())
        xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN)
        self.do_post(xsrf_token, True)
        self.assertTrue(config.is_consent_set())
        self.assertTrue(config.REPORT_ALLOWED.value)
        self.do_post(xsrf_token, False)
        self.assertFalse(config.REPORT_ALLOWED.value)
        expected = [{
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: True,
            messaging.Message._SOURCE: messaging.Message.BANNER_SOURCE,
        }, {
            messaging.Message._INSTALLATION: FAKE_INSTALLATION_ID,
            messaging.Message._TIMESTAMP: FAKE_TIMESTAMP,
            messaging.Message._VERSION: os.environ['GCB_PRODUCT_VERSION'],
            messaging.Message._METRIC: messaging.Message.METRIC_REPORT_ALLOWED,
            messaging.Message._VALUE: False,
            messaging.Message._SOURCE: messaging.Message.BANNER_SOURCE,
        }]
        self.assertEqual(expected, MockSender.get_sent())
class DevServerTests(UsageReportingTestBase):
    """Test that consent widgets are turned off in normal dev mode."""

    def test_welcome_page_message_not_shown_in_dev(self):
        """Consent text appears only while the testing override is on."""
        consent_text = (
            'I agree that Google may collect information about this')
        # Sanity check: text present while test mode is enabled.
        self.assertIn(consent_text, self.get('/admin/welcome').body)
        # Turning the override off should hide the text in dev mode.
        messaging.ENABLED_IN_DEV_FOR_TESTING = False
        self.assertNotIn(consent_text, self.get('/admin/welcome').body)

    def test_consent_banner_not_shown_in_dev(self):
        """The consent banner appears only while test mode is enabled."""
        def find_banner():
            dom = self.parse_html_string(self.get('/admin/global').body)
            return dom.find('.//div[@class="consent-banner"]')
        # Sanity check: banner present while test mode is enabled.
        self.assertIsNotNone(find_banner())
        # Turning the override off should remove the banner in dev mode.
        messaging.ENABLED_IN_DEV_FOR_TESTING = False
        self.assertIsNone(find_banner())
| |
""" level.py
function
buildLevel
wipeData
"""
import pygame
import dinosInSpace
import soundFx56
import scroller56
import radar56
import star56
import static56
import block56
import dino56
import interface56
import tracer56
import infoGraphic56
import controlMenu56
import dataStorage56
import endMessage
import snack
import autoMessage
import gfx56
import modeSwitch
import snackPacket
import simpleLabel
# colors (RGB triples used by level background/interface objects)
BERRY = (153, 86, 98)
TWILIGHT = (23, 123, 159)
MOONSTRUCK = (68, 103, 161)
LILAC = (172, 191, 233)
PLUM = (134, 106, 125)
KEYLIME = (198, 227, 171)
# default dino speed
DSPEED = 6
# default dino spawn-delay value used in dinoSets entries
# (see the "dino data" comment in buildLevel for the field layout)
DSTEP = 25
# screen coordinates for the puzzle-title label (centered near top)
PUZZLE_TITLE_LABEL_CENTER = (400,100)
def buildLevel(screen, mapName, isUserMap, profileName=None, puzzleName=None, _fps=60, imageFrom=None):
""" builds and positions level objects """
# define parameters
## # --------- data testing - temp: only for packing old data lists into .dat
##
# if not isUserMap:
#
# mapData = (mode,
# mustSave,
# gridSize,
# bkgObjData,
# userBlocks,
# goals,
# spawns,
# sArrows,
# linkedArrows,
# mines,
# switches,
# dinoSets,
# message
# )
#
#
# dataStorage56.writeMap(mapName, mapData, isUserMap)
(trashThis, # todo: remove this -- was mode (action or puzzle) but no longer needed as just puzzle
mustSave,
gridSize,
bkgObjData,
userBlocks,
goals,
spawns,
sArrows,
linkedArrows,
mines,
switches,
dinoSets,
message) = dataStorage56.getMap(mapName, isUserMap) # message is always None, left out of laziness
snax = None
# altExits = None
cameraPos = None # default center
# map mods
#
# - hardcode background data / messages here (cannot build with map editor)
# - modify by overwrite
# > example:
# -modify speed of first dino (increase by 3x):
#
# ## map conditional
# if mapName == "mapName":
#
# # overwrite speed data
# dinoSets[0][3] = 3 * DSPEED
#
#
# __________________________________
# data structure:
# ----------------------------------
#
# bkgObj data
#
# stars: class ("star"), step, color
# scrolling backgrounds objects
# (
# class, -> "scroll", "float" *, "wrap" *, "cow" **
# minSpeed, -> 1
# loc, -> (1,1)
# imgFile, -> "fileName.png"
# scale, -> (20,20) or "2X"
# getAt, -> (0,0)
# spinRange, -> 1.5
# * floatSpeed, -> (.5,1)
# ** flashData -> (OFFTIME, VAR_RANGE)
# )
#
# message data
#
# message = controlMenuXX.messageObject
#
# dino data
#
# [type, num, color, speed, spawn, delayStart, delayStep]
# example: [ "delux", 1, "green", DSPEED, 1, DSTEP, 0 ]
#
# snax data
#
# [
# [imgKey, gridPos, switchChan=0, defState=None]
# example: ["HAMBURGER", (4,5), 2, "ON"]
# ]
#
# ******** must sync new profile data in areaSelect.py AND snackWorld.py for snax *********
#
#
# ----------------------------------
# puzzle name : [file name, locked, complete, difficulty, snacks collected, secret exit found]
#
# -0 (string) _file name_ : passed as 'dest' to map selector (level)
# -1 (bool) _locked_ : controlls player access / preview
# -2 (bool) _complete_ : displays if complete, adds to global profile completed count
# -3 (int) _difficulty_ : displays difficulty level
# -4 (list) _snacks_ : displays how many snacks collected as fraction, pass 'None' if n/a
# -5 (list) _exits found : displays how many alternate exits found, pass 'None' if n/a
#
# "Roundabout" : ["1_Roundabout", False, False, 1, [0,0], [1,1]],
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| add snax ||||||||||||||||||||||||||||||||||||||||
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
nextTutorial = None
isLastPuzzle = False
if not isUserMap:
puzzleData = dataStorage56.getPuzzleData(profileName, puzzleName)
if mapName == "1_TUT1":
message = True
nextTutorial = "1_TUT2"
nextPuzzle = "tut2"
elif mapName == "1_TUT2":
message = True
nextTutorial = "1_TUT3"
nextPuzzle = "tut3"
elif mapName == "1_TUT3":
message = True
nextTutorial = "1_TUT4"
nextPuzzle = "tut4"
elif mapName == "1_TUT4":
message = True
nextTutorial = "1_TUT5"
nextPuzzle = "tut5"
elif mapName == "1_TUT5":
message = True
nextTutorial = "1_TUT6"
nextPuzzle = "tut6"
elif mapName == "1_TUT6":
message = True
nextTutorial = "1_TUT7"
nextPuzzle = "tut7"
elif mapName == "1_TUT7":
message = True
nextTutorial = "_TERMINATE" # string is arbitrary as long as it doesn't start with '1_TUT'
nextPuzzle = "_TERMINATE" # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
elif mapName == "2_ODD_COLOR_OUT":
message = True
elif mapName == "3_SMALL_DETOUR":
message = True
elif mapName == "4_GO_FOR_IT":
message = True
snax = [
["sugar pufz", (1,3)]
]
elif mapName == "5_SWITCHES":
message = True
elif mapName == "9_CROSS_PATHS":
message = True
snax = [
["shrimp nuggets", (3,1), 1, "OFF"],
["candydough", (6,1), 1, "ON"]
]
elif mapName == "10_LOOP":
snax = [
["barbequarks", (2,1)],
["picklesicle", (4,5)]
]
elif mapName == "11_ISLAND":
message = True
elif mapName == "12_SARDINES_1":
snax = [
["fizzy beerwafers", (1,1)]
]
elif mapName == "14_TRY_THE_HARD_WAY":
message = True
snax = [
["nanocorn", (1,5)]
]
elif mapName == "19_CORRAL":
message = True
snax = [
["lucky coffee", (3,1)],
["munchzilla", (7,2)]
]
elif mapName == "21_SPLIT_UP":
snax = [
["cheese zees", (3,1)],
["chocobeanz", (2,5), 2, "ON"]
]
elif mapName == "22_ASTEROID_FIELD":
message = True
cameraPos = "R"
elif mapName == "26_UNDER_YOUR_NOSE":
snax = [
["funtarts", (2,1), 1, "ON"],
["grade a milk", (2,2)]
]
elif mapName == "28_SECTIONS":
message = True
cameraPos = "TR"
elif mapName == "29_SARDINES_2":
message = True
elif mapName == "30_TASTY_TROVE":
cameraPos = "L"
snax = [
["penutbutter cubes", (4,4)],
["yumzingers", (6,3)]
]
elif mapName == "31_SANDBAR":
snax = [
["xtremophile gummies", (1,1), 1, "ON"],
["monster biscuits", (7,3)]
]
elif mapName == "32_HOPSCOTCH":
cameraPos = "L"
snax = [
["broccolibanana", (5,1), 1, "OFF"]
]
elif mapName == "33_CORNERS":
snax = [
["pizzaballoon", (1,1)]
]
elif mapName == "34_COPY_CAT":
message = True
snax = [
["marshmelons", (5,3)]
]
elif mapName == "38_NOT_SO_FAST":
message = True
snax = [
["joybacon", (2,3)],
["sushi yumyum cone", (3,1)],
["hexberries", (5,5), 2, "OFF"]
]
elif mapName == "39_REALLY_CROWDED_CREW":
message = True
elif mapName == "40_RESCUE":
message = True
snax = [
["lazercut fries", (4,4)]
]
elif mapName == "41_GATEWAY":
isLastPuzzle = True
message = True
# elif mapName == "30_TASTY_TROVE":
#wrapping bkg data: 'wrap', minSpeed, gridPair, imgFile, scaleTo, getAt, spinRange, floatSpeed
# END MODS -----------------------------------------------///////////////////////////////
# make everything: code after this point runs every time
# game, scroller, radar
game = dinosInSpace.Game(screen, nextTutorial, isLastPuzzle)
soundFx56.GameSoundManager(game)
scroller56.Scroller(game, gridSize, cameraPos, _fps)
radar = radar56.Radar(game)
radar56.makeGrid(game, gridSize)
# stars / moving background objects
if bkgObjData: # note there SHOULD always be bkgobj data
star56.construct(game, bkgObjData)
# user blocks
block56.buildUserBlocks(game, userBlocks)
# all static blocks except for goals
if spawns:
for s in spawns:
spawn = static56.buildSpawn(game, s[0], s[1], dinoSets)
if sArrows:
for a in sArrows:
arrow = static56.buildStaticArrow(game, a[0], a[1], a[2])
if mines:
for m in mines:
mine = static56.buildMine(game, m[0])
linkList = []
if linkedArrows:
for l in linkedArrows:
linkArrow = static56.buildLinkedArrow(game, l[0], l[1], l[2], l[3], l[4])
linkList.append(linkArrow)
# snax
snack.initImgLib()
linkedSnax = {}
i = 0
snaxRemaining = False
if snax:
for s in snax:
if not puzzleData[4][i]: # if the snack is not collected already
if len(s) > 2: # if the snax mod has linked data (4 vs 2 len)
newSnack = snack.Snack(snax[i][0], snax[i][1], i, s[3]) # xtra last param is default linked state ("ON" or "OFF")
else:
newSnack = snack.Snack(snax[i][0], snax[i][1], i)
newSnack.register()
# linked snax
if len(s) > 2: # if the snax mod has linked data (4 vs 2 len)
linkedSnax[newSnack] = s[2] # object as key and switch channel as value
snaxRemaining = True
i += 1
# game.addGroup(snack.Snack.snaxGroup) -- old place for snax addition
#switchCount = 0
if switches:
for s in switches:
#switchCount += 1
switch = static56.buildSwitch(game, s[0])
myLinkedObjs = []
for l in linkList:
if l.getSwitchNum() == s[1]: # s[1] is channel
myLinkedObjs.append(l)
for lsnax in linkedSnax:
if linkedSnax[lsnax] == s[1]: # if linked snax value (channel) == switch channel
myLinkedObjs.append(lsnax)
switch.setLinked(myLinkedObjs)
if len(static56.StaticBlock.staticGroup) > 0:
game.addGroup(static56.StaticBlock.staticGroup) # add all sObj to game
if snaxRemaining:
game.addGroup(snack.Snack.snaxGroup)
# goals (space stations)
if goals:
for g in goals:
goal = static56.buildGoal(game, g[0], g[1], _fps)
# grid blocks
interface56.makeGridBox(game, gridSize)
#### snax was here
# dinos
for s in dinoSets:
dinos = dino56.buildDinoSet(game, s, _fps)
dino56.Dino.setGridSize(gridSize)
dino56.DinoDelux.setREF_TOTAL()
game.addGroup(dino56.Dino.dinoGroup)
game.addGroup(dino56.Dino.packetGroup) ###################### packet group
snackPacket.initImgLib()
# depth index for above passing animations
paDepth = dinosInSpace.Game.getGroupListLen()
dino56.Dino.setPaDepth(paDepth)
# cursor >>
# add objects to radar >>
# tracer
block56.Tracer()
static56.Switch.makeTracers()
game.addGroup(block56.Tracer.tracerGroup)
game.addGroup(tracer56.SwitchTracer.switchTracerGroup)
# counters / infoGraphics, grid
interface56.CursorCounter(game)
interface56.ItemMenu(game)
##static56.makeGoalCounter(game)
# radar56.makeGrid(game, gridSize) moved to beginning
## infoGraphic56.FPS(game) ############ ------------------------------------------------------- F P S
infoGraphic56.SpawnInfoBox.setLoc()
infoGraphic56.SpawnInfoBox.addToGame(game)
# add objects to radar
## radar56.Radar.setDinoGroup()
## radar56.Radar.setStaticGroup()
## radar56.Radar.setBlockGroup()
## radar.makeImages()
game.addGroup(radar56.RObj.rObjGroup)
# cursor
interface56.Cursor(game, _fps)
# in game message
if message: # make messages and stub through a message control menu
autoMessage.initImgLib()
autoMessage.StdMessage(game, profileName, puzzleName, _fps)
game.addGroup(autoMessage.StdMessage.me.stdMessageGroup)
stub = infoGraphic56.MessageStub(game, True)
game.addGroup(stub.myGroup)
else: # make stub independent of message control
stub = infoGraphic56.MessageStub(game, False)
game.addGroup(stub.myGroup)
## if message: # make messages and stub through a message control menu
## message(game)
## game.addGroup(message.messageGroup)
## else: # make stub independent of message control
## stub = infoGraphic56.MessageStub(game, None)
## game.addGroup(stub.myGroup)
# in play control menu and dependants
simpleLabel.initImgLib()
formattedName = ""
if isUserMap:
for i in mapName:
if i != "_":
formattedName += i
else:
formattedName += " "
else:
formattedName = puzzleName
inPlayMenu = controlMenu56.InPlayMenu(game)
label = simpleLabel.Label(formattedName, PUZZLE_TITLE_LABEL_CENTER) # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
controlMenu56.InPlayMenu.inPlayGroup.add(label) # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
inPlayMenu.initDependants()
game.addGroup(controlMenu56.InPlayMenu.inPlayGroup)
# endMessage and professor dinostein
endMessage.initImgLib()
if isUserMap:
profileName = "_user"
puzzleName = mapName
eMsg = endMessage.EndMessage(game, mustSave, profileName, puzzleName, _fps)
game.addGroup(eMsg.endMessageGroup)
# run puzzle !!!!!
if isUserMap:
snapshot = game.runGame(_fps, mapName, imageFrom) # map name passed in so photo can be logged
else:
snapshot = game.runGame(_fps, None, imageFrom)
wipeData()
retry = False
if game.retry:
retry = True
#################
### tutorial vars
#################
wonTutorialStage = False
if game.wonTutorialStage:
if not retry:
wonTutorialStage = True
leaveRequest = False
if game.leaveRequest:
leaveRequest = True
#################
#################
#################
#################
### endgame vars
#################
wonLastStage = False
if game.wonLastStage and not retry:
wonLastStage = True
del(game)
if not nextTutorial: # normal case
return retry, snapshot, wonLastStage
else: # coming from a tutorial stage
if wonTutorialStage:
return snapshot, nextTutorial, nextPuzzle, leaveRequest # returns name of next tutorial (or _TERM) to program's main loop
else:
return snapshot, mapName, puzzleName, leaveRequest # returns name of the current to program's main loop
def wipeData():
    """Reset every class-level store used during a puzzle.

    Must be called once the level ends, otherwise stale sprites,
    groups and counters leak into the next puzzle.
    """
    # Order mirrors the construction order used when the level is built.
    for holder in (dinosInSpace.Game, block56, interface56, scroller56,
                   star56, dino56, radar56, static56, tracer56,
                   infoGraphic56, controlMenu56, soundFx56, snack,
                   endMessage, autoMessage, snackPacket):
        holder.wipe()
# * dino parameter rules:
#
# - each list is a set of dinos
# - each list must be the order in which the dinos come out
# - the delay time b/w any dino must be a min of 10 (see static56.Spawn.setDirTimerKey for why)
# - give None as last parameter in first set if direction will not change
#bkgObjData = [
# ("star", 6, BERRY),
# ("scroll", 1, (5,5), "nPlanet2.png", "2X", (0,0), None),
# ("cow", 2, (3,3), "testCowUnlit.png", (60*4, 60*4), (0,0), 1.2, (1,0), (10,100) ),
# ("cow", 2, (3,3), "testCowUnlit.png", (60*4, 60*4), (0,0), 1.2, (.5,0), (100,30) ) # testCow2 and testCowUnlit
# ]
# snax = [
# ["TESTSNACK", (3,1)],
# ["TESTSNACK", (3,2)]
# ]
##
## elif mapName == "2paths":
##
## # mode, mustSave, grid, stars, planets
##
## mode = "puzzle"
## mustSave = 4
## gridSize = (8,6) # max 24 * 24 ; min 8 * 6
##
## # stars: class ("star"), step, color
## # scrolling backgrounds objects
## # (
## # class, -> "scroll", "float", "wrap", "cow"
## # minSpeed, -> 1
## # loc, -> (1,1)
## # imgFile, -> "fileName.png"
## # scale, -> (20,20) or "2X"
## # getAt, -> (0,0)
## # spinRange, -> 1.5
## # *floatSpeed, -> (.5,1)
## # **flashData -> (OFFTIME, VAR_RANGE)
## # )
##
## bkgObjData = [
## ("star", 6, BERRY),
## ]
##
## # user / static block
##
## userBlocks = [
## [ "arrow", "green", 1 ],
## [ "arrow", "yellow", 2 ],
## [ "warp", None, 2 ]
## ]
##
## goals = [ # green / blue / red / yellow / None (grey)
## [ (3, 5), "green" ],
## [ (8, 4), "yellow" ]
## ]
##
## spawns = [
## [ (2, 2), "south" ]
## ]
##
## sArrows = [
## [ (2, 6), None, "east" ]
## ]
##
## linkedArrows = [
## [ (4, 5), None, "hidden", "north", 1 ], # last param is switch
## [ (8, 6), None, "north", "west", 1],
## [ (8, 5), None, "west", "north", 1],
## [ (4, 6), None, "west", "hidden", 2]
## ]
##
## mines = [
## [ (4, 1) ],
## [ (5, 1) ],
## [ (6, 1) ],
## [ (8, 1) ],
## [ (6, 2) ],
## [ (8, 2) ],
## [ (8, 3) ],
## [ (5, 2) ],
## [ (6, 3) ],
## [ (7, 1) ],
## [ (7, 2) ],
## [ (7, 3) ],
## [ (7, 4) ]
## ]
##
## switches = [ # order assigns switch number
## [ (5, 5), 1 ],
## [ (3, 3), 2 ]
## ]
##
## # dinos
##
## dinoSets = [
## # [type, num, color, speed, spawn, delayStart, delayStep, direction]
## [ "delux", 2, "green", DSPEED, 1, 0, DSTEP ],
## [ "delux", 2, "yellow", DSPEED, 1, 60, DSTEP ]
## ]
##
## # message
##
## message = None
##
## elif mapName == "noArrows?":
##
##
## # mode, mustSave, grid, stars, planets
##
## mode = "puzzle"
## mustSave = 8
## gridSize = (10,6) # max 24 * 24 ; min 8 * 6
##
## # stars: class ("star"), step, color
## # scrolling backgrounds objects
## # (
## # class, -> "scroll", "float", "wrap", "cow"
## # minSpeed, -> 1
## # loc, -> (1,1)
## # imgFile, -> "fileName.png"
## # scale, -> (20,20) or "2X"
## # getAt, -> (0,0)
## # spinRange, -> 1.5
## # *floatSpeed, -> (.5,1)
## # **flashData -> (OFFTIME, VAR_RANGE)
## # )
##
## bkgObjData = [
## ("star", 6, BERRY),
## ]
##
## # user / static block
##
## userBlocks = [
## [ "warp", None, 10 ]
## ]
##
## goals = [ # green / blue / red / yellow / None (grey)
## [ (6, 5), None ],
## ]
##
## spawns = [
## [ (2, 3), "north" ],
## ]
##
## sArrows = [
## [ (1, 2), None, "east" ],
## [ (1, 5), None, "north" ],
## [ (2, 1), None, "west" ],
## [ (3, 1), None, "south" ],
## [ (3, 3), None, "south" ],
## [ (3, 4), None, "east" ],
## [ (4, 4), None, "south" ],
## [ (4, 5), None, "west" ],
## ]
##
## linkedArrows = [
## [ (6, 1), None, "hidden", "east", 1 ], # last param is switch
## [ (8, 5), None, "east", "hidden", 1],
## [ (7, 5), None, "east", "hidden", 2],
## [ (10, 3), None, "north", "west", 3],
## [ (9, 5), None, "east", "hidden", 4]
## ]
##
## mines = [
## [ (5, 1) ],
## [ (5, 2) ],
## [ (5, 3) ],
## [ (5, 4) ],
## [ (5, 5) ],
## [ (6, 4) ],
## [ (7, 4) ],
## [ (8, 4) ],
## [ (9, 4) ],
## [ (10, 4) ],
## [ (1, 6) ],
## [ (2, 6) ],
## [ (3, 6) ],
## [ (4, 6) ],
## [ (5, 6) ],
## [ (6, 6) ],
## [ (7, 6) ],
## [ (8, 6) ],
## [ (9, 6) ],
## [ (10, 6) ],
## [ (4, 3) ]
## ]
##
## switches = [ # [coords, switchNum]
## [ (6, 2), 1 ],
## [ (9, 1), 2 ],
## [ (10, 2), 3],
## [ (3, 5), 4 ]
## ]
##
## # dinos
##
## dinoSets = [
## # [type, num, color, speed, spawn, delayStart, delayStep]
## [ "delux", 1, "green", DSPEED, 1, 0, 30 ],
## [ "delux", 1, "red", DSPEED, 1, 30, 30 ],
## [ "delux", 1, "blue", DSPEED, 1, 60, 30 ],
## [ "delux", 1, "yellow", DSPEED, 1, 90, 30 ]
## ]
##
## # message
##
## message = None
##
##
## # -------------------------------------------------------------------------------------------------
#
# nyanData = [
# ("nyan", 2, gridSize, NYAN_FRAMES, "2X", (53*2-1,0), (4,0)),
# ("nyan", 2, gridSize, NYAN_FRAMES, "2X", (53*2-1,0), (4,0)),
# ("nyan", 2, gridSize, NYAN_FRAMES, "2X", (53*2-1,0), (4,0)),
# ("nyan", 2, gridSize, NYAN_FRAMES, "2X", (53*2-1,0), (4,0))
# ]
#
# # NYAN MODE /////////////////////// ?
# if modeSwitch.ModeSwitch.modes["NYAN"]:
# for nyancat in nyanData:
# bkgObjData.append(nyancat)
#
#NYAN_FRAMES = [
# "nyansmall1.png",
# "nyansmall2.png",
# "nyansmall3.png",
# "nyansmall4.png",
# "nyansmall5.png",
# "nyansmall6.png",
# "nyansmall7.png",
# "nyansmall8.png",
# "nyansmall9.png",
# "nyansmall10.png",
# "nyansmall11.png",
# "nyansmall12.png"
# ]
| |
from __future__ import division
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from debug_toolbar.panels import DebugPanel
try:
from line_profiler import LineProfiler, show_func
DJ_PROFILE_USE_LINE_PROFILER = True
except ImportError:
DJ_PROFILE_USE_LINE_PROFILER = False
from cStringIO import StringIO
import cProfile
from pstats import Stats
from colorsys import hsv_to_rgb
import os
class DjangoDebugToolbarStats(Stats):
    """pstats.Stats subclass that can locate the root of the call graph."""

    # Cached root function key; None until the first successful lookup.
    __root = None

    def get_root_func(self):
        """Return (and cache) the first profiled function with no callers."""
        if self.__root is None:
            for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
                if not callers:
                    self.__root = func
                    break
        return self.__root
class FunctionCall(object):
    """One node of the profiler call tree rendered by the panel.

    Wraps a single ``(filename, lineno, funcname)`` key of a
    pstats-style stats object, together with the rendering state
    (depth, id chain, HSV colour) the template needs.
    """

    def __init__(self, statobj, func, depth=0, stats=None,
                 id=0, parent_ids=None, hsv=(0, 0.5, 1)):
        """
        Args:
            statobj: Stats object holding the profile data.
            func: (filename, lineno, funcname) key into statobj.stats.
            depth: nesting depth of this node in the rendered tree.
            stats: optional (cc, nc, tt, ct) tuple; looked up from
                statobj when omitted.
            id: unique id of this node ('0_1_2' style once nested).
            parent_ids: ids of the ancestors of this node.
            hsv: base colour of this node as (hue, saturation, value).
        """
        self.statobj = statobj
        self.func = func
        if stats:
            self.stats = stats
        else:
            # Keep only (cc, nc, tt, ct); drop the trailing callers dict.
            self.stats = statobj.stats[func][:4]
        self.depth = depth
        self.id = id
        # BUG FIX: 'parent_ids' previously defaulted to a shared mutable
        # list ([]) that every instance built without the argument would
        # alias.  Use a None sentinel instead.
        self.parent_ids = [] if parent_ids is None else parent_ids
        self.hsv = hsv
        self._line_stats_text = None

    def parent_classes(self):
        # NOTE(review): this returns the bound method itself and never
        # computes anything -- it looks like dead/broken code.  Behaviour
        # is kept unchanged for template compatibility; confirm intent.
        return self.parent_classes

    def background(self):
        """Return the node colour as a CSS ``rgb(...)`` percentage string."""
        r, g, b = hsv_to_rgb(*self.hsv)
        return 'rgb(%f%%,%f%%,%f%%)' % (r * 100, g * 100, b * 100)

    def func_std_string(self):  # match what old profile produced
        """Return an HTML snippet describing this function's location."""
        func_name = self.func
        if func_name[:2] == ('~', 0):
            # special case for built-in functions
            name = func_name[2]
            if name.startswith('<') and name.endswith('>'):
                return '{%s}' % name[1:-1]
            else:
                return name
        else:
            file_name, line_num, method = self.func
            idx = file_name.find('/site-packages/')
            if idx > -1:
                # NOTE(review): '/site-packages/' is 15 chars, so idx + 14
                # keeps its trailing '/' as a leading slash -- verify this
                # is intentional.
                file_name = file_name[(idx + 14):]
            file_path, file_name = file_name.rsplit(os.sep, 1)
            return mark_safe('<span class="path">{0}/</span><span class="file">{1}</span> in <span class="func">{3}</span>(<span class="lineno">{2}</span>)'.format(
                file_path,
                file_name,
                line_num,
                method,
            ))

    def subfuncs(self):
        """Yield a child FunctionCall for every callee of this node."""
        i = 0
        h, s, v = self.hsv
        count = len(self.statobj.all_callees[self.func])
        for func, stats in self.statobj.all_callees[self.func].iteritems():
            i += 1
            # Spread hues across siblings; deeper levels get smaller steps.
            h1 = h + (i / count) / (self.depth + 1)
            if stats[3] == 0:
                s1 = 0
            else:
                # Saturation proportional to share of our cumulative time.
                s1 = s * (stats[3] / self.stats[3])
            yield FunctionCall(self.statobj,
                               func,
                               self.depth + 1,
                               stats=stats,
                               id=str(self.id) + '_' + str(i),
                               parent_ids=self.parent_ids + [self.id],
                               hsv=(h1, s1, 1))

    def count(self):
        """Total number of calls (the pstats ``nc`` field)."""
        return self.stats[1]

    def tottime(self):
        """Time spent in the function itself (the pstats ``tt`` field)."""
        return self.stats[2]

    def cumtime(self):
        """Cumulative time including subcalls (the pstats ``ct`` field)."""
        return self.stats[3]

    def tottime_per_call(self):
        """Average own-time per call; 0 when the function was never called."""
        cc, nc, tt, ct = self.stats
        if nc == 0:
            return 0
        return tt / nc

    def cumtime_per_call(self):
        """Average cumulative time per primitive call; 0 when never called."""
        cc, nc, tt, ct = self.stats
        if cc == 0:
            return 0
        return ct / cc

    def indent(self):
        """Pixel indent used by the template: 16px per depth level."""
        return 16 * self.depth

    def line_stats_text(self):
        """Return line_profiler output for this function, or False.

        Computed lazily on first access; False marks "no line stats".
        """
        if self._line_stats_text is None and DJ_PROFILE_USE_LINE_PROFILER:
            lstats = self.statobj.line_stats
            if self.func in lstats.timings:
                out = StringIO()
                fn, lineno, name = self.func
                show_func(fn, lineno, name, lstats.timings[self.func],
                          lstats.unit, stream=out)
                self._line_stats_text = out.getvalue()
            else:
                self._line_stats_text = False
        return self._line_stats_text
class ProfilingDebugPanel(DebugPanel):
    """
    Panel that profiles the current view with cProfile (plus per-line
    timings when line_profiler is installed) and renders the resulting
    call tree.
    """
    name = 'Profiling'
    template = 'debug_toolbar/panels/profiling.html'
    has_content = True

    def nav_title(self):
        return _('Profiling')

    def url(self):
        return ''

    def title(self):
        return _('Profiling')

    def _unwrap_closure_and_profile(self, func):
        """Register *func*, and recursively every function captured in
        its closure, with the line profiler.

        Uses the Python 2 attribute names (func_code / func_closure), so
        non-function objects are skipped by the hasattr guard.
        """
        if not hasattr(func, 'func_code'):
            return
        self.line_profiler.add_function(func)
        if func.func_closure:
            for cell in func.func_closure:
                if hasattr(cell.cell_contents, 'func_code'):
                    self._unwrap_closure_and_profile(cell.cell_contents)

    def process_view(self, request, view_func, view_args, view_kwargs):
        """Run the view under cProfile (and line_profiler when available)
        and return the view's response unchanged."""
        __traceback_hide__ = True
        self.profiler = cProfile.Profile()
        args = (request,) + view_args
        if DJ_PROFILE_USE_LINE_PROFILER:
            self.line_profiler = LineProfiler()
            self._unwrap_closure_and_profile(view_func)
            self.line_profiler.enable_by_count()
            out = self.profiler.runcall(view_func, *args, **view_kwargs)
            self.line_profiler.disable_by_count()
        else:
            self.line_profiler = None
            out = self.profiler.runcall(view_func, *args, **view_kwargs)
        return out

    def add_node(self, func_list, func, max_depth, cum_time=0.1):
        """Depth-first flatten of the call tree into *func_list*.

        Children cheaper than *cum_time* (cumulative seconds) are pruned
        unless they have line-profiler stats; recursion stops below
        *max_depth*.
        """
        func_list.append(func)
        func.has_subfuncs = False
        if func.depth < max_depth:
            for subfunc in func.subfuncs():
                if (subfunc.stats[3] >= cum_time or
                    (hasattr(self.stats, 'line_stats') and
                    (subfunc.func in self.stats.line_stats.timings))):
                    func.has_subfuncs = True
                    self.add_node(func_list, subfunc, max_depth, cum_time=cum_time)

    def process_response(self, request, response):
        """Collect the profile gathered in process_view and store the
        flattened call tree for the template.

        Does nothing when process_view never ran (no profiler attribute).
        """
        __traceback_hide__ = True
        if not hasattr(self, 'profiler'):
            return None
        self.profiler.create_stats()
        self.stats = DjangoDebugToolbarStats(self.profiler)
        if DJ_PROFILE_USE_LINE_PROFILER:
            self.stats.line_stats = self.line_profiler.get_stats()
        self.stats.calc_callees()
        root = FunctionCall(self.stats, self.stats.get_root_func(), depth=0)
        func_list = []
        # Prune children below 1/8 of the root's cumulative time.
        self.add_node(func_list, root, 10, root.stats[3] / 8)
        self.record_stats({'func_list': func_list})
| |
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, one sample per line, and omits
    zero-valued features, which makes it a good fit for sparse data.
    The first element of each line stores the target to predict.

    Parsing text is expensive: when loading the same file repeatedly,
    consider wrapping this loader with ``joblib.Memory.cache`` so later
    calls benefit from a memmapped copy of the first call's CSR result.

    Pairwise preference constraints ("qid" entries) present in the file
    are ignored unless ``query_id`` is True; they constrain which sample
    pairs may be combined by pairwise loss functions (e.g. in learning
    to rank), so that only pairs sharing a qid are considered.

    A faster API-compatible loader is also available at:
    https://github.com/mblondel/svmlight-loader

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) the file to load.  Paths ending in ".gz" or ".bz2"
        are decompressed on the fly; an integer is treated as a file
        descriptor.  File-likes and descriptors are not closed by this
        function and must be opened in binary mode.
    n_features : int or None
        Number of features to use.  If None it is inferred, which may
        vary between subsets sliced from a bigger dataset.
    multilabel : boolean, optional, default False
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based.  "auto"
        applies a heuristic check; like True, it is always safe.
    query_id : boolean, default False
        If True, also return the query_id array.
    dtype : numpy data type, default np.float64
        Data type of the returned arrays ``X`` and ``y``.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a
        list of tuples of length n_samples.
    query_id : array of shape (n_samples,)
        Only returned when query_id is set to True.

    See also
    --------
    load_svmlight_files : similar function for loading multiple files in
        this format, enforcing the same number of features on all of them.

    Examples
    --------
    To use joblib.Memory to cache the svmlight file::

        from sklearn.externals.joblib import Memory
        from sklearn.datasets import load_svmlight_file
        mem = Memory("./mycache")

        @mem.cache
        def get_data():
            data = load_svmlight_file("mysvmlightfile")
            return data[0], data[1]

        X, y = get_data()
    """
    # Single-file case: delegate to the multi-file loader.
    return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
                                     zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse one svmlight source and return its raw CSR components."""
    if hasattr(f, "read"):
        parsed = _load_svmlight_file(f, dtype, multilabel, zero_based,
                                     query_id)
    else:
        # XXX remove closing when Python 2.7+/3.1+ required
        with closing(_gen_open(f)) as fh:
            parsed = _load_svmlight_file(fh, dtype, multilabel, zero_based,
                                         query_id)
    actual_dtype, data, ind, indptr, labels, query = parsed

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = np.frombuffer(labels, np.float64)
    data = np.frombuffer(data, actual_dtype)
    indices = np.frombuffer(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)  # never empty
    query = np.frombuffer(query, np.int64)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load dataset from multiple files in SVMlight format

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the samples vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constraint the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.
    n_features : int or None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.
        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.
    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
    zero_based : boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe.
    query_id : boolean, defaults to False
        If True, will return the query_id array for each file.
    dtype : numpy data type, default np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).

    If query_id is set to True, this will return instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i])

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    # Parse every file first; each entry of r is the tuple
    # (data, indices, indptr, labels, query) from _open_and_load.
    r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
         for f in files]
    # "auto" heuristic: if no file ever references column 0, assume the
    # indices are one-based and shift them all down.  The subtraction is
    # in place and deliberately mutates the index arrays stored in r.
    if (zero_based is False
        or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
        for ind in r:
            indices = ind[1]
            indices -= 1
    # Smallest feature count consistent with all files (max index + 1).
    n_f = max(ind[1].max() for ind in r) + 1
    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))
    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        # Flat output: X, y (and optionally the query ids) per input file.
        result += X, y
        if query_id:
            result.append(query_values)
    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write (X, y[, query_id]) to the binary file object *f* in
    svmlight format.  All validation happens in dump_svmlight_file."""
    X_is_sp = int(hasattr(X, "tocsr"))
    y_is_sp = int(hasattr(y, "tocsr"))

    # Integer data keeps integer formatting; everything else uses %.16g.
    value_pattern = u("%d:%d") if X.dtype.kind == 'i' else u("%d:%.16g")
    label_pattern = u("%d") if y.dtype.kind == 'i' else u("%.16g")

    if query_id is not None:
        line_pattern = u("%s") + u(" qid:%d") + u(" %s\n")
    else:
        line_pattern = u("%s") + u(" %s\n")

    if comment:
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))
        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())

    for i in range(X.shape[0]):
        # (column, value) pairs of the non-zero entries of row i.
        if X_is_sp:
            span = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[span], X.data[span])
        else:
            nz = X[i] != 0
            row = zip(np.where(nz)[0], X[i, nz])
        s = " ".join(value_pattern % (j + one_based, x) for j, x in row)

        if multilabel:
            if y_is_sp:
                nz_labels = y[i].nonzero()[1]
            else:
                nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % j for j in nz_labels)
        else:
            labels_str = label_pattern % (y.data[i] if y_is_sp else y[i])

        if query_id is not None:
            feat = (labels_str, query_id[i], s)
        else:
            feat = (labels_str, s)
        f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.
    y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
        Target values. Class labels must be an
        integer or float, or array-like objects of integer or float for
        multilabel classifications.
    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.
    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).
    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.
    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).
    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

        .. versionadded:: 0.17
           parameter *multilabel* to support multilabel datasets.
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")     # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")
    # Validate y's shape: a sparse y must be a column vector and a dense y
    # a 1-d array, unless multilabel output was requested.
    yval = check_array(y, accept_sparse='csr', ensure_2d=False)
    if sp.issparse(yval):
        if yval.shape[1] != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples, 1),"
                             " got %r" % (yval.shape,))
    else:
        if yval.ndim != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples,), got %r"
                             % (yval.shape,))
    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != yval.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], yval.shape[0]))
    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if yval is y and hasattr(yval, "sorted_indices"):
        y = yval.sorted_indices()
    else:
        y = yval
        if hasattr(y, "sort_indices"):
            # check_array already copied, so in-place sort is safe here.
            y.sort_indices()
    if Xval is X and hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        # query_id must provide exactly one qid per sample.
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    # Accept either an open binary file-like or a path to create.
    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| |
#!/usr/bin/env python
import csv
import sys
import os
import vxargs
#from monitor import parser as parsermodule
#from monitor import common
#from automate import *
import csv
from glob import glob
import os
import time
def usage():
    """Return the command-line usage text shown by the option parser."""
    text = """
fetch.py -- run a short bash script across many machines.
    The list of nodes is taken to be all MLab nodes, unless
    otherwise specified.
Most common parameters:
    --cmd <cmdname>
        This looks for a script named 'scripts/<cmdname>.sh'
        and writes logs to 'logs/<cmdname>'.
        You can also define a 'post' script to automatically
        run on all the output logs from <cmdname>.  If this
        file 'scripts/<cmdname>-post.sh' exists, it is
        executed with the log directory as its only argument.
    --rerun <extension=content>
        Rerun a command using the log files as the source of
        the node list.  This is helpful for rerunning
        commands on just a few nodes based on previous runs.
        status=255 matches all nodes with an error
        status=0   matches all nodes with a success
Examples:
    ./fetch.py --cmd procs
    ./fetch.py --cmd procs --rerun status=255 --list
    ./fetch.py --cmd procs --rerun status=255
"""
    return text
def time_to_str(t):
    """Format Unix timestamp *t* as a UTC 'YYYY/MM/DD HH:MM:SS' string."""
    utc_parts = time.gmtime(t)
    return time.strftime("%Y/%m/%d %H:%M:%S", utc_parts)
def csv_to_hash(r):
    """Collapse an iterable of (key, value) rows into a dict.

    A key seen once maps to its single value; a key seen multiple times
    maps to the list of its values, in row order.
    """
    ret = {}
    for row in r:
        key, val = row[0], row[1]
        if key not in ret:
            # First occurrence: store the bare value.
            ret[key] = val
            continue
        # Repeated key: promote to (or extend) a list of values.
        existing = ret[key]
        if isinstance(existing, list):
            existing.append(val)
        else:
            ret[key] = [existing, val]
    return ret
def getcsv(file):
    """Read CSV file *file* and return it as a key->value(s) dict.

    Fix: the original opened the file without ever closing it; use a
    context manager so the handle is released. csv_to_hash fully
    consumes the reader before the file is closed.
    """
    with open(file, 'r') as fh:
        return csv_to_hash(csv.reader(fh))
def get_hostlist_from_dir(dirname, which):
    """Build a vxargs-style node list from log files in *dirname*.

    Args:
        dirname: directory containing per-node log files named
            '<hostname>.<suffix>'.
        which: either 'suffix' (match every *.suffix file) or
            'suffix=value' (match only files whose stripped contents
            contain *value* as a substring, e.g. 'status=255').
    Returns:
        A list of [hostname, ''] pairs, as expected by vxargs.
    """
    # 'status=255' -> ('status', '255'); bare 'status' -> value == ''.
    suffix, _, value = which.partition("=")
    matches = glob(dirname + "/*.%s" % suffix)
    if value:
        filtered = []
        for fname in matches:
            # Fix: close the handle explicitly (the original leaked it).
            with open(fname, 'r') as fh:
                contents = fh.read().strip()
            if value in contents:
                filtered.append(fname)
        matches = filtered
    # Strip the directory and the '.<suffix>' tail to recover the hostname.
    return [[os.path.basename(fname)[:-(len(suffix) + 1)], '']
            for fname in matches]
def build_vx_args_external(shell_cmd):
    """Split an external command line into an argv list for vxargs."""
    return shell_cmd.split()
def vx_start_external(nodelist, outdir, cmd, timeout=0, threadcount=20):
    """Fan the external command *cmd* out across *nodelist* via vxargs,
    writing per-node logs into *outdir*."""
    argv = build_vx_args_external(cmd)
    vxargs.start(None, threadcount, nodelist, outdir, False, argv, timeout)
def build_vx_args(shell_cmd):
    """Build the ssh argv used to run *shell_cmd* on a node.

    vxargs substitutes '[]' with each hostname; host-key checking is
    disabled so unknown nodes do not prompt.
    """
    ssh_options = "-q -o UserKnownHostsFile=junkssh -o StrictHostKeyChecking=no"
    argv = ("""ssh -p806 %s root@[] """ % ssh_options).split()
    return argv + [shell_cmd]
def vx_start(nodelist, outdir, cmd, timeout=0, threadcount=20):
    """Run *cmd* on every node in *nodelist* over ssh via vxargs,
    writing per-node logs into *outdir*."""
    vxargs.start(None, threadcount, nodelist, outdir, False,
                 build_vx_args(cmd), timeout)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage=usage())
parser.set_defaults(outdir=None,
timeout=120,
simple=False,
threadcount=20,
external=False,
myopsfilter=None,
nodelist=None,
run=False,
list=False,
rerun=None,
template=None,
cmdline=None,
cmdfile=None,)
parser.add_option("", "--cmd", dest="cmdfile", metavar="<cmdname>",
help="This looks for a script named 'scripts/<cmdname>.sh' and writes logs to 'logs/<cmdname>'.")
parser.add_option("", "--cmdline", dest="cmdline", metavar="<cmdline>",
help="Uses string as explicit command to run on nodes. Writes logs to --outdir <outdir>.")
parser.add_option("", "--rerun", dest="rerun", metavar="ext[=val]",
help="Rerun fetch with the files indicated by the "+
"extension given to --rerun. For example, --rerun "+
"status=255, would rerun fetch on all files in "+
"--outdir that end with .status and have a value of 255")
parser.add_option("", "--outdir", dest="outdir", metavar="dirname",
help="Name of directory to place output. If unset, "+
"automatically set to 'logs/<cmd>/'")
parser.add_option("", "--nodelist", dest="nodelist", metavar="FILE",
help="Provide the input file for the list of objects")
parser.add_option("", "--list", dest="list", action="store_true",
help="List the nodes the command would use; do nothing else.")
parser.add_option("", "--timeout", dest="timeout", metavar="120",
help="Stop trying to execute after <timeout> seconds.")
parser.add_option("", "--threadcount", dest="threadcount", metavar="20",
help="Number of simultaneous threads.")
parser.add_option("", "--external", dest="external", action="store_true",
help="Run commands external to the server. The default is internal.")
parser.add_option("", "--template", dest="template",
help="Command template for external commands; substitutes [] with hostname.")
(config, args) = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
auto_outdir=None
auto_script=None
auto_script_post=None
if config.cmdfile:
if os.path.exists(config.cmdfile):
f = open(config.cmdfile,'r')
else:
auto_script = "scripts/" + config.cmdfile + ".sh"
auto_script_post = "scripts/" + config.cmdfile + "-post.sh"
f = open(auto_script, 'r')
cmd_str = f.read()
auto_outdir="logs/" + config.cmdfile.split(".")[0]
elif config.template and config.external:
cmd_str = config.template
elif config.cmdline:
cmd_str = config.cmdline
else:
parser.print_help()
sys.exit(1)
if config.outdir == None and auto_outdir is None:
outdir="default_outdir"
elif config.outdir == None and auto_outdir is not None:
outdir=auto_outdir
else:
outdir=config.outdir
if not os.path.exists(outdir):
os.system('mkdir -p %s' % outdir)
assert os.path.exists(outdir)
if config.nodelist is None and config.rerun is None:
os.system("./plcquery.py --action=checksession")
filename="/tmp/nodelist.txt"
cmd="./plcquery.py --action=get --type node --filter hostname=*.measurement-lab.org " + \
"--fields hostname > %s" % filename
os.system(cmd)
nodelist = vxargs.getListFromFile(open(filename,'r'))
elif config.nodelist is not None and \
os.path.exists(str(config.nodelist)) and os.path.isfile(config.nodelist):
nodelist = vxargs.getListFromFile(open(config.nodelist,'r'))
elif config.rerun is not None and os.path.isdir(outdir):
if config.rerun:
nodelist = get_hostlist_from_dir(outdir, config.rerun)
else:
nodelist = get_hostlist_from_dir(outdir, "out")
else:
# probably no such file.
raise Exception("Please specifiy a nodelist or --rerun directory" % config.nodelist)
if config.list:
for n in sorted(nodelist, cmp, lambda x: x[0][::-1]):
print n[0]
sys.exit(0)
if config.external or config.template is not None:
vx_start_external(nodelist, outdir, cmd_str, int(config.timeout), int(config.threadcount))
else:
vx_start(nodelist, outdir, cmd_str, int(config.timeout), int(config.threadcount))
if auto_script_post is not None and os.path.isfile(auto_script_post):
os.system("bash %s %s" % (auto_script_post, outdir))
| |
# coding=utf-8
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training setup."""
import json
import os.path
import shutil
import math
import numpy as np
import tensorflow as tf
from absl import flags
from scipy.stats import entropy
from sklearn.metrics import pairwise_distances
from tqdm import trange
from libml import data, utils
import sys
import time
FLAGS = flags.FLAGS
# Command-line flags controlling training, reporting/checkpoint cadence
# (in kibi-samples), and the active-learning growth schedule.
flags.DEFINE_string('train_dir', './MMA_exp/',
                    'Folder where to save training data.')
flags.DEFINE_float('lr', 0.0001, 'Learning rate.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('train_kimg', 1 << 14, 'Training duration in kibi-samples.')
flags.DEFINE_integer('report_kimg', 64, 'Report summary period in kibi-samples.')
flags.DEFINE_integer('save_kimg', 64, 'Save checkpoint period in kibi-samples.')
flags.DEFINE_integer('grow_size', 250, 'Grow the number of labeled by grow_size.')
flags.DEFINE_string('grow_by', 'max-direct', 'Grow by this measure.')
flags.DEFINE_integer('keep_ckpt', 50, 'Number of checkpoints to keep.')
flags.DEFINE_string('eval_ckpt', '', 'Checkpoint to evaluate. If provided, do not do training, just do eval.')
class Model:
    """Base class for a TF1 graph-mode model.

    Builds the graph (via the subclass's `model`), wires a global step
    counted in *samples* (incremented by FLAGS.batch per train step),
    creates the train_dir layout (tf/ for checkpoints, args/ for the
    hyper-parameter dump) and prints a config/variable summary.
    Subclasses must implement `model` and `add_summaries`.
    """
    def __init__(self, train_dir, dataset, **kwargs):
        # train_dir is suffixed with a name derived from the hyper-params
        # so different configurations never share checkpoints.
        self.train_dir = os.path.join(train_dir, self.experiment_name(**kwargs))
        self.params = utils.EasyDict(kwargs)
        self.dataset = dataset
        self.session = None
        # tmp holds transient state: queued log lines and eval caches.
        self.tmp = utils.EasyDict(print_queue=[], cache=utils.EasyDict())
        # NOTE: the step must exist before model() so ops can use it.
        self.step = tf.train.get_or_create_global_step()
        self.ops = self.model(**kwargs)
        # The step counts processed samples, not iterations.
        self.ops.update_step = tf.assign_add(self.step, FLAGS.batch)
        self.add_summaries(**kwargs)
        print(' Config '.center(80, '-'))
        print('train_dir', self.train_dir)
        print('%-32s %s' % ('Model', self.__class__.__name__))
        print('%-32s %s' % ('Dataset', dataset.name))
        for k, v in sorted(kwargs.items()):
            print('%-32s %s' % (k, v))
        print(' Model '.center(80, '-'))
        # Tabulate every trainable variable: name, #params, shape.
        to_print = [tuple(['%s' % x for x in (v.name, np.prod(v.shape), v.shape)]) for v in utils.model_vars(None)]
        to_print.append(('Total', str(sum(int(x[1]) for x in to_print)), ''))
        sizes = [max([len(x[i]) for x in to_print]) for i in range(3)]
        fmt = '%%-%ds %%%ds %%%ds' % tuple(sizes)
        for x in to_print[:-1]:
            print(fmt % x)
        print()
        print(fmt % to_print[-1])
        print('-' * 80)
        self._create_initial_files()
        self.work_unit = None
        self.measurement = {}
    @property
    def arg_dir(self):
        """Directory holding the saved hyper-parameter JSON."""
        return os.path.join(self.train_dir, 'args')
    @property
    def checkpoint_dir(self):
        """Directory holding TensorFlow checkpoints."""
        return os.path.join(self.train_dir, 'tf')
    def train_print(self, text):
        """Queue *text* to be printed by the training loop (tqdm-safe)."""
        self.tmp.print_queue.append(text)
    def _create_initial_files(self):
        """Create the checkpoint/args directories and dump the args."""
        for dir in (self.checkpoint_dir, self.arg_dir):
            if not tf.gfile.IsDirectory(dir):
                tf.gfile.MakeDirs(dir)
        self.save_args()
    def _reset_files(self):
        """Delete train_dir entirely and recreate a fresh layout."""
        shutil.rmtree(self.train_dir)
        self._create_initial_files()
    def save_args(self, **extra_params):
        """Write the model hyper-parameters (plus extras) to args.json."""
        with tf.gfile.Open(os.path.join(self.arg_dir, 'args.json'), 'w') as f:
            json.dump({**self.params, **extra_params}, f, sort_keys=True, indent=4)
    @classmethod
    def load(cls, train_dir):
        """Rebuild a model instance from a saved args.json in *train_dir*."""
        with open(os.path.join(train_dir, 'args/args.json'), 'r') as f:
            params = json.load(f)
        instance = cls(train_dir=train_dir, **params)
        # __init__ re-appends the experiment name; restore the exact dir.
        instance.train_dir = train_dir
        return instance
    def experiment_name_helper(self, exclude, **kwargs):
        """Derive a directory-name suffix from the hyper-parameters.

        Skips keys in *exclude*, None values and 'nclass'; 'arch' is kept
        bare, booleans appear as flags, everything else as key+value.
        """
        args = []
        for x, y in sorted(kwargs.items()):
            if x in exclude:
                continue
            if y is None or x == 'nclass':
                continue
            if x in ['arch']:
                args.append(str(y))
            elif isinstance(y, bool):
                args += [x] if y else []
            else:
                args.append(x + str(y))
        return '_'.join([self.__class__.__name__] + args)
    def experiment_name(self, **kwargs):
        """Experiment name with no excluded keys."""
        return self.experiment_name_helper(exclude=[], **kwargs)
    def eval_mode(self, ckpt=None):
        """Open a session and restore *ckpt* (or the latest checkpoint).

        Returns self so calls can be chained.
        """
        self.session = tf.Session(config=utils.get_config())
        saver = tf.train.Saver()
        if ckpt is None:
            ckpt = utils.find_latest_checkpoint(self.checkpoint_dir)
        else:
            ckpt = os.path.abspath(ckpt)
        saver.restore(self.session, ckpt)
        self.tmp.step = self.session.run(self.step)
        print('Eval model %s at global_step %d' % (self.__class__.__name__, self.tmp.step))
        return self
    def model(self, **kwargs):
        """Build and return the model ops; implemented by subclasses."""
        raise NotImplementedError()
    def add_summaries(self, **kwargs):
        """Attach TensorBoard summaries; implemented by subclasses."""
        raise NotImplementedError()
class ClassifySemi(Model):
    """Semi-supervised classification with active-learning label growth."""

    def __init__(self, train_dir, dataset, nclass, **kwargs):
        self.nclass = nclass
        # Default labelling budget: every training image.
        self.max_labeled_size = dataset.images.shape[0]
        Model.__init__(self, train_dir, dataset, nclass=nclass, **kwargs)

    def train_step(self, train_session, data_labeled, data_unlabeled):
        """Run one optimization step on a labeled + unlabeled batch."""
        x, y = self.dataset.session.run([data_labeled, data_unlabeled])
        self.tmp.step = train_session.run([self.ops.train_op, self.ops.update_step],
                                          feed_dict={self.ops.x: x['image'],
                                                     self.ops.y: y['image'],
                                                     self.ops.label: x['label']})[1]

    def train_for_contGrow(self, train_nimg, past_nimg, report_nimg,
                           grow_nimg, grow_size, max_labeled_size):
        """Function for training the model.

        Args:
            train_nimg: will train for train_nimg/batch iterations
            past_nimg: has previously trained for train_nimg/batch iterations
            report_nimg: report results every report_nimg samples
            grow_nimg: grow every grow_nimg samples
            grow_size: number of samples to query each time
            max_labeled_size: maximum labelling budget (-1 = no limit)
        """
        if max_labeled_size == -1:
            max_labeled_size = self.dataset.labeled_indices.size + self.dataset.unlabeled_indices.size
        if grow_nimg > 0:
            print('grow_kimg:', grow_nimg >> 10)
            print('grow_by: ', FLAGS.grow_by)
            print('grow_size:', grow_size)
        else:
            # Non-positive period disables growth within this run.
            grow_nimg = train_nimg
            print('Will not grow.')
        print('----')
        if FLAGS.eval_ckpt:
            # Evaluation-only mode. (Fix: dropped the unused, misspelled
            # 'accurices' local.)
            self.eval_checkpoint(FLAGS.eval_ckpt)
            return
        batch = FLAGS.batch
        scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=FLAGS.keep_ckpt,
                                                          pad_step_number=10))
        with tf.train.MonitoredTrainingSession(
                scaffold=scaffold,
                checkpoint_dir=self.checkpoint_dir,
                config=utils.get_config(),
                save_checkpoint_steps=FLAGS.save_kimg << 10,
                save_summaries_steps=report_nimg - batch) as train_session:
            self.session = train_session._tf_sess()
            self.tmp.step = self.session.run(self.step)
            need_update = True
            while self.tmp.step < train_nimg:
                if grow_nimg > 0 and (self.tmp.step - past_nimg) % grow_nimg == 0:
                    # Time to grow the labeled set.
                    with self.dataset.graph.as_default():
                        labeled_indices = utils.fixlen_to_idx(self.session.run(self.ops.label_index))
                        self.dataset.generate_labeled_and_unlabeled(list(labeled_indices))
                        # Unlabeled stream used for uncertainty scoring:
                        # not shuffled, not repeated.
                        unlabeled_data = tf.data.Dataset.from_tensor_slices(self.dataset.unlabeled_indices) \
                            .map(self.dataset.tf_get) \
                            .map(self.dataset.augment[1]) \
                            .batch(batch) \
                            .prefetch(16) \
                            .make_one_shot_iterator() \
                            .get_next()
                    need_update |= self.grow_labeled(FLAGS.grow_by, grow_size,
                                                     max_labeled_size, unlabeled_data)
                if need_update:
                    # Rebuild the training input pipelines to reflect the
                    # new labeled/unlabeled split.
                    need_update = False
                    labeled_indices = utils.fixlen_to_idx(self.session.run(self.ops.label_index))
                    self.dataset.generate_labeled_and_unlabeled(list(labeled_indices))
                    with self.dataset.graph.as_default():
                        train_labeled = tf.data.Dataset.from_tensor_slices(self.dataset.labeled_indices) \
                            .repeat() \
                            .shuffle(FLAGS.shuffle) \
                            .map(self.dataset.tf_get) \
                            .map(self.dataset.augment[0]) \
                            .batch(batch).prefetch(16)
                        train_labeled = train_labeled.make_one_shot_iterator().get_next()
                        train_unlabeled = tf.data.Dataset.from_tensor_slices(self.dataset.unlabeled_indices) \
                            .repeat() \
                            .shuffle(FLAGS.shuffle) \
                            .map(self.dataset.tf_get) \
                            .map(self.dataset.augment[1]) \
                            .batch(batch) \
                            .prefetch(16)
                        train_unlabeled = train_unlabeled.make_one_shot_iterator().get_next()
                    print('# of labeled/unlabeled samples to be used:',
                          self.dataset.labeled_indices.size,
                          self.dataset.unlabeled_indices.size)
                # The actual training
                loop = trange(self.tmp.step % report_nimg,
                              report_nimg,
                              batch,
                              leave=False,
                              unit='img',
                              unit_scale=batch,
                              desc='Epoch %d/%d' % (1 + (self.tmp.step // report_nimg),
                                                    train_nimg // report_nimg))
                for _ in loop:
                    self.train_step(train_session, train_labeled, train_unlabeled)
                    while self.tmp.print_queue:
                        loop.write(self.tmp.print_queue.pop(0))
        # Flush anything still queued after the session has closed.
        while self.tmp.print_queue:
            print(self.tmp.print_queue.pop(0))

    def grow_labeled(self, grow_by_, grow_size, max_labeled_size, unlabeled_data):
        """Grow the labeled set.

        Args:
            grow_by_: specifies the AL method used to grow. It consists of 3 parts:
                1) uncertainty measurement: max, std, entropy, diff2, with
                   optional suffix .aug denoting using 2 augmentations of samples
                2) diversification method: direct, kmeanprop, id (info density)
                3) for any diversification method, embd means calculating
                   distance with embedding of the sample rather than the original
                Examples include diff2.aug-direct, max-kmeanprop-embd, random
                kmeanprop: cluster all unlabeled images, pick low confidence
                    samples from each cluster where #_picked_from_cluster_i
                    is proportional to size_of_cluster_i
                id: cosine is usually the best and beta=1
            grow_size: number of samples to query
            max_labeled_size: maximum labelling budget
            unlabeled_data: currently unlabeled data
        Returns:
            Whether the labeled set has grown with new labels or not.
        """
        if grow_size == 0:
            return False

        def parse_grow_by(grow_by_):
            """Parse string grow_by_. For example, max.aug-kmeanprop-embd gives
            grow_by = ['max', 'kmeanprop', 'embd'], grow_by_aug = True.
            """
            grow_by = grow_by_.split('-')
            grow_by += [''] * (3 - len(grow_by))
            # BUG fix: detect the '.aug' suffix *before* stripping it; the
            # original stripped first, so grow_by_aug was always False.
            grow_by_aug = grow_by[0].endswith('.aug')
            grow_by[0] = grow_by[0].split('.')[0]
            assert grow_by[0] in ['random', 'max', 'std', 'entropy', 'diff2']
            # check if the option is valid
            if grow_by[0] != 'random':
                assert grow_by[1] in ['direct', 'kmeanprop', 'id']
                if grow_by[1] != 'direct':
                    assert grow_by[2] in ['', 'embd']
                # std cannot be used with id
                if grow_by[1] == 'id':
                    assert grow_by[0] != 'std'
            return grow_by, grow_by_aug

        grow_by, grow_by_aug = parse_grow_by(grow_by_)
        n_labeled = self.dataset.labeled_indices.size
        n_unlabeled = self.dataset.unlabeled_indices.size
        if max_labeled_size > 0 and n_labeled >= max_labeled_size:
            print('Currently have {} labeled. Max # ({}) reached. '
                  'Do not grow.'.format(n_labeled, max_labeled_size))
            return False
        if max_labeled_size > 0 and max_labeled_size - n_labeled < grow_size:
            # Clamp the query size to the remaining budget.
            grow_size = max_labeled_size - n_labeled
            print('Labeling budget is adjusted to {}'.format(grow_size))
        if n_unlabeled <= grow_size:
            new_labeled_indices = self.dataset.unlabeled_indices
            print('Not enough unlabeled samples left, will use all those left.')
        elif grow_by[0] == 'random':  # randomly select
            new_labeled_indices = np.random.choice(self.dataset.unlabeled_indices,
                                                   grow_size,
                                                   replace=False)
        else:
            def diff2(p):
                """Difference between the top 2 (highest - 2nd highest)."""
                psorted = np.sort(p)
                return psorted[-1] - psorted[-2]

            # Each measure is oriented so that SMALLER means less
            # confident (entropy is negated for that reason).
            measure2func = {'max': lambda p: np.max(p),
                            'std': lambda p: np.std(p),
                            'entropy': lambda p: -entropy(p),  # negation, smaller the better
                            'diff2': lambda p: diff2(p),
                            }
            unlabeled_images = self.dataset.images[self.dataset.unlabeled_indices]
            # Get prediction and confidence
            if not grow_by_aug:
                # If not using augmentation, get prediction of the original samples
                predictions = np.concatenate(
                    [self.session.run(self.ops.classify_op,
                                      feed_dict={self.ops.x: unlabeled_images[x:x + FLAGS.batch]})
                     for x in range(0, n_unlabeled, FLAGS.batch)],
                    axis=0)
            else:
                # If using augmentation, get predictions of two augmentations of each sample
                predictions0, predictions1 = [], []
                # Get predictions batch by batch
                for i in range(int(math.ceil(n_unlabeled / FLAGS.batch))):
                    unlabeled_images_aug = self.dataset.session.run(unlabeled_data)
                    unlabeled_images_aug = unlabeled_images_aug['image']
                    unlabeled_images0 = unlabeled_images_aug[:, 0]
                    unlabeled_images1 = unlabeled_images_aug[:, 1]
                    # Predict
                    predictions0.append(self.session.run(self.ops.classify_op,
                                                         feed_dict={self.ops.x: unlabeled_images0}))
                    predictions1.append(self.session.run(self.ops.classify_op,
                                                         feed_dict={self.ops.x: unlabeled_images1}))
                # Concatenate list of np.array into one np.array
                predictions0 = np.concatenate(predictions0, axis=0)
                predictions1 = np.concatenate(predictions1, axis=0)
                # Average the two predictions
                predictions = (predictions0 + predictions1) / 2.0
            # Measure the confidence of each sample
            confidences = np.array([measure2func[grow_by[0]](p) for p in predictions])
            # If "direct", take directly the least confident
            if grow_by[1] == 'direct':
                less_confident_idx = np.argpartition(confidences, grow_size)[:grow_size]
                new_labeled_indices = self.dataset.unlabeled_indices[less_confident_idx]
            # For kmeanprop and id, cluster/measure similarity of the whole unlabeled set
            elif grow_by[1] in ['kmeanprop', 'id']:
                if grow_by[2] == 'embd':  # get embedding of data
                    unlabeled_images = np.concatenate([self.session.run(self.ops.embedding_op,
                                                                        feed_dict={self.ops.x: unlabeled_images[x:x + FLAGS.batch]})
                                                       for x in range(0, unlabeled_images.shape[0], FLAGS.batch)],
                                                      axis=0)
                unlabeled_images = unlabeled_images.reshape(unlabeled_images.shape[0], -1)
                if grow_by[1] == 'kmeanprop':
                    # Perform k-means with 20 clusters and pick from each cluster those w/
                    # lowest confidence.
                    selected_idx = utils.get_low_confidence_from_each_clusters(unlabeled_images,
                                                                               20,
                                                                               grow_size,
                                                                               confidences)
                    new_labeled_indices = self.dataset.unlabeled_indices[selected_idx]
                elif grow_by[1] == 'id':
                    # Compute pairwise distance
                    avg_dists = pairwise_distances(unlabeled_images, metric='cosine').mean(axis=1)
                    # Compute uncertainty measurement and get final measurement
                    if grow_by[0] in ['max', 'diff2']:
                        info_measure = (1 - confidences) * avg_dists
                    elif grow_by[0] == 'entropy':
                        info_measure = -confidences * avg_dists
                    else:
                        raise ValueError
                    selected_idx = np.argpartition(info_measure, -grow_size)[-grow_size:]
                    new_labeled_indices = self.dataset.unlabeled_indices[selected_idx]
        # update labeled_indices
        combined_labeled_indices = list(self.dataset.labeled_indices) + list(new_labeled_indices)
        self.dataset.generate_labeled_and_unlabeled(combined_labeled_indices)
        self.session.run(self.ops.update_label_index,
                         feed_dict={self.ops.label_index_input:
                                    utils.idx_to_fixlen(self.dataset.labeled_indices,
                                                        self.dataset.ntrain)})
        # BUG fix: the original passed three arguments to a two-placeholder
        # format string; print the two counts the message promises.
        print('Now have #labeled/unlabeled: {} {}'.format(
            self.dataset.labeled_indices.size,
            self.dataset.unlabeled_indices.size))
        return True  # labeled data added

    def eval_checkpoint(self, ckpt=None):
        """Restore *ckpt*, report accuracies and dump them to JSON.

        Returns the [labeled, unlabeled, test] accuracies as floats.
        """
        self.eval_mode(ckpt)
        # BUG fix: eval_stats returns a float32 np.array; with a list on
        # the left, '[step] + array' broadcast-added instead of
        # concatenating, breaking the 4-field format below. Convert to a
        # plain list of floats first.
        accuracies = [float(a) for a in self.eval_stats()]
        print('kimg %-5d accuracy labeled/unlabeled/test %.2f %.2f %.2f' % tuple([self.tmp.step >> 10] + accuracies))
        with tf.gfile.Open(ckpt + '_res.json', 'w') as f:
            output = {'ckpt': self.tmp.step,
                      'labeled': accuracies[0],
                      'unlabeled': accuracies[1],
                      # BUG fix: index 3 was out of range; test acc is [2].
                      'test': accuracies[2]}
            # BUG fix: json.dump takes (obj, fp); the args were swapped.
            json.dump(output, f)
        return accuracies

    def eval_stats(self, batch=None, feed_extra=None, classify_op=None):
        """Evaluate accuracy on the labeled/unlabeled/test subsets.

        Sample collection is cached in self.tmp.cache on first use.
        Returns a float32 array [labeled, unlabeled, test]; a subset that
        is unavailable (e.g. no unlabeled samples) reports -1.
        """
        def collect_samples(data):
            """Materialize an (images, labels) pair from a tf.data stream."""
            data_it = data.batch(1).prefetch(16).make_one_shot_iterator().get_next()
            images, labels = [], []
            while 1:
                try:
                    v = self.dataset.session.run(data_it)
                except tf.errors.OutOfRangeError:
                    break
                images.append(v['image'])
                labels.append(v['label'])
            images = np.concatenate(images, axis=0)
            labels = np.concatenate(labels, axis=0)
            return images, labels

        if 'test' not in self.tmp.cache:
            with self.dataset.graph.as_default():
                self.tmp.cache.test = collect_samples(self.dataset.test)
                self.tmp.cache.train_labeled = collect_samples(
                    tf.data.Dataset.from_tensor_slices(self.dataset.labeled_indices).map(self.dataset.tf_get)
                )
                if self.dataset.unlabeled_indices.size > 0:
                    all_unlabeled_indices = self.dataset.unlabeled_indices
                    if self.dataset.no_label_indices is not None:
                        all_unlabeled_indices = np.concatenate((all_unlabeled_indices, self.dataset.no_label_indices))
                    self.tmp.cache.train_unlabeled = collect_samples(
                        tf.data.Dataset.from_tensor_slices(all_unlabeled_indices).map(self.dataset.tf_get)
                    )
        batch = batch or FLAGS.batch
        classify_op = self.ops.classify_op if classify_op is None else classify_op
        accuracies = []
        for subset in ['train_labeled', 'train_unlabeled', 'test']:
            if subset not in self.tmp.cache:
                accuracies.append(-1)
                continue
            images, labels = self.tmp.cache[subset]
            predicted = np.concatenate([
                self.session.run(classify_op, feed_dict={
                    self.ops.x: images[x:x + batch], **(feed_extra or {})})
                for x in range(0, images.shape[0], batch)
            ], axis=0)
            accuracies.append((predicted.argmax(1) == labels).mean() * 100)
        self.train_print('kimg %-5d accuracy labeled/unlabeled/test %.2f %.2f %.2f' % tuple([self.tmp.step >> 10] + accuracies))
        return np.array(accuracies, 'f')

    def add_summaries(self, feed_extra=None, **kwargs):
        """Attach accuracy summaries computed via a py_func around eval_stats."""
        del kwargs

        def gen_stats():
            return self.eval_stats(feed_extra=feed_extra)

        accuracies = tf.py_func(gen_stats, [], tf.float32)
        tf.summary.scalar('accuracy/train_labeled', accuracies[0])
        # NOTE(review): the 'train_unlabled' tag typo is kept on purpose —
        # renaming it would break continuity of existing TensorBoard logs.
        tf.summary.scalar('accuracy/train_unlabled', accuracies[1])
        tf.summary.scalar('accuracy', accuracies[2])
class ClassifyFully(ClassifySemi):
    """Fully-supervised classification."""

    def train_step(self, train_session, data_labeled, data_unlabeled):
        """One optimization step; the unlabeled stream is ignored."""
        del data_unlabeled
        x = self.dataset.session.run(data_labeled)
        self.tmp.step = train_session.run([self.ops.train_op, self.ops.update_step],
                                          feed_dict={self.ops.x: x['image'],
                                                     self.ops.label: x['label']})[1]

    def tune(self, train_nimg):
        """Fine-tune on the labeled set only, for train_nimg samples."""
        batch = FLAGS.batch
        # BUG fix: Model has no 'graph' attribute; input pipelines are
        # built on the dataset's graph (cf. ClassifySemi.train_for_contGrow).
        with self.dataset.graph.as_default():
            train_labeled = tf.data.Dataset.from_tensor_slices(self.dataset.labeled_indices).map(self.dataset.tf_get).batch(batch).prefetch(16)
            train_labeled = train_labeled.make_one_shot_iterator().get_next()
        for _ in trange(0, train_nimg, batch, leave=False, unit='img', unit_scale=batch, desc='Tuning'):
            # BUG fix: running [train_labeled] returned a one-element list,
            # so x['image'] raised TypeError; run the op directly.
            x = self.dataset.session.run(train_labeled)
            self.session.run([self.ops.tune_op], feed_dict={self.ops.x: x['image'],
                                                            self.ops.label: x['label']})
| |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
from nova import test
from nova.tests.functional import integrated_helpers
class NoMatch(test.TestingException):
    """Raised when an API sample/template does not match the actual response."""
    pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
sample_dir = None
request_api_version = None
_use_common_server_api_samples = False
def _pretty_data(self, data):
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
@classmethod
def _get_sample_path(cls, name, dirname, suffix='', api_version=None):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
# Note(gmann): if _use_common_server_api_samples is set to True
# then common server sample files present in 'servers' directory
# will be used.
elif cls._use_common_server_api_samples:
parts.append('servers')
elif cls.sample_dir:
parts.append(cls.sample_dir)
elif cls.extension_name:
alias = importutils.import_class(cls.extension_name).alias
parts.append(alias)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name, api_version):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname, "../../../doc"))
return cls._get_sample_path(name, dirname, api_version=api_version)
@classmethod
def _get_template(cls, name, api_version):
dirname = os.path.dirname(os.path.abspath(__file__))
return cls._get_sample_path(name, dirname, suffix='.tpl',
api_version=api_version)
def _read_template(self, name):
template = self._get_template(name, self.request_api_version)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name,
self.request_api_version), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(
name, self.request_api_version), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch('%(result_str)s: %(result)s is not a dict.'
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
'Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
'%(result_str)s: %(result)s is not a list.' %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append('Extra list items in template:')
error.extend([repr(o) for o in expected])
if extra:
error.append('Extra list items in %(result_str)s:' %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s' %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
if isinstance(result, six.string_types):
result = result.strip()
if expected != result:
# NOTE(tdurakov):this attempt to parse string as JSON
# is needed for correct comparison of hypervisor.cpu_info,
# which is stringified JSON object
#
# TODO(tdurakov): remove this check as soon as
# hypervisor.cpu_info become common JSON object in REST API.
try:
expected = self._objectify(expected)
result = self._objectify(result)
return self._compare_result(subs, expected, result,
result_str)
except ValueError:
pass
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s' % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status_code, exp_code)
response_data = response.content
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name,
self.request_api_version)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(
name, self.request_api_version))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name,
self.request_api_version)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
isotime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
strtime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
xmltime_re = ('\d{4}-[0,1]\d-[0-3]\d '
'\d{2}:\d{2}:\d{2}'
'(\.\d{6})?(\+00:00)?')
# NOTE(claudiub): the x509 keypairs are different from the
# ssh keypairs. For example, the x509 fingerprint has 40 bytes.
return {
'isotime': isotime_re,
'strtime': strtime_re,
'strtime_or_none': r'None|%s' % strtime_re,
'xmltime': xmltime_re,
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '(-----BEGIN RSA PRIVATE KEY-----|)'
'[a-zA-Z0-9\n/+=]*'
'(-----END RSA PRIVATE KEY-----|)',
'public_key': '(ssh-rsa|-----BEGIN CERTIFICATE-----)'
'[ a-zA-Z0-9\n/+=]*'
'(Generated-by-Nova|-----END CERTIFICATE-----)',
'fingerprint': '(([0-9a-f]{2}:){19}|([0-9a-f]{2}:){15})'
'[0-9a-f]{2}',
'keypair_type': 'ssh|x509',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False,
                  api_version=None):
    """Issue a request against the sample API and return the response."""
    content_type = 'application/' + self.ctype
    headers = {
        'Content-Type': content_type,
        'Accept': content_type,
    }
    if api_version:
        headers['X-OpenStack-Nova-API-Version'] = api_version
    return self.api.api_request(url, body=body, method=method,
                                headers=headers,
                                strip_version=strip_version)
def _do_get(self, url, strip_version=False, api_version=None):
    """Perform a GET request against the sample API."""
    return self._get_response(
        url, 'GET', strip_version=strip_version, api_version=api_version)
def _do_post(self, url, name, subs, method='POST', api_version=None):
    """POST the named template (with substitutions applied) to the API.

    If sample generation is enabled and no sample file exists yet for
    this template and API version, the rendered body is first written
    out as the new sample.
    """
    body = self._read_template(name) % subs
    sample = self._get_sample(name, self.request_api_version)
    if self.generate_samples and not os.path.exists(sample):
        self._write_sample(name, body)
    return self._get_response(url, method, body, api_version=api_version)
def _do_put(self, url, name, subs, api_version=None):
    """Perform a PUT request; delegates to _do_post with method='PUT'."""
    return self._do_post(
        url, name, subs, method='PUT', api_version=api_version)
def _do_delete(self, url, api_version=None):
    """Perform a DELETE request against the sample API."""
    return self._get_response(
        url, 'DELETE', api_version=api_version)
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the filesystem backend store"""
import errno
import hashlib
import json
import os
import stat
from unittest import mock
import uuid
import fixtures
from oslo_utils.secretutils import md5
from oslo_utils import units
import six
from six.moves import builtins
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance_store._drivers import filesystem
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
def setUp(self):
    """Establish a clean test environment."""
    super(TestStore, self).setUp()
    self.store = filesystem.Store(self.conf)
    # A tiny chunk size (10 bytes) keeps the chunked-read tests cheap
    # and makes multi-chunk behavior observable with small payloads.
    self.config(filesystem_store_datadir=self.test_dir,
                filesystem_store_chunk_size=10,
                stores=['glance.store.filesystem.Store'],
                group="glance_store")
    self.store.configure()
    self.register_store_schemes(self.store, 'file')
    self.hash_algo = 'sha256'
def _create_metadata_json_file(self, metadata):
    """Write *metadata* as JSON and point the store config at the file."""
    image_id = str(uuid.uuid4())
    json_path = os.path.join(
        self.test_dir, "storage_metadata.%s" % image_id)
    self.config(filesystem_store_metadata_file=json_path,
                group="glance_store")
    with open(json_path, 'w') as json_file:
        json.dump(metadata, json_file)
def _store_image(self, in_metadata):
    """Add a small image under *in_metadata* and return add()'s result."""
    image_id = str(uuid.uuid4())
    size = 10
    contents = b"*" * size
    self.store.FILESYSTEM_STORE_METADATA = in_metadata
    return self.store.add(image_id, six.BytesIO(contents),
                          size, self.hash_algo)
def test_get(self):
    """Test a "normal" retrieval of an image in chunks."""
    # First add an image...
    image_id = str(uuid.uuid4())
    file_contents = b"chunk00000remainder"
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, multihash, _ = self.store.add(
        image_id, image_file, len(file_contents), self.hash_algo)
    # Now read it back...
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri(uri, conf=self.conf)
    (image_file, image_size) = self.store.get(loc)
    expected_data = b"chunk00000remainder"
    # 19 bytes with the 10-byte chunk size from setUp -> 2 chunks.
    expected_num_chunks = 2
    data = b""
    num_chunks = 0
    for chunk in image_file:
        num_chunks += 1
        data += chunk
    self.assertEqual(expected_data, data)
    self.assertEqual(expected_num_chunks, num_chunks)
def test_get_random_access(self):
"""Test a "normal" retrieval of an image in chunks."""
# First add an image...
image_id = str(uuid.uuid4())
file_contents = b"chunk00000remainder"
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, len(file_contents), self.hash_algo)
# Now read it back...
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
data = b""
for offset in range(len(file_contents)):
(image_file, image_size) = self.store.get(loc,
offset=offset,
chunk_size=1)
for chunk in image_file:
data += chunk
self.assertEqual(file_contents, data)
data = b""
chunk_size = 5
(image_file, image_size) = self.store.get(loc,
offset=chunk_size,
chunk_size=chunk_size)
for chunk in image_file:
data += chunk
self.assertEqual(b'00000', data)
self.assertEqual(chunk_size, image_size)
def test_get_non_existing(self):
    """
    Test that trying to retrieve a file that doesn't exist
    raises an error
    """
    missing_uri = "file:///%s/non-existing" % self.test_dir
    loc = location.get_location_from_uri(missing_uri, conf=self.conf)
    self.assertRaises(exceptions.NotFound, self.store.get, loc)
def _do_test_add(self, enable_thin_provisoning):
"""Test that we can add an image via the filesystem backend."""
self.config(filesystem_store_chunk_size=units.Ki,
filesystem_thin_provisioning=enable_thin_provisoning,
group='glance_store')
self.store.configure()
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (self.test_dir,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = b""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_thin_provisioning_is_disabled_by_default(self):
    """Thin provisioning must be opt-in; the store default is False."""
    # assertIs pins the exact boolean (assertEqual(x, False) also
    # passes for 0) and restores the (expected, observed) argument
    # order the original call had reversed.
    self.assertIs(False, self.store.thin_provisioning)
def test_add_with_thick_provisioning(self):
self._do_test_add(enable_thin_provisoning=False)
def test_add_with_thin_provisioning(self):
self._do_test_add(enable_thin_provisoning=True)
def test_add_thick_provisioning_with_holes_in_file(self):
"""
Tests that a file which contains null bytes chunks is fully
written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
def test_add_thin_provisioning_with_holes_in_file(self):
"""
Tests that a file which contains null bytes chunks is sparsified
with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 1, 2, True)
def test_add_thick_provisioning_without_holes_in_file(self):
"""
Tests that a file which not contain null bytes chunks is fully
written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * 3 * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
def test_add_thin_provisioning_without_holes_in_file(self):
"""
Tests that a file which not contain null bytes chunks is fully
written with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * 3 * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, True)
def test_add_thick_provisioning_with_partial_holes_in_file(self):
"""
Tests that a file which contains null bytes not aligned with
chunk size is fully written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
my_chunk = int(chunk_size * 1.5)
content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
self._do_test_thin_provisioning(content, 3 * my_chunk, 0, 5, False)
def test_add_thin_provisioning_with_partial_holes_in_file(self):
"""
Tests that a file which contains null bytes not aligned with
chunk size is sparsified with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
my_chunk = int(chunk_size * 1.5)
content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
self._do_test_thin_provisioning(content, 3 * my_chunk, 1, 4, True)
def _do_test_thin_provisioning(self, content, size, truncate, write, thin):
    """Add *content* and verify how many write()/truncate() calls occur.

    :param content: raw image bytes to store
    :param size: declared image size in bytes
    :param truncate: expected number of truncate() calls (hole punching)
    :param write: expected number of write() calls (data chunks)
    :param thin: whether filesystem_thin_provisioning is enabled
    """
    self.config(filesystem_store_chunk_size=units.Ki,
                filesystem_thin_provisioning=thin,
                group='glance_store')
    self.store.configure()
    image_file = six.BytesIO(content)
    image_id = str(uuid.uuid4())
    # Patch open() so no real file is created; the call counts are then
    # read off the mocked file handle the store wrote through.
    with mock.patch.object(builtins, 'open') as popen:
        self.store.add(image_id, image_file, size, self.hash_algo)
    write_count = popen.return_value.__enter__().write.call_count
    truncate_count = popen.return_value.__enter__().truncate.call_count
    self.assertEqual(write_count, write)
    self.assertEqual(truncate_count, truncate)
def test_add_with_verifier(self):
"""Test that 'verifier.update' is called when verifier is provided."""
verifier = mock.MagicMock(name='mock_verifier')
self.config(filesystem_store_chunk_size=units.Ki,
group='glance_store')
self.store.configure()
image_id = str(uuid.uuid4())
file_size = units.Ki # 1K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
self.store.add(image_id, image_file, file_size, self.hash_algo,
verifier=verifier)
verifier.update.assert_called_with(file_contents)
def test_add_check_metadata_with_invalid_mountpoint_location(self):
in_metadata = [{'id': 'abcdefg',
'mountpoint': '/xyz/images'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual({}, metadata)
def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual({}, metadata)
def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
{'id': 'xyz1234', 'mountpoint': '/xyz'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual(in_metadata[0], metadata)
def test_add_check_metadata_bad_nosuch_file(self):
expected_image_id = str(uuid.uuid4())
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
expected_file_size = 10
expected_file_contents = b"*" * expected_file_size
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, metadata = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(metadata, {})
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
location, size, checksum, multihash, _ = self.store.add(
image_id, image_file, file_size, self.hash_algo)
image_file = six.BytesIO(b"nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
image_id, image_file, 0, self.hash_algo)
def _do_test_add_write_failure(self, errno, exception):
    """Simulate an IOError with the given errno during add().

    :param errno: errno value assigned to the simulated IOError.
        NOTE(review): this parameter shadows the module-level ``errno``
        import inside this method — consider renaming (callers all pass
        it positionally, but renaming would still touch the signature).
    :param exception: exception type the store is expected to raise
    """
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    path = os.path.join(self.test_dir, image_id)
    image_file = six.BytesIO(file_contents)
    with mock.patch.object(builtins, 'open') as popen:
        e = IOError()
        e.errno = errno
        popen.side_effect = e
        self.assertRaises(exception,
                          self.store.add,
                          image_id, image_file, 0, self.hash_algo)
        # The partially written file must have been cleaned up.
        self.assertFalse(os.path.exists(path))
def test_add_storage_full(self):
"""
Tests that adding an image without enough space on disk
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull)
def test_add_file_too_big(self):
"""
Tests that adding an excessively large image file
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull)
def test_add_storage_write_denied(self):
"""
Tests that adding an image with insufficient filestore permissions
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EACCES,
exceptions.StorageWriteDenied)
def test_add_other_failure(self):
"""
Tests that a non-space-related IOError does not raise a
StorageFull exceptions.
"""
self._do_test_add_write_failure(errno.ENOTDIR, IOError)
def test_add_cleanup_on_read_failure(self):
    """
    Tests the partial image file is cleaned up after a read
    failure.
    """
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    path = os.path.join(self.test_dir, image_id)
    image_file = six.BytesIO(file_contents)
    # Make every read() raise; giving mock an exception class as
    # side_effect raises a fresh AttributeError per call.
    with mock.patch.object(image_file, 'read') as mock_read:
        mock_read.side_effect = AttributeError
        self.assertRaises(AttributeError,
                          self.store.add,
                          image_id, image_file, 0, self.hash_algo)
        self.assertFalse(os.path.exists(path))
def test_delete(self):
"""
Test we can delete an existing image in the filesystem store
"""
# First add an image
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, file_size, self.hash_algo)
# Now check that we can delete it
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a file that doesn't exist
raises an error
"""
loc = location.get_location_from_uri(
"file:///tmp/glance-tests/non-existing", conf=self.conf)
self.assertRaises(exceptions.NotFound,
self.store.delete,
loc)
def test_delete_forbidden(self):
    """
    Tests that trying to delete a file without permissions
    raises the correct error
    """
    # First add an image
    image_id = str(uuid.uuid4())
    file_size = 5 * units.Ki  # 5K
    file_contents = b"*" * file_size
    image_file = six.BytesIO(file_contents)
    loc, size, checksum, multihash, _ = self.store.add(
        image_id, image_file, file_size, self.hash_algo)
    uri = "file:///%s/%s" % (self.test_dir, image_id)
    loc = location.get_location_from_uri(uri, conf=self.conf)
    # Mock unlink to raise an OSError for lack of permissions
    # and make sure we can't delete the image
    with mock.patch.object(os, 'unlink') as unlink:
        e = OSError()
        # BUG FIX: errno was previously assigned the ``errno`` *module*
        # rather than a permission-error code, so the store could not
        # map the failure to Forbidden.
        e.errno = errno.EACCES
        unlink.side_effect = e
        self.assertRaises(exceptions.Forbidden,
                          self.store.delete,
                          loc)
        # Make sure the image didn't get deleted
        self.store.get(loc)
def test_configure_add_with_multi_datadirs(self):
"""
Tests multiple filesystem specified by filesystem_store_datadirs
are parsed correctly.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]}
expected_priority_list = [200, 100]
self.assertEqual(expected_priority_map, self.store.priority_data_map)
self.assertEqual(expected_priority_list, self.store.priority_list)
def test_configure_add_with_metadata_file_success(self):
metadata = {'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_list_of_dicts_success(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/tmp/'}]
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_success_list_val_for_some_key(self):
metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_bad_data(self):
metadata = {'akey': 10, 'id': 'asdf1234',
'mountpoint': '/tmp'} # only unicode is allowed
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_with_no_id_or_mountpoint(self):
metadata = {'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdfg1234'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self):
metadata = {'id': 10, 'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdf1234', 'mountpoint': 12345}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 1234, 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg', 'mountpoint': 1234},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_same_dir_multiple_times(self):
"""
Tests BadStoreConfiguration exception is raised if same directory
is specified multiple times in filesystem_store_datadirs.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200",
store_map[0] + ":300"],
group='glance_store')
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_same_dir_multiple_times_same_priority(self):
    """
    Tests that repeating a directory at the *same* priority in
    filesystem_store_datadirs is tolerated (unlike repeating it at a
    different priority), and that the store still works afterwards.
    """
    store_map = [self.useFixture(fixtures.TempDir()).path,
                 self.useFixture(fixtures.TempDir()).path]
    self.conf.set_override('filesystem_store_datadir',
                           override=None,
                           group='glance_store')
    # store_map[0] appears twice, both times at priority 100.
    self.conf.set_override('filesystem_store_datadirs',
                           [store_map[0] + ":100",
                            store_map[1] + ":200",
                            store_map[0] + ":100"],
                           group='glance_store')
    try:
        self.store.configure()
    except exceptions.BadStoreConfiguration:
        self.fail("configure() raised BadStoreConfiguration unexpectedly!")
    # Test that we can add an image via the filesystem backend.
    # Use units.Ki for consistency with the rest of this file
    # (1024 was a hard-coded duplicate of the same value).
    filesystem.ChunkedFile.CHUNKSIZE = units.Ki
    expected_image_id = str(uuid.uuid4())
    expected_file_size = 5 * units.Ki  # 5K
    expected_file_contents = b"*" * expected_file_size
    expected_checksum = md5(expected_file_contents,
                            usedforsecurity=False).hexdigest()
    expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
    # The higher-priority (200) directory should receive the image.
    expected_location = "file://%s/%s" % (store_map[1],
                                          expected_image_id)
    image_file = six.BytesIO(expected_file_contents)
    loc, size, checksum, multihash, _ = self.store.add(
        expected_image_id, image_file, expected_file_size, self.hash_algo)
    self.assertEqual(expected_location, loc)
    self.assertEqual(expected_file_size, size)
    self.assertEqual(expected_checksum, checksum)
    self.assertEqual(expected_multihash, multihash)
    loc = location.get_location_from_uri(expected_location,
                                         conf=self.conf)
    (new_image_file, new_image_size) = self.store.get(loc)
    new_image_contents = b""
    new_image_file_size = 0
    for chunk in new_image_file:
        new_image_file_size += len(chunk)
        new_image_contents += chunk
    self.assertEqual(expected_file_contents, new_image_contents)
    self.assertEqual(expected_file_size, new_image_file_size)
def test_add_with_multiple_dirs(self):
"""Test adding multiple filesystem directories."""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure()
# Test that we can add an image via the filesystem backend
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store_map[1],
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = b""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_with_multiple_dirs_storage_full(self):
"""
Test StorageFull exception is raised if no filesystem directory
is found that can store an image.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
def fake_get_capacity_info(mount_point):
return 0
with mock.patch.object(self.store, '_get_capacity_info') as capacity:
capacity.return_value = 0
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
image_file = six.BytesIO(expected_file_contents)
self.assertRaises(exceptions.StorageFull,
self.store.add,
expected_image_id,
image_file,
expected_file_size,
self.hash_algo)
def test_configure_add_with_file_perm(self):
"""
Tests filesystem specified by filesystem_store_file_perm
are parsed correctly.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 700, # -rwx------
group='glance_store')
self.store.configure_add()
self.assertEqual(self.store.datadir, store)
def test_configure_add_with_unaccessible_file_perm(self):
"""
Tests BadStoreConfiguration exception is raised if an invalid
file permission specified in filesystem_store_file_perm.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 7, # -------rwx
group='glance_store')
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_with_file_perm_for_group_other_users_access(self):
"""
Test that we can add an image via the filesystem backend with a
required image file permission.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 744, # -rwxr--r--
group='glance_store')
# -rwx------
os.chmod(store, 0o700)
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
self.store.configure_add()
filesystem.Store.WRITE_CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
# -rwx--x--x for store directory
self.assertEqual(0o711, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
# -rwxr--r-- for image file
mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE]
perm = int(str(self.conf.glance_store.filesystem_store_file_perm), 8)
self.assertEqual(perm, stat.S_IMODE(mode))
def test_add_with_file_perm_for_owner_users_access(self):
"""
Test that we can add an image via the filesystem backend with a
required image file permission.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 600, # -rw-------
group='glance_store')
# -rwx------
os.chmod(store, 0o700)
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
self.store.configure_add()
filesystem.Store.WRITE_CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
# -rwx------ for store directory
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
# -rw------- for image file
mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE]
perm = int(str(self.conf.glance_store.filesystem_store_file_perm), 8)
self.assertEqual(perm, stat.S_IMODE(mode))
def test_configure_add_chunk_size(self):
    """A configured chunk size propagates to both R/W chunk sizes."""
    # This definitely won't be the default
    configured = units.Gi
    self.config(filesystem_store_chunk_size=configured,
                group="glance_store")
    self.store.configure_add()
    self.assertEqual(configured, self.store.chunk_size)
    self.assertEqual(configured, self.store.READ_CHUNKSIZE)
    self.assertEqual(configured, self.store.WRITE_CHUNKSIZE)
| |
# coding=utf-8
"""
History management classes
"""
import json
import re
from collections import (
OrderedDict,
)
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Union,
overload,
)
import attr
from . import (
utils,
)
from .parsing import (
Statement,
)
@attr.s(auto_attribs=True, frozen=True)
class HistoryItem:
    """Class used to represent one command in the history list"""

    # pr() output templates: one for raw entries and one (with an 'x'
    # suffix on the index) for the expanded form of an entry.
    _listformat = ' {:>4} {}'
    _ex_listformat = ' {:>4}x {}'

    # Used in JSON dictionaries
    _statement_field = 'statement'

    # The parsed Statement this history entry wraps; the validator
    # rejects anything that is not a Statement.
    statement: Statement = attr.ib(default=None, validator=attr.validators.instance_of(Statement))

    def __str__(self) -> str:
        """A convenient human readable representation of the history item"""
        return self.statement.raw

    @property
    def raw(self) -> str:
        """The raw input from the user for this item.

        Proxy property for ``self.statement.raw``
        """
        return self.statement.raw

    @property
    def expanded(self) -> str:
        """Return the command as run which includes shortcuts and aliases resolved
        plus any changes made in hooks

        Proxy property for ``self.statement.expanded_command_line``
        """
        return self.statement.expanded_command_line

    def pr(self, idx: int, script: bool = False, expanded: bool = False, verbose: bool = False) -> str:
        """Represent this item in a pretty fashion suitable for printing.

        If you pass verbose=True, script and expanded will be ignored

        :param idx: The 1-based index of this item in the history list
        :param script: True if formatting for a script (No item numbers)
        :param expanded: True if expanded command line should be printed
        :param verbose: True if expanded and raw should both appear when they are different
        :return: pretty print string version of a HistoryItem
        """
        if verbose:
            raw = self.raw.rstrip()
            expanded_command = self.expanded
            # Always show the raw line; add a second, 'x'-marked line
            # only when expansion actually changed the command.
            ret_str = self._listformat.format(idx, raw)
            if raw != expanded_command:
                ret_str += '\n' + self._ex_listformat.format(idx, expanded_command)
        else:
            if expanded:
                ret_str = self.expanded
            else:
                ret_str = self.raw.rstrip()
                # In non-verbose mode, display raw multiline commands on 1 line
                if self.statement.multiline_command:
                    # This is an approximation and not meant to be a perfect piecing together of lines.
                    # All newlines will be converted to spaces, including the ones in quoted strings that
                    # are considered literals. Also if the final line starts with a terminator, then the
                    # terminator will have an extra space before it in the 1 line version.
                    ret_str = ret_str.replace('\n', ' ')
            # Display a numbered list if not writing to a script
            if not script:
                ret_str = self._listformat.format(idx, ret_str)
        return ret_str

    def to_dict(self) -> Dict[str, Any]:
        """Utility method to convert this HistoryItem into a dictionary for use in persistent JSON history files"""
        return {HistoryItem._statement_field: self.statement.to_dict()}

    @staticmethod
    def from_dict(source_dict: Dict[str, Any]) -> 'HistoryItem':
        """
        Utility method to restore a HistoryItem from a dictionary

        :param source_dict: source data dictionary (generated using to_dict())
        :return: HistoryItem object
        :raises KeyError: if source_dict is missing required elements
        """
        statement_dict = source_dict[HistoryItem._statement_field]
        return HistoryItem(Statement.from_dict(statement_dict))
class History(List[HistoryItem]):
    """A list of :class:`~cmd2.history.HistoryItem` objects with additional methods
    for searching and managing the list.

    :class:`~cmd2.Cmd` instantiates this class into the :data:`~cmd2.Cmd.history`
    attribute, and adds commands to it as a user enters them.

    See :ref:`features/history:History` for information about the built-in command
    which allows users to view, search, run, and save previously entered commands.

    Developers interested in accessing previously entered commands can use this
    class to gain access to the historical record.
    """

    # Keys used in the JSON dictionaries written by to_json() and read by from_json()
    _history_version = '1.0.0'
    _history_version_field = 'history_version'
    _history_items_field = 'history_items'

    def __init__(self, seq: Iterable[HistoryItem] = ()) -> None:
        super(History, self).__init__(seq)
        # Index of the first item entered during the current session; items before
        # this index were loaded from a persisted history file.
        self.session_start_index = 0

    def start_session(self) -> None:
        """Start a new session, thereby setting the next index as the first index in the new session."""
        self.session_start_index = len(self)

    # noinspection PyMethodMayBeStatic
    def _zero_based_index(self, onebased: Union[int, str]) -> int:
        """Convert a one-based index to a zero-based index.

        Negative indices are returned unchanged, since they already count from the end.
        """
        result = int(onebased)
        if result > 0:
            result -= 1
        return result

    @overload
    def append(self, new: HistoryItem) -> None:
        ...  # pragma: no cover

    @overload
    def append(self, new: Statement) -> None:
        ...  # pragma: no cover

    def append(self, new: Union[Statement, HistoryItem]) -> None:
        """Append a new statement to the end of the History list.

        :param new: Statement object which will be composed into a HistoryItem
                    and added to the end of the list
        """
        # Wrap a bare Statement in a HistoryItem; a HistoryItem is stored as-is
        history_item = HistoryItem(new) if isinstance(new, Statement) else new
        super(History, self).append(history_item)

    def clear(self) -> None:
        """Remove all items from the History list and reset the session start index."""
        super().clear()
        self.start_session()

    def get(self, index: int) -> HistoryItem:
        """Get item from the History list using 1-based indexing.

        :param index: 1-based index of the item to get; negative indices count
                      from the end with normal Python semantics
        :return: a single :class:`~cmd2.history.HistoryItem`
        :raises IndexError: if index is 0 (history numbering starts at 1)
        """
        if index == 0:
            raise IndexError('The first command in history is command 1.')
        elif index < 0:
            # negative indices already address from the end; pass straight through
            return self[index]
        else:
            # shift 1-based input down to the underlying 0-based list
            return self[index - 1]

    # This regular expression parses input for the span() method. There are five parts:
    #
    # ^\s*                      matches any whitespace at the beginning of the
    #                           input. This is here so you don't have to trim the input
    #
    # (?P<start>-?[1-9]\d*)?    create a capture group named 'start' which matches an
    #                           optional minus sign, followed by exactly one non-zero
    #                           digit, and as many other digits as you want. This group
    #                           is optional so that we can match an input string like '..2'.
    #                           This regex will match 1, -1, 10, -10, but not 0 or -0.
    #
    # (?P<separator>:|(\.{2,})) create a capture group named 'separator' which matches
    #                           either a colon or two (or more) periods. Unlike 'start'
    #                           and 'end', this group is required: a bare number with
    #                           no separator is not a span.
    #
    # (?P<end>-?[1-9]\d*)?      create a capture group named 'end' which matches an
    #                           optional minus sign, followed by exactly one non-zero
    #                           digit, and as many other digits as you want. This group is
    #                           optional so that we can match an input string like ':'
    #                           or '5:'. This regex will match 1, -1, 10, -10, but not
    #                           0 or -0.
    #
    # \s*$                      match any whitespace at the end of the input. This is here so
    #                           you don't have to trim the input
    #
    spanpattern = re.compile(r'^\s*(?P<start>-?[1-9]\d*)?(?P<separator>:|(\.{2,}))(?P<end>-?[1-9]\d*)?\s*$')

    def span(self, span: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
        """Return a slice of the History list

        :param span: string containing an index or a slice
        :param include_persisted: if True, then retrieve full results including from persisted history
        :return: a dictionary of history items keyed by their 1-based index in ascending order,
                 or an empty dictionary if no results were found

        This method can accommodate input in any of these forms:

            a..b or a:b
            a.. or a:
            ..a or :a
            -a.. or -a:
            ..-a or :-a

        Different from native python indexing and slicing of arrays, this method
        uses 1-based array numbering. Users who are not programmers can't grok
        zero based numbering. Programmers can sometimes grok zero based numbering.
        Which reminds me, there are only two hard problems in programming:

        - naming
        - cache invalidation
        - off by one errors

        :raises ValueError: if span is not a valid 1-based index or slice expression
        """
        results = self.spanpattern.search(span)
        if not results:
            # our regex doesn't match the input, bail out
            raise ValueError('History indices must be positive or negative integers, and may not be zero.')

        start_token = results.group('start')
        if start_token:
            # clamp an overly large start to the last item; a negative start passes
            # through _zero_based_index() unchanged and is resolved from the end below
            start = min(self._zero_based_index(start_token), len(self) - 1)
            if start < 0:
                start = max(0, len(self) + start)
        else:
            # no explicit start: default to everything, or just this session's items
            start = 0 if include_persisted else self.session_start_index

        end_token = results.group('end')
        if end_token:
            # 'end' is exclusive; clamp to the list length
            end = min(int(end_token), len(self))
            if end < 0:
                # a negative end counts from the back and is inclusive, hence the + 1
                end = max(0, len(self) + end + 1)
        else:
            # no explicit end: include everything through the last item
            end = len(self)

        return self._build_result_dictionary(start, end)

    def str_search(self, search: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
        """Find history items which contain a given string

        :param search: the string to search for
        :param include_persisted: if True, then search full history including persisted history
        :return: a dictionary of history items keyed by their 1-based index in ascending order,
                 or an empty dictionary if the string was not found
        """

        def isin(history_item: HistoryItem) -> bool:
            """filter function for string search of history"""
            # both needle and haystacks go through utils.norm_fold, so the match is
            # presumably case-insensitive -- verify against utils.norm_fold if it matters
            sloppy = utils.norm_fold(search)
            inraw = sloppy in utils.norm_fold(history_item.raw)
            inexpanded = sloppy in utils.norm_fold(history_item.expanded)
            return inraw or inexpanded

        start = 0 if include_persisted else self.session_start_index
        return self._build_result_dictionary(start, len(self), isin)

    def regex_search(self, regex: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
        """Find history items which match a given regular expression

        :param regex: the regular expression to search for.
        :param include_persisted: if True, then search full history including persisted history
        :return: a dictionary of history items keyed by their 1-based index in ascending order,
                 or an empty dictionary if the regex was not matched
        :raises re.error: if regex is not a valid regular expression
        """
        regex = regex.strip()
        # allow the pattern to be wrapped in slashes, sed-style: /pattern/
        if regex.startswith(r'/') and regex.endswith(r'/'):
            regex = regex[1:-1]
        finder = re.compile(regex, re.DOTALL | re.MULTILINE)

        def isin(hi: HistoryItem) -> bool:
            """filter function for doing a regular expression search of history"""
            # a match in either the raw or the expanded command line counts
            return bool(finder.search(hi.raw) or finder.search(hi.expanded))

        start = 0 if include_persisted else self.session_start_index
        return self._build_result_dictionary(start, len(self), isin)

    def truncate(self, max_length: int) -> None:
        """Truncate the length of the history, dropping the oldest items if necessary

        :param max_length: the maximum length of the history, if negative, all history
                           items will be deleted
        :return: nothing
        """
        if max_length <= 0:
            # remove all history
            del self[:]
        elif len(self) > max_length:
            # drop the oldest items so exactly max_length items remain
            last_element = len(self) - max_length
            del self[0:last_element]

    def _build_result_dictionary(
        self, start: int, end: int, filter_func: Optional[Callable[[HistoryItem], bool]] = None
    ) -> 'OrderedDict[int, HistoryItem]':
        """
        Build history search results

        :param start: start index to search from
        :param end: end index to stop searching (exclusive)
        :param filter_func: optional predicate; when given, only items for which it
                            returns True are included in the results
        :return: matching items keyed by their 1-based index, in ascending order
        """
        results: OrderedDict[int, HistoryItem] = OrderedDict()
        for index in range(start, end):
            if filter_func is None or filter_func(self[index]):
                # keys are 1-based to match the numbering users see
                results[index + 1] = self[index]
        return results

    def to_json(self) -> str:
        """Utility method to convert this History into a JSON string for use in persistent history files"""
        json_dict = {
            History._history_version_field: History._history_version,
            History._history_items_field: [hi.to_dict() for hi in self],
        }
        return json.dumps(json_dict, ensure_ascii=False, indent=2)

    @staticmethod
    def from_json(history_json: str) -> 'History':
        """
        Utility method to restore History from a JSON string

        :param history_json: history data as JSON string (generated using to_json())
        :return: History object
        :raises json.JSONDecodeError: if passed invalid JSON string
        :raises KeyError: if JSON is missing required elements
        :raises ValueError: if history version in JSON isn't supported
        """
        json_dict = json.loads(history_json)
        version = json_dict[History._history_version_field]
        # refuse to load files written by an incompatible format version
        if version != History._history_version:
            raise ValueError(
                f"Unsupported history file version: {version}. This application uses version {History._history_version}."
            )
        items = json_dict[History._history_items_field]
        history = History()
        for hi_dict in items:
            history.append(HistoryItem.from_dict(hi_dict))

        return history
| |
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnslsview
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'


import os
import sys
import socket
import threading
import time
import getpass

import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib

# Test fixtures and connection settings shared by all test cases in this module
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf'  # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
# TLS key/cert used by the test XML-RPC daemon
KEYFILE=('test_data/dnsmgmt.key.pem')
CERTFILE=('test_data/dnsmgmt.cert.pem')
# Credential cache file written in the current working directory
CREDFILE='%s/.dnscred' % os.getcwd()
# Path to the command-line tool under test
EXEC='../roster-user-tools/scripts/dnslsview'
class options(object):
  """Stand-in for a parsed command-line options object, carrying the flag
  values that roster_client_lib calls expect in these tests."""
  # authentication / connection settings
  username = u'sharrell'
  password = u'test'
  credfile = CREDFILE
  ldap = u'ldaps://ldap.cs.university.edu:636'
  server = None
  # record/view selection defaults
  view_name = None
  ip_address = None
  target = u'machine1'
  ttl = 64
class DaemonThread(threading.Thread):
  """Background thread that runs a roster_server.Server on a given port."""

  def __init__(self, config_instance, port):
    """Record the server configuration; the server itself is built in run().

    :param config_instance: roster_core.Config used to construct the server
    :param port: TCP port the daemon should listen on
    """
    super(DaemonThread, self).__init__()
    self.daemon_instance = None
    self.config_instance = config_instance
    self.port = port

  def run(self):
    """Thread entry point: construct the server and serve requests."""
    self.daemon_instance = roster_server.Server(self.config_instance, KEYFILE,
                                                CERTFILE)
    self.daemon_instance.Serve(port=self.port)
class Testdnsmkview(unittest.TestCase):
  """Regression tests for the dnslsview command-line tool.

  Each test shells out to the real dnslsview script via os.popen and compares
  its stdout byte-for-byte against the expected table output.
  """

  def setUp(self):
    """Create a fresh test database and start an XML-RPC daemon on a free port."""

    def PickUnusedPort():
      # Bind to port 0 so the OS assigns a free port, then release it.
      # NOTE(review): another process could grab the port before the daemon
      # binds it -- acceptable for a test, racy in principle.
      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      s.bind((HOST, 0))
      addr, port = s.getsockname()
      s.close()
      return port

    self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
    db_instance = self.config_instance.GetDb()

    # Rebuild the schema and load the test fixture data from scratch
    db_instance.CreateRosterDatabase()

    # NOTE(review): file handle from open() is never closed explicitly;
    # relies on garbage collection to reclaim it
    data = open(DATA_FILE, 'r').read()
    db_instance.StartTransaction()
    db_instance.cursor.execute(data)
    db_instance.EndTransaction()
    db_instance.close()

    self.port = PickUnusedPort()
    self.server_name = 'https://%s:%s' % (HOST, self.port)
    self.daemon_thread = DaemonThread(self.config_instance, self.port)
    self.daemon_thread.daemon = True
    self.daemon_thread.start()
    self.core_instance = roster_core.Core(USERNAME, self.config_instance)
    self.password = 'test'
    # give the daemon thread time to start serving before requesting credentials
    time.sleep(1)
    roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
                                     server_name=self.server_name)

  def tearDown(self):
    # remove the cached credential file written during setUp
    if( os.path.exists(CREDFILE) ):
      os.remove(CREDFILE)

  def testListView(self):
    """Listing one named view, then all views, prints the expected tables."""
    self.core_instance.MakeView(u'test_view1')
    self.core_instance.MakeView(u'test_view2')
    self.core_instance.MakeView(u'test_view3')
    self.core_instance.MakeDnsServerSet(u'set1')
    self.core_instance.MakeDnsServerSet(u'set2')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view1', 1, u'set1',
                                                       u'some option;')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view2', 2, u'set1',
                                                       u'some other option;')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view3', 3, u'set1',
                                                       u'still some other option;')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view3', 1, u'set2',
                                                       u'recursion no;')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view1', 2, u'set2',
                                                       u'recursion yes;')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view2', 3, u'set2',
                                                       u'recursion maybe;')
    #Listing a specific view
    command = os.popen('python %s view -v test_view1 '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'view_name\n'
                     '---------\n'
                     'test_view1\n'
                     '\n')
    command.close()
    #Listing all views (note the omission of the -v flag)
    command = os.popen('python %s view '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'view_name\n'
                     '---------\n'
                     'test_view1\n'
                     'test_view2\n'
                     'test_view3\n'
                     '\n')
    command.close()

  def testListViewAclAssignment(self):
    """Listing a view's ACL assignment prints the expected table."""
    self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
    self.core_instance.MakeView(u'test_view')
    self.core_instance.MakeDnsServerSet(u'set1')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view', 1, u'set1')
    self.core_instance.MakeViewToACLAssignments(u'test_view', u'set1', u'acl1', 1)
    command = os.popen('python %s acl -v test_view -a acl1 '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'view_name acl_name acl_range_allowed\n'
                     '------------------------------------\n'
                     'test_view acl1     True\n'
                     '\n')
    command.close()

  def testMakeViewAssignment(self):
    """View subset listing is empty before assignment and populated after."""
    self.core_instance.MakeView(u'test_view')
    self.core_instance.MakeView(u'test_view2')
    # no assignment yet: expect an empty table
    # NOTE(review): this first invocation omits the -c credfile flag used by
    # every other invocation in this module -- presumably intentional; confirm
    command = os.popen('python %s view_subset -v test_view -V test_view2 '
                       '-u %s -p %s --config-file %s -s %s' % (
                           EXEC, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'view view_subset\n'
                     '----------------\n\n')
    command.close()
    self.core_instance.MakeViewAssignment(u'test_view', u'test_view2')
    command = os.popen('python %s view_subset -v test_view -V test_view2 '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     "view      view_subset\n"
                     "---------------------\n"
                     "test_view test_view2\n\n")
    command.close()

  def testMakeDnsServerSetAssignment(self):
    """Listing a dns server set assignment prints the expected table."""
    self.core_instance.MakeView(u'test_view')
    self.core_instance.MakeDnsServerSet(u'set1')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view', 1, u'set1',
                                                       view_options=u'recursion no;\n')
    command = os.popen('python %s dns_server_set -v test_view -e set1 '
                       '-c %s -u %s -p %s '
                       '--config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'dns_server_set view_name view_order view_options\n'
                     '------------------------------------------------\n'
                     'set1           test_view 1          recursion no;\n'
                     '\n')
    command.close()

  def testErrors(self):
    """Invalid flag combinations and missing views produce the expected errors."""
    # -V is not valid with the dns_server_set subcommand
    command = os.popen('python %s dns_server_set -v test_view -V other_view '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'CLIENT ERROR: The -V/--view-dep flag cannot be used with the '
                     'dns_server_set command.\n')
    command.close()
    # only test_view1 exists, so asking for test_view is an error
    self.core_instance.MakeView(u'test_view1')
    self.core_instance.MakeDnsServerSet(u'set1')
    self.core_instance.MakeDnsServerSetViewAssignments(u'test_view1', 1, u'set1')
    command = os.popen('python %s view -v test_view '
                       '-c %s -u %s -p %s --config-file %s -s %s' % (
                           EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
                           self.server_name))
    self.assertEqual(command.read(),
                     'ERROR: View test_view does not exist\n')
    command.close()
# Run the test suite when executed directly
if __name__ == '__main__':
  unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.