# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from ceilometer.openstack.common import fileutils
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import local
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("CEILOMETER_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
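# Hedged usage sketch (illustrative only): the platform-selected class can be
# used directly as a context manager. The path '/tmp/example.lock' is an
# assumption; any writable path works. Kept commented out so importing this
# module stays free of side effects.
#
#     with InterProcessLock('/tmp/example.lock'):
#         pass  # critical section shared across processes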
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
        workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
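# Illustrative sketch (not part of the original module): how a project-local
# decorator built with synchronized_with_prefix() is typically applied. The
# 'example-' prefix and 'state' lock name are assumptions for the demo; with
# external=False only threads in this process are serialized, so no
# lock_path configuration is needed.
_example_synchronized = synchronized_with_prefix('example-')
@_example_synchronized('state', external=False)
def _example_critical_section():
    """Only one thread at a time may execute this body."""
    pass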
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["CEILOMETER_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
# -*- coding: utf-8 -*-
from collections import namedtuple
from . import codes
from . import utils
from . import exceptions
try:
import numpy as np
except ImportError:
HAS_NUMPY = False
else:
HAS_NUMPY = True
ArchiveProperties = namedtuple("ArchiveProperties", "key start_time end_time")
Limits = namedtuple("Limits", "low high")
class ChannelData(object):
"""
Container for archive data for a single channel.
Attributes:
channel (str): The channel name.
values (List): A list of channel values.
times (List[datetime]): Timestamps corresponding to the retrieved values.
statuses (List[int]): Status values corresponding with the retrieved values.
severities (List[int]): Severity values corresponding with the retrieved
values.
units (str): The units of the values.
states (List[str]): The states a STRING or ENUM can have.
data_type (int): The data type of the channel.
elements (int): The number of elements per sample for waveform channels.
display_limits (Limits): Values advising how to display the values in a user
interface.
warn_limits (Limits): Low and high values for which the channel will
generate a warning.
alarm_limits (Limits): Low and high values for which the channel will
generate an alarm.
display_precision (int): The number of decimal places to show in user
interfaces.
archive_key (int): The archive the data was pulled from.
"""
def __init__(
self,
channel=None,
values=None,
times=None,
statuses=None,
severities=None,
units=None,
states=None,
data_type=None,
elements=None,
display_limits=None,
warn_limits=None,
alarm_limits=None,
display_precision=None,
archive_key=None,
interpolation=None,
):
super(ChannelData, self).__init__()
self.channel = channel
self.values = values
self.times = times
self.statuses = statuses
self.severities = severities
self.units = units
self.states = states
self.data_type = data_type
self.elements = elements
self.display_limits = display_limits
self.warn_limits = warn_limits
self.alarm_limits = alarm_limits
self.display_precision = display_precision
self.archive_key = archive_key
self.interpolation = interpolation
self._array = None
@property
def array(self):
"""Return the data in a numpy array structure."""
if not HAS_NUMPY:
raise exceptions.NumpyNotInstalled("Numpy not found")
# Only compute the array once
if self._array is None:
if self.data_type == codes.data_type.STRING:
                value_dtype = np.str_
elif self.data_type == codes.data_type.ENUM:
value_dtype = np.uint8
elif self.data_type == codes.data_type.INT:
                value_dtype = np.int_
else:
value_dtype = np.dtype(float)
dtypes = [
("time", np.dtype("datetime64[us]")),
("value", value_dtype, self.elements),
("status", np.uint8),
("severity", np.uint16),
]
            # zip() returns an iterator on Python 3; materialize it so
            # np.array() sees the rows.
            data = list(
                zip(self.times, self.values, self.statuses, self.severities)
            )
self._array = np.array(data, dtype=dtypes)
return self._array
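    # Hedged usage sketch (assumes numpy is installed and `cd` is a populated
    # ChannelData instance):
    #     arr = cd.array
    #     arr["time"]      # datetime64[us] timestamps
    #     arr["value"]     # per-sample values, shaped by `elements`
    #     arr["severity"]  # uint16 severity codes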
def __repr__(self):
if self.data_type == codes.data_type.DOUBLE:
fmt = "{0:.6g}"
else:
fmt = "{0!r}"
s = "ChannelData(\n"
if self.elements == 1:
s += utils.pretty_list_repr(self.values, fmt, prefix=" values=")
else:
s += utils.pretty_waveform_repr(self.values, fmt, prefix=" values=")
s += ",\n"
for attr in ["times", "statuses", "severities", "states"]:
value = self.__getattribute__(attr)
if value is None:
continue
prefix = f" {attr}="
s += utils.pretty_list_repr(value, prefix=prefix)
s += ",\n"
for attr in [
"units",
"data_type",
"elements",
"display_limits",
"warn_limits",
"alarm_limits",
"display_precision",
"archive_key",
"interpolation",
]:
value = self.__getattribute__(attr)
if value is None:
continue
s += f" {attr}={value!r},\n"
s = s[:-2]
s += "\n)"
return s
def __str__(self):
times = ["time"] + [dt.strftime("%Y-%m-%d %H:%M:%S") for dt in self.times]
statuses = ["status"] + [codes.status.str_value(s) for s in self.statuses]
severities = ["severity"] + [
codes.severity.str_value(s) for s in self.severities
]
times_len = max(len(s) for s in times)
statuses_len = max(len(s) for s in statuses)
severities_len = max(len(s) for s in severities)
out = ""
value_format = "{0:.9g}"
if self.elements == 1:
values = ["value"] + [value_format.format(v) for v in self.values]
else:
len_for_values = 79 - times_len - statuses_len - severities_len - 6
values = ["value"]
max_value_len = utils.max_value_len_in_waveform(self.values, value_format)
for value in self.values:
formatted_value = utils.pretty_list_repr(
value,
value_format,
max_line_len=len_for_values,
min_value_len=max_value_len,
)
values += formatted_value.split("\n")
values_len = max(len(s) for s in values)
spec = (
"{0:>" + str(times_len) + "} "
"{1:>" + str(values_len) + "} "
"{2:>" + str(statuses_len) + "} "
"{3:>" + str(severities_len) + "}\n"
)
if self.elements == 1:
for fields in zip(times, values, statuses, severities):
out += spec.format(*fields)
else:
i = 0
for line in values:
if i == 0 or "[" in line:
out += spec.format(times[i], line, statuses[i], severities[i])
i += 1
else:
out += spec.format("", line.ljust(values_len), "", "")
return out.rstrip()
import tempfile, shutil
import os
import re
import subprocess
import time
import datetime
import csv
import json, yaml
import string
from bson.objectid import ObjectId
from bson import json_util
from dateutil.parser import parse
from django.conf import settings
from hashlib import md5
from crits.core.class_mapper import class_from_value
from crits.core.exceptions import ZipFileError
from crits.core.mongo_tools import get_file
def get_file_fs(sample_md5):
"""
Read a file from the filesystem. The path to the file is:
/data/files/<md5[:2]>/<md5[2:4]>/<md5>
:param sample_md5: The MD5 of the file to read off of disk.
:type sample_md5: str
:returns: str
"""
try:
fin = open('/data/files/%s/%s/%s' % (sample_md5[:2],
sample_md5[2:4],
sample_md5),
'rb')
data = fin.read()
fin.close()
    except Exception as e:
        raise Exception("error: %s" % e)
return data
def put_file_fs(data):
"""
Write a file to the filesystem. The path to write the file to is:
/data/files/<md5[:2]>/<md5[2:4]>/<md5>
:param data: The data of the file to write.
:type data: str
:returns: str (the md5 of the file written)
"""
a = md5()
a.update(data)
sample_md5 = a.hexdigest()
try:
fout = open('/data/files/%s/%s/%s' % (sample_md5[:2],
sample_md5[2:4],
sample_md5),
'wb')
fout.write(data)
fout.close()
    except Exception as e:
        raise Exception("error: %s" % e)
return sample_md5
def create_zip(files, pw_protect=True):
"""
Create a zip file. Creates a temporary directory to write files to on disk
using :class:`tempfile`. Uses /usr/bin/zip as the zipping mechanism
    currently. Will password protect the zip file by default. The password for
the zip file defaults to "infected", but it can be changed in the config
under zip7_password.
:param files: The files to add to the zip file.
:type files: list of files which are in the format of a list or tuple of
(<filename>, <data>).
:param pw_protect: To password protect the zip file or not.
:type pw_protect: boolean
:returns: :class:`crits.core.exceptions.ZipFileError`, str
"""
dumpdir = ""
try:
# Zip can take data from stdin to compress, but
# you can't define the filenames within the archive,
# they show up as "-". Therefore, we need to write
# out the file, compress it and return the zip.
# Save the sample as a file in a temp directory
# NOTE: the following line was causing a "permission denied" exception.
# Removed dir arg.
from crits.config.config import CRITsConfig
crits_config = CRITsConfig.objects().first()
if crits_config:
zip7_password = crits_config.zip7_password or 'infected'
else:
zip7_password = settings.ZIP7_PASSWORD or 'infected'
dumpdir = tempfile.mkdtemp() #dir=temproot
#write out binary files
for f in files:
filename = f[0]
file_data = f[1]
# make sure our desired path doesn't already exist (some files may
# have the same name but different data)
path = dumpdir + "/" + filename.encode("utf-8")
i = 1
tmp = path
while os.path.exists(tmp):
tmp = path+"("+str(i)+")"
i += 1
with open(tmp, "wb") as fh:
fh.write(file_data)
# Build the command line for zip
# NOTE: forking subprocess instead of using Python's ZipFile library
# because ZipFile does not allow us to create password-protected zip
# archives, only read them.
# -j don't include original filepath
zipname = "zip.zip" #The name we give it doesn't really matter
args = ["/usr/bin/zip", "-r", "-j", dumpdir+"/"+zipname, dumpdir]
if pw_protect:
args += ["-P", zip7_password]
args += [dumpdir+"/"+zipname, dumpdir]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Give the process 30 seconds to complete, otherwise kill it
waitSeconds = 30
while (proc.poll() is None and waitSeconds):
time.sleep(1)
waitSeconds -= 1
zipdata = ""
if proc.returncode: # zip spit out an error
errmsg = "Error while creating archive\n" + proc.stdout.read()
raise ZipFileError, errmsg
elif not waitSeconds: # Process timed out
proc.terminate()
raise ZipFileError, "Error:\nProcess failed to terminate"
else:
with open(dumpdir + "/" + zipname, "rb") as fh:
zipdata = fh.read()
if not len(zipdata):
raise ZipFileError, "Error:\nThe zip archive contains no data"
return zipdata
except ZipFileError:
raise
except Exception, ex:
errmsg = ""
for err in ex.args:
errmsg = errmsg + " " + unicode(err)
raise ZipFileError, errmsg
finally:
if os.path.isdir(dumpdir):
shutil.rmtree(dumpdir)
def format_file(data, file_format):
"""
Format data into the provided format. Acceptable formats are:
- base64
- zlib
- raw
- invert
:param data: The data to format.
:type data: str
:param file_format: The format to convert the data into.
:type file_format: str
:returns: tuple of (<formatted_data>, <file_extension>)
"""
    if data is None:
return ("", "")
if file_format == "base64":
import base64
data = base64.b64encode(data)
ext = ".b64"
elif file_format == "zlib":
import zlib
data = zlib.compress(data)
ext = ".Z"
elif file_format == "raw":
ext = ""
elif file_format == "invert":
data = ''.join([chr(ord(c) ^ 0xff) for c in data])
ext = ".ff"
return (data, ext)
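# Hedged examples of format_file() (illustrative only):
#     format_file("hi", "base64")        -> ("aGk=", ".b64")
#     format_file("\x00\xff", "invert")  -> ("\xff\x00", ".ff")
# "invert" XORs each byte with 0xff, which is its own inverse, so applying it
# twice round-trips the data.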
def convert_datetimes_to_string(obj):
"""
Iterates over all the keys of a document to convert all datetime objects
to strings.
Will also work with ordinary datetime objects or lists of datetimes and
lists of dictionaries. Any non-datetime values will be left as-is.
:param obj: The date object(s) to convert to a string.
:type obj: datetime.datetime, list, dict
:returns: obj
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, list) or isinstance(obj, dict):
for idx in (xrange(len(obj)) if isinstance(obj, list) else obj.keys()):
obj[idx] = convert_datetimes_to_string(obj[idx])
return obj
def convert_string_to_bool(value):
"""
Converts the string values "True" or "False" to their boolean
representation.
:param value: The string.
:type value: str.
:returns: True, False
"""
    return value in (True, "True", "true")
def format_object(obj_type, obj_id, data_format="yaml", cleanse=True,
obj_sources=[], remove_source=False, remove_rels=False,
remove_schema_version=False, remove_campaign=False,
remove_buckets=False, remove_releasability=False,
remove_unsupported=False):
"""
Formats a top-level object for utilization in certain conditions. Removes
CRITs-internal necessary data so users editing the document via the
interface don't alter or have the ability to overwrite things they should
not.
:param obj_type: The CRITs type of the top-level object to format.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param data_format: The format of the returned data.
:type data_format: str of "yaml" or "json"
:param cleanse: Remove "to", "actions", "releasability", and "bucket_list"
if this is an Email or Indicator.
:type cleanse: boolean
:param obj_sources: The sources to overwrite into the document or to set
the source list to an empty list if remove_source is
False.
:type obj_sources: list
:param remove_source: Remove the source key from the document.
:type remove_source: boolean
:param remove_rels: Remove the relationships key from the document.
:type remove_rels: boolean
:param remove_schema_version: Remove the schema_version key from the
document.
:type remove_schema_version: boolean
:param remove_campaign: Remove the campaign key from the document.
:type remove_campaign: boolean
:param remove_buckets: Remove the bucket_list key from the document.
:type remove_buckets: boolean
:param remove_releasability: Remove the releasability key from the document.
:type remove_releasability: boolean
:param remove_unsupported: Remove the unsupported_attrs key from the document.
:type remove_unsupported: boolean
:returns: str
"""
collection = settings.CRITS_TYPES[obj_type]
obj_class = class_from_value(obj_type, obj_id)
if not obj_class:
return ""
data = obj_class.to_dict()
if data is None:
return ""
# Emails use raw_header (singular) as the attribute but store it as
# raw_headers (plural) in the database. When viewing an email in YAML
# or JSON convert from plural to singular. This will allow a copy/paste
# of these views to be imported correctly.
if 'raw_headers' in data:
data['raw_header'] = data['raw_headers']
del data['raw_headers']
if cleanse and collection in [settings.COL_EMAIL, settings.COL_INDICATORS]:
if "to" in data:
del data["to"]
if "actions" in data:
del data["actions"]
if "releasability" in data:
del data["releasability"]
if "bucket_list" in data:
del data["bucket_list"]
if remove_source and 'source' in data:
del data["source"]
elif 'source' in data:
data['source'] = obj_sources
if remove_rels and 'relationships' in data:
del data["relationships"]
if remove_rels and 'objects' in data:
del data["objects"]
if remove_schema_version and 'schema_version' in data:
del data["schema_version"]
if remove_campaign and 'campaign' in data:
del data["campaign"]
del data["_id"]
if data.has_key("modified"):
del data["modified"]
if remove_buckets and 'bucket_list' in data:
del data['bucket_list']
if remove_releasability and 'releasability' in data:
del data['releasability']
if remove_unsupported and 'unsupported_attrs' in data:
del data['unsupported_attrs']
data = json.dumps(convert_datetimes_to_string(data),
default=json_util.default)
if data_format == "yaml":
data = yaml.dump(yaml.load(data), default_flow_style=False)
elif data_format == "json":
data = json.dumps(json.loads(data))
return data
def make_ascii_strings(md5=None, data=None):
"""
Find and return all printable ASCII strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
strings_data = 'ASCII Strings\n'
strings_data += "-" * 30
strings_data += "\n"
ascii_regex = re.compile('([ -~]{4,})')
matches = ascii_regex.findall(data)
strings_data += '\n'.join([x for x in matches])
return strings_data + "\n\n\n\n"
def make_unicode_strings(md5=None, data=None):
"""
Find and return all printable Unicode strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
strings_data = 'Unicode Strings\n'
strings_data += "-" * 30
strings_data += "\n"
    # escape string.printable so its punctuation (']', '\', '-', ...) cannot
    # alter the character class
    unicode_regex = re.compile('(([%s]\x00){4,})' % re.escape(string.printable))
matches = unicode_regex.findall(data)
strings_data += '\n'.join([x[0].replace('\x00', '') for x in matches])
return strings_data + "\n\n\n\n"
def make_stackstrings(md5=None, data=None):
"""
Find and return all stack strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
x = 0
prev = 0
strings = ''
    # Heuristic scan for x86 "mov byte ptr [...], imm8" encodings (opcode
    # 0xC6 plus a ModRM byte); the printable immediate bytes written to the
    # stack one at a time are collected into strings.
    while x < len(data):
if (data[x] == '\xc6') and ((data[x+1] == '\x45') or (data[x+1] == '\x84')):
a = ord(data[x+3])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+3]
prev = x
x += 4
elif (data[x] == '\xc6') and (data[x+1] == '\x44'):
a = ord(data[x+4])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+4]
prev = x
x += 5
elif (data[x] == '\xc6') and ((data[x+1] == '\x05') or (data[x+1] == '\x85')):
a = ord(data[x+6])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+6]
prev = x
x += 7
else:
if ((x - prev) ==12): strings += '\n'
x += 1
strings = strings.replace('\x00', '\r')
return strings
def make_hex(md5=None, data=None):
"""
Convert data into hex formatted output.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
length = 16
hex_data = ''
digits = 4 if isinstance(data, unicode) else 2
for i in xrange(0, len(data), length):
s = data[i:i+length]
hexa = ' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = ' '.join([x if 0x20 <= ord(x) < 0x7F else '.' for x in s])
hex_data += "%04X %-*s %s\r\n" % (i, length*(digits + 1), hexa, text)
return hex_data
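# Hedged sketch of make_hex() output for the bytes "ABC\x00D": each line is a
# four-digit hex offset, a fixed-width column of hex byte values, then the
# printable characters with non-printables rendered as '.', roughly:
#     0000 41 42 43 00 44 ...            A B C . D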
def xor_string(md5=None, data=None, key=0, null=0):
"""
XOR data.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:param key: The XOR key to use.
:type key: int
:param null: Whether or not to skip nulls.
:type null: int (0 or 1)
:returns: str
"""
if md5:
data = get_file(md5)
out = ''
for c in data:
if ord(c) == 0 and null == 1:
out += c
elif ord(c) == key and null == 1:
out += c
else:
out += chr(ord(c) ^ key)
return out
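# Hedged example: XOR with 0x20 toggles ASCII letter case, so
#     xor_string(data="abc", key=0x20)  ->  "ABC"
# With null=1, NUL bytes and bytes equal to the key pass through unchanged.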
def xor_search(md5=None, data=None, string=None, skip_nulls=0):
"""
Search a string for potential XOR keys. Uses a small list of common
plaintext terms, XORs those terms using keys 0-255 and searches the data for
any match. If there is a match, that key is included in the results.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:param string: The custom string to XOR and search for.
:type string: str
:param skip_nulls: Whether or not to skip nulls.
:type skip_nulls: int (0 or 1)
:returns: list
"""
if md5:
data = get_file(md5)
if string is None or string == '':
plaintext_list = [
'This program',
'kernel32',
'KERNEL32',
'http',
'svchost',
'Microsoft',
'PE for WIN32',
'startxref',
'!This program cannot be run in DOS mode',
'\xD0\xCF\x11\xE0\xA1\xB1\x1a\xE1',
'D\x00o\x00c\x00u\x00m\x00e\x00n\x00t\x00 \x00S\x00u\x00m\x00m\x00a\x00r\x00y\x00 \x00I\x00n\x00f\x00o\x00r\x00m\x00a\x00t\x00i\x00o\x00n',
]
else:
plaintext_list = ["%s" % string]
results = []
for plaintext in plaintext_list:
        for i in range(256):
xord_string = xor_string(data=plaintext,
key=i,
null=skip_nulls)
if xord_string in data:
if i not in results:
results.append(i)
results.sort()
return results
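# Hedged example: recover the key for a buffer that was XORed with 0x42.
#     encoded = xor_string(data="!This program cannot be run in DOS mode",
#                          key=0x42)
#     xor_search(data=encoded)  ->  [66]  (possibly among other candidates)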
def make_list(s):
"""
    Make a list out of a string of data that needs to be parsed using
:class:`csv.reader`.
:param s: The string to convert
:type s: str
:returns: list
"""
l = []
l.append(s)
a = csv.reader(l, skipinitialspace=True)
b = None
for i in a:
b = i
return b
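# Hedged example: leading spaces are stripped (skipinitialspace=True) and
# quoted fields survive the split:
#     make_list('a, b, "c,d"')  ->  ['a', 'b', 'c,d']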
def remove_html_tags(data):
"""
Remove html tags from a string.
:param data: The string to parse.
:type data: str
:returns: str
"""
p = re.compile(r'<.*?>')
return p.sub('', data)
def datestring_to_isodate(datestring):
"""
Parse a string using :class:`dateutil` and return the results.
:param datestring: The date string to parse.
:returns: datetime.datetime
"""
return parse(datestring, fuzzy=True)
def clean_dict(dict_, keys_to_remove):
"""
Remove keys we don't want to display to the user.
Can also be used to remove keys from user input that we want to manage
ourselves. In the latter case, be sure the query is using $set and not
completely replacing the document, otherwise keys added elsewhere might
be lost.
:param dict_: The dictionary to iterate over.
:type dict_: dict
:param keys_to_remove: The list of keys we want to remove.
:type keys_to_remove: list
"""
for key in keys_to_remove:
if key in dict_:
del dict_[key]
def json_handler(obj):
"""
Handles converting datetimes and Mongo ObjectIds to string.
Usage: json.dumps(..., default=json_handler)
:param obj: The object that needs converting.
:type obj: datetime.datetime, ObjectId
:returns: str
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, ObjectId):
return str(obj)
def generate_qrcode(data, size):
"""
Generate a QR Code Image from a string.
Will attempt to import qrcode (which also requires Pillow) and io. If
this fails we will return None.
:param data: data to be converted into a QR Code
:type data: str
:param size: tuple of (width, height) in pixels to resize the QR Code
:type size: tuple
:returns: str in base64 format
"""
try:
import qrcode, io
    except ImportError:
return None
a = io.BytesIO()
qr = qrcode.QRCode()
qr.add_data(data)
img = qr.make_image().resize(size)
img.save(a, 'PNG')
qr_img = a.getvalue().encode('base64').replace('\n', '')
a.close()
return qr_img
def validate_md5_checksum(md5_checksum):
"""
Validates that string is truly an MD5.
:param md5_checksum: The string to validate.
:type md5_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{32}$", md5_checksum) == None:
retVal['message'] += "The MD5 digest needs to be 32 hex characters."
retVal['success'] = False
return retVal
def validate_sha1_checksum(sha1_checksum):
"""
Validates that string is truly a SHA1.
:param sha1_checksum: str
:return: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{40}$", sha1_checksum) == None:
retVal['message'] += "The SHA1 digest needs to be 40 hex characters."
retVal['success'] = False
return retVal
def validate_sha256_checksum(sha256_checksum):
"""
Validates that string is truly a SHA256.
:param sha256_checksum: The string to validate.
:type sha256_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{64}$", sha256_checksum) == None:
retVal['message'] += "The SHA256 digest needs to be 64 hex characters."
retVal['success'] = False
return retVal
def detect_pcap(data):
"""
Detect if the data has the magic numbers for a PCAP.
:param data: The data to inspect.
:type data: str
:returns: bool
"""
magic = ''.join(x.encode('hex') for x in data[:4])
if magic in (
'a1b2c3d4', #identical
'd4c3b2a1', #swapped
        '4d3cb2a1', #swapped, nanosecond resolution
'a1b23c4d', #nanosecond resolution
'0a0d0d0a', #pcap-ng
):
return True
else:
return False
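# Hedged example: a classic little-endian pcap global header starts with the
# byte-swapped magic d4 c3 b2 a1, so
#     detect_pcap('\xd4\xc3\xb2\xa1\x02\x00\x04\x00')  ->  True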
#!/usr/bin/env python
#
# Copyright (C) 2014
# Brian Caswell <bmc@lungetech.com>
# Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
import string
import struct
import os
# NOTE: this is super inefficient, don't care.
def random_bytes(min, max, mod=255):
return ''.join(chr(choice(range(mod))) for _ in range(randint(min,max)))
def is_byte_fragmented(byte):
return (ord(byte) & 0x80)
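# Hedged example: the fragment flag is the most significant bit of the byte:
#     is_byte_fragmented('\x80')  ->  128 (truthy)
#     is_byte_fragmented('\x7f')  ->  0   (falsy)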
# -----------------
# state transitions
# -----------------
# CONNTERM
# |- OTP (on receipt of PKT_CONNTERM)
# |- else -> CONNTERM
# OTP
# |- MSG (on receipt of correct-length packet, any contents)
# |- else -> OTP
# MSG
# |- OTP (on receipt of PKT_CONNTERM)
# |- else -> MSG
class QuietSquare(Actions):
GLOBAL_DEBUG = False
GLOBAL_POV = False
# States
STATE_CONNTERM = 0
STATE_OTP = 1
STATE_MSG = 2
# Packet lengths
BUF_RX_SZ = 256
CONNTERM_SZ = BUF_RX_SZ
OTP_SZ = 16
MSG_SZ = BUF_RX_SZ
# Packet contents
PKT_INVALID_SZ = "\xFF" + "\xDD" * (BUF_RX_SZ-1)
PKT_CONNTERM = "\x00" * CONNTERM_SZ
PKT_CONNTERM_ACK = "\xFF" + "\xCC" * (CONNTERM_SZ-1)
PKT_CONNTERM_ERR = "\xFF" + "\xFF" * (CONNTERM_SZ-1)
PKT_OTP_ACK = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
PKT_EXIT = "\xAA" * 42
PKT_EXIT_ACK = "\xFF" + "\xAA" * 41
# Maximum number of recursions before vuln is triggered.
MAX_DEPTH = 125 # There's an off-by-one here, but it doesn't matter.
def dispatch(self):
"""
Dummy node. Acts as central node for more interesting connterm, otp
and msg nodes.
"""
pass
def refresh_otp(self):
"""
        A seldom-visited node that refreshes the OTP.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "REFRESH_OTP node"
self.state["PKT_OTP"] = random_bytes(16, 16)
def connterm(self):
"""
This node wants to send a CONNTERM pkt regardless of current CB state.
Our expectation on this one is simple: all states should behave the
same way: advance / revert to STATE_OTP.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "CONNTERM node"
self.write(self.PKT_CONNTERM)
# We should only expect the CONNTERM ACK if the CB was in CONNTERM state.
        # Initially unintentional, but it makes the protocol slightly
        # more conditional.
if self.STATE_CONNTERM == self.state["state"]:
self.read(length=len(self.PKT_CONNTERM_ACK), expect=self.PKT_CONNTERM_ACK)
# If we're in OTP state already, this would be an invalid size.
if self.STATE_OTP == self.state["state"]:
self.read(length=len(self.PKT_INVALID_SZ), expect=self.PKT_INVALID_SZ)
self.state["state"] = self.STATE_OTP
def otp(self):
"""
The node wants to populate the OTP regardless of current CB state.
Expected response should differ based on state.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "OTP node"
if self.STATE_CONNTERM == self.state["state"]:
# If the CB is in CONNTERM state, it expects a packet with a length
# of CONNTERM_SZ. Any other size (including OTP_SZ) will result in
# PKT_INVALID_SZ.
if DEBUG:
print "send OTP -> CONNTERM state"
self.write(self.state["PKT_OTP"])
self.read(length=len(self.PKT_INVALID_SZ), expect=self.PKT_INVALID_SZ)
elif self.STATE_OTP == self.state["state"]:
if DEBUG:
print "send OTP -> OTP state"
self.write(self.state["PKT_OTP"])
self.read(length=self.OTP_SZ, expect=self.PKT_OTP_ACK)
self.state["otp_on_cb"] = self.state["PKT_OTP"]
self.state["state"] = self.STATE_MSG # advance to MSG state
elif self.STATE_MSG == self.state["state"]:
# If the CB is in MSG state, it expects a packet with a length
# of MSG_SZ. Any other size (including OTP_SZ) will result in
# PKT_INVALID_SZ.
if DEBUG:
print "send OTP -> MSG state"
self.write(self.state["PKT_OTP"])
self.read(length=len(self.PKT_INVALID_SZ), expect=self.PKT_INVALID_SZ)
else:
print "[E] THIS SHOULD NEVER HAPPEN"
def msg(self):
"""
The node wants to send a MSG regardless of current CB state.
Expected response should differ based on state.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "MSG node"
while True:
# Start with a bunch of random bytes, expect CB to drop bytes that
            # have their most significant bit set and then XOR the remaining
# with the OTP (looped over).
# This is a simplification of the FRAG protocol, see service.c
# for a discussion on implications.
PKT_MSG = random_bytes(256, 256)
# Avoid collision with CONNTERM
if "\x00" == PKT_MSG[0]:
PKT_MSG = "\x42" + PKT_MSG[1:]
            # We need to make sure we don't accidentally trigger the vuln. The
# vuln is triggered when a sufficient depth is reached. We take a
# simplified avoidance approach here and just limit the total
# number of recursions. In reality, stack may be unwound in between
# recursions. This check will catch all vuln-triggering messages -
# and then some.
depth = 0
for byte in PKT_MSG:
if is_byte_fragmented(byte):
depth = depth + 1
# Re-try based on whether we want to trigger the vuln.
if not self.GLOBAL_POV and self.MAX_DEPTH < depth:
if DEBUG:
print "MAX_DEPTH exceeded (depth = %d) and we're not making a POV; looping..." % depth
continue
if self.GLOBAL_POV and self.MAX_DEPTH >= depth:
if DEBUG:
print "MAX_DEPTH not exceeed (depth = %d) and we're making a POV; looping..." % depth
continue
# Cases:
# 1) We are not making a POV and the depth is less than MAX_DEPTH.
# 2) We are making a POV and the depth is greater than MAX_DEPTH.
break
# If we've broken the above loop, we have a PKT_MSG that will not
# trigger the vuln. Now we must calculate the CB's expected response.
# If the MSG begins with "\xFF", this is easy. \xFF signifies error
# MSG, which the CB will reflect verbatim to us.
if "\xFF" == PKT_MSG[0]:
PKT_MSG_RESPONSE = PKT_MSG
# Otherwise, we have to emulate the CB's actions.
# First, we drop all bytes that have the fragment bit set.
else:
PKT_MSG_RESPONSE = ""
for byte in PKT_MSG:
if not is_byte_fragmented(byte):
PKT_MSG_RESPONSE += byte
# Then, we XOR with OTP (on cb), looped over.
# Yes, everything about this snippet is bad.
for i in range(len(PKT_MSG_RESPONSE)):
PKT_MSG_RESPONSE = \
PKT_MSG_RESPONSE[:i] + \
chr(ord(PKT_MSG_RESPONSE[i]) ^ ord(self.state["otp_on_cb"][i % self.OTP_SZ])) + \
PKT_MSG_RESPONSE[i+1:]
if self.STATE_CONNTERM == self.state["state"]:
if DEBUG:
print "send MSG -> CONNTERM state"
# CONNTERM and MSG packet lengths are the same, but a MSG PKT will
# never begin with "\x00", so the two cannot be confused.
# As such, we should expect PKT_CONNTERM_ERR.
self.write(PKT_MSG)
self.read(length=len(self.PKT_CONNTERM_ERR), expect=self.PKT_CONNTERM_ERR)
elif self.STATE_OTP == self.state["state"]:
if DEBUG:
print "send MSG -> OTP state"
# MSG and OTP packets are different lengths, so we should expect
# PKT_INVALID_SZ.
self.write(PKT_MSG)
self.read(length=len(self.PKT_INVALID_SZ), expect=self.PKT_INVALID_SZ)
elif self.STATE_MSG == self.state["state"]:
if DEBUG:
print "send MSG -> MSG state"
# The most interesting case. We send a MSG and the CB is in the MSG
# state. Vulns be triggered thar.
self.write(PKT_MSG)
self.read(length=len(PKT_MSG_RESPONSE), expect=PKT_MSG_RESPONSE)
else:
if DEBUG:
print "[E] THIS SHOULD NEVER HAPPEN"
def finish(self):
"""
Invoke exit functionality, terminating test.
Must always be last node executed.
"""
DEBUG = True and self.GLOBAL_DEBUG
if DEBUG:
print "exit node"
self.write(self.PKT_EXIT)
self.read(length=len(self.PKT_EXIT_ACK), expect=self.PKT_EXIT_ACK)
return -1
def start(self):
"""
Initialize state.
"""
self.state["PKT_OTP"] = random_bytes(16, 16)
self.state["otp_on_cb"] = "\x00" * self.OTP_SZ
self.state["state"] = self.STATE_CONNTERM
'''Convenience functions for use in package hooks.'''
# Copyright (C) 2008 - 2011 Canonical Ltd.
# Authors:
# Matt Zimmerman <mdz@canonical.com>
# Brian Murray <brian@ubuntu.com>
# Martin Pitt <martin.pitt@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
import subprocess
import hashlib
import os
import sys
import time
import calendar
import datetime
import glob
import re
import stat
import base64
import tempfile
import shutil
import locale
import xml.dom, xml.dom.minidom
from gi.repository import Gio, GLib
from apport.packaging_impl import impl as packaging  # used by attach_conffiles()
import apport
try:
_path_key_trans = ''.maketrans('#/-_+ ', '....._')
except AttributeError:
# Python 2 variant
import string
_path_key_trans = string.maketrans('#/-_+ ', '....._')
def path_to_key(path):
'''Generate a valid report key name from a file path.
This will replace invalid punctuation symbols with valid ones.
'''
if sys.version[0] >= '3':
if type(path) == type(b''):
path = path.decode('UTF-8')
else:
if type(path) != type(b''):
path = path.encode('UTF-8')
return path.translate(_path_key_trans)
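# Hedged example: '#', '/', '-', '_' and '+' map to '.', while ' ' maps
# to '_':
#     path_to_key('/etc/apt/sources.list')  ->  '.etc.apt.sources.list'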
def attach_file_if_exists(report, path, key=None, overwrite=True):
'''Attach file contents if file exists.
If key is not specified, the key name will be derived from the file
name with path_to_key().
If overwrite is True, an existing key will be updated. If it is False, a
new key with '_' appended will be added instead.
'''
if not key:
key = path_to_key(path)
if os.path.exists(path):
attach_file(report, path, key, overwrite)
def read_file(path):
'''Return the contents of the specified path.
Upon error, this will deliver a text representation of the error,
instead of failing.
'''
try:
with open(path, 'rb') as f:
return f.read().strip()
except Exception as e:
return 'Error: ' + str(e)
def attach_file(report, path, key=None, overwrite=True):
'''Attach a file to the report.
If key is not specified, the key name will be derived from the file
name with path_to_key().
If overwrite is True, an existing key will be updated. If it is False, a
new key with '_' appended will be added instead.
'''
if not key:
key = path_to_key(path)
# Do not clobber existing keys
if not overwrite:
while key in report:
key += '_'
report[key] = read_file(path)
def attach_conffiles(report, package, conffiles=None, ui=None):
'''Attach information about any modified or deleted conffiles.
If conffiles is given, only this subset will be attached. If ui is given,
ask whether the contents of the file may be added to the report; if this is
denied, or there is no UI, just mark it as "modified" in the report.
'''
modified = packaging.get_modified_conffiles(package)
for path, contents in modified.items():
if conffiles and path not in conffiles:
continue
key = 'modified.conffile.' + path_to_key(path)
if contents == '[deleted]':
report[key] = contents
continue
if ui:
response = ui.yesno('It seems you have modified the contents of "%s". Would you like to add the contents of it to your bug report?' % path)
if response:
report[key] = contents
else:
report[key] = '[modified]'
else:
report[key] = '[modified]'
mtime = datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
report['mtime.conffile.' + path_to_key(path)] = mtime.isoformat()
def attach_upstart_overrides(report, package):
'''Attach information about any Upstart override files'''
try:
files = apport.packaging.get_files(package)
except ValueError:
return
for file in files:
if os.path.exists(file) and file.startswith('/etc/init/'):
override = file.replace('.conf', '.override')
key = 'upstart.' + override.replace('/etc/init/', '')
attach_file_if_exists(report, override, key)
def attach_dmesg(report):
'''Attach information from the kernel ring buffer (dmesg).
This will not overwrite already existing information.
'''
try:
if not report.get('BootDmesg', '').strip():
with open('/var/log/dmesg') as f:
report['BootDmesg'] = f.read()
except IOError:
pass
if not report.get('CurrentDmesg', '').strip():
dmesg = command_output(['sh', '-c', 'dmesg | comm -13 --nocheck-order /var/log/dmesg -'])
# if an initial message was truncated by the ring buffer, skip over it
first_newline = dmesg.find(b'\n[')
if first_newline != -1:
dmesg = dmesg[first_newline+1:]
report['CurrentDmesg'] = dmesg
def attach_dmi(report):
dmi_dir = '/sys/class/dmi/id'
if os.path.isdir(dmi_dir):
for f in os.listdir(dmi_dir):
p = '%s/%s' % (dmi_dir, f)
st = os.stat(p)
# ignore the root-only ones, since they have serial numbers
if not stat.S_ISREG(st.st_mode) or (st.st_mode & 4 == 0):
continue
if f in ('subsystem', 'uevent'):
continue
try:
with open(p) as fd:
value = fd.read().strip()
except (OSError, IOError):
continue
if value:
report['dmi.' + f.replace('_', '.')] = value
def attach_hardware(report):
'''Attach a standard set of hardware-related data to the report, including:
- kernel dmesg (boot and current)
- /proc/interrupts
- /proc/cpuinfo
- /proc/cmdline
- /proc/modules
- lspci -vvnn
- lsusb
- devices from udev
- DMI information from /sys
- prtconf (sparc)
- pccardctl status/ident
'''
attach_dmesg(report)
attach_file(report, '/proc/interrupts', 'ProcInterrupts')
attach_file(report, '/proc/cpuinfo', 'ProcCpuinfo')
attach_file(report, '/proc/cmdline', 'ProcKernelCmdLine')
attach_file(report, '/proc/modules', 'ProcModules')
attach_file(report, '/var/log/udev', 'UdevLog')
if os.path.exists('/sys/bus/pci'):
report['Lspci'] = command_output(['lspci','-vvnn'])
report['Lsusb'] = command_output(['lsusb'])
report['UdevDb'] = command_output(['udevadm', 'info', '--export-db'])
# anonymize partition labels
l = report['UdevLog'].decode('UTF-8', errors='replace')
l = re.sub('ID_FS_LABEL=(.*)', 'ID_FS_LABEL=<hidden>', l)
l = re.sub('ID_FS_LABEL_ENC=(.*)', 'ID_FS_LABEL_ENC=<hidden>', l)
l = re.sub('by-label/(.*)', 'by-label/<hidden>', l)
report['UdevLog'] = l.encode('UTF-8')
attach_dmi(report)
# Use the hardware information to create a machine type.
if 'dmi.sys.vendor' in report and 'dmi.product.name' in report:
report['MachineType'] = '%s %s' % (report['dmi.sys.vendor'],
report['dmi.product.name'])
if command_available('prtconf'):
report['Prtconf'] = command_output(['prtconf'])
if command_available('pccardctl'):
out = command_output(['pccardctl', 'status']).strip()
if out:
report['PccardctlStatus'] = out
out = command_output(['pccardctl', 'ident']).strip()
if out:
report['PccardctlIdent'] = out
def attach_alsa(report):
'''Attach ALSA subsystem information to the report.
(loosely based on http://www.alsa-project.org/alsa-info.sh)
'''
attach_file_if_exists(report, os.path.expanduser('~/.asoundrc'),
'UserAsoundrc')
attach_file_if_exists(report, os.path.expanduser('~/.asoundrc.asoundconf'),
'UserAsoundrcAsoundconf')
attach_file_if_exists(report, '/etc/asound.conf')
attach_file_if_exists(report, '/proc/asound/version', 'AlsaVersion')
attach_file(report, '/proc/cpuinfo', 'ProcCpuinfo')
report['AlsaDevices'] = command_output(['ls','-l','/dev/snd/'])
report['AplayDevices'] = command_output(['aplay','-l'])
report['ArecordDevices'] = command_output(['arecord','-l'])
report['PciMultimedia'] = pci_devices(PCI_MULTIMEDIA)
cards = []
if os.path.exists('/proc/asound/cards'):
with open('/proc/asound/cards') as fd:
for line in fd:
if ']:' in line:
fields = line.lstrip().split()
cards.append(int(fields[0]))
for card in cards:
key = 'Card%d.Amixer.info' % card
report[key] = command_output(['amixer', '-c', str(card), 'info'])
key = 'Card%d.Amixer.values' % card
report[key] = command_output(['amixer', '-c', str(card)])
for codecpath in glob.glob('/proc/asound/card%d/codec*' % card):
if os.path.isfile(codecpath):
codec = os.path.basename(codecpath)
key = 'Card%d.Codecs.%s' % (card, path_to_key(codec))
attach_file(report, codecpath, key=key)
elif os.path.isdir(codecpath):
codec = os.path.basename(codecpath)
for name in os.listdir(codecpath):
path = os.path.join(codecpath, name)
key = 'Card%d.Codecs.%s.%s' % (card, path_to_key(codec), path_to_key(name))
attach_file(report, path, key)
report['AudioDevicesInUse'] = command_output(
['fuser','-v'] + glob.glob('/dev/dsp*')
+ glob.glob('/dev/snd/*')
+ glob.glob('/dev/seq*') )
if os.path.exists('/usr/bin/pacmd'):
report['PulseSinks'] = command_output(['pacmd', 'list-sinks'])
report['PulseSources'] = command_output(['pacmd', 'list-sources'])
attach_dmi(report)
attach_dmesg(report)
# This seems redundant with the amixer info, do we need it?
#report['AlsactlStore'] = command-output(['alsactl', '-f', '-', 'store'])
def command_available(command):
'''Is given command on the executable search path?'''
if 'PATH' not in os.environ:
return False
path = os.environ['PATH']
for element in path.split(os.pathsep):
if not element:
continue
filename = os.path.join(element, command)
if os.path.isfile(filename) and os.access(filename, os.X_OK):
return True
return False
def command_output(command, input = None, stderr = subprocess.STDOUT):
'''Try to execute given command (array) and return its stdout.
In case of failure, a textual error gets returned. This function forces
LC_MESSAGES to C, to avoid translated output in bug reports.
'''
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
sp = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=stderr, close_fds=True, env=env)
except OSError as e:
return 'Error: ' + str(e)
out = sp.communicate(input)[0]
if sp.returncode == 0:
return out.strip()
else:
return 'Error: command %s failed with exit code %i: %s' % (
str(command), sp.returncode, out)
def _root_command_prefix():
if os.getuid() == 0:
prefix = []
elif os.getenv('DISPLAY') and \
subprocess.call(['which', 'kdesudo'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0 and \
subprocess.call(['pgrep', '-x', '-u', str(os.getuid()), 'ksmserver'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0:
prefix = ['kdesudo', '--desktop', '/usr/share/applications/apport-kde-mime.desktop', '--']
elif os.getenv('DISPLAY') and \
subprocess.call(['which', 'gksu'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0 and \
subprocess.call(['pgrep', '-x', '-u', str(os.getuid()), 'gnome-panel|gconfd-2'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0:
prefix = ['gksu', '-D', 'Apport', '--']
else:
prefix = ['sudo']
return prefix
def root_command_output(command, input = None, stderr = subprocess.STDOUT):
'''Try to execute given command (array) as root and return its stdout.
This passes the command through gksu, kdesudo, or sudo, depending on the
running desktop environment.
In case of failure, a textual error gets returned.
'''
assert type(command) == type([]), 'command must be a list'
return command_output(_root_command_prefix() + command, input, stderr)
def attach_root_command_outputs(report, command_map):
'''Execute multiple commands as root and put their outputs into report.
command_map is a keyname -> 'shell command' dictionary with the commands to
run. They are all run through /bin/sh, so you need to take care of shell
escaping yourself. To include stderr output of a command, end it with
"2>&1".
Just like root_command_output() this will use gksu, kdesudo, or sudo for
gaining root privileges, depending on the running desktop environment.
    This is preferable to using root_command_output() multiple times, as that
will ask for the password every time.
'''
workdir = tempfile.mkdtemp()
try:
# create a shell script with all the commands
script_path = os.path.join(workdir, ':script:')
script = open(script_path, 'w')
for keyname, command in command_map.items():
assert hasattr(command, 'strip'), 'command must be a string (shell command)'
# use "| cat" here, so that we can end commands with 2>&1
# (otherwise it would have the wrong redirection order)
script.write('%s | cat > %s\n' % (command, os.path.join(workdir, keyname)))
script.close()
# run script
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
env['LANGUAGE'] = ''
sp = subprocess.Popen(_root_command_prefix() + ['/bin/sh', script_path],
close_fds=True, env=env)
sp.wait()
# now read back the individual outputs
for keyname in command_map:
f = open(os.path.join(workdir, keyname))
buf = f.read().strip()
if buf:
report[keyname] = buf
f.close()
finally:
shutil.rmtree(workdir)
def recent_syslog(pattern):
'''Extract recent messages from syslog which match a regex.
pattern should be a "re" object.
'''
return recent_logfile('/var/log/syslog', pattern)
def recent_logfile(logfile, pattern):
'''Extract recent messages from a logfile which match a regex.
pattern should be a "re" object.
'''
lines = ''
try:
with open(logfile) as f:
for line in f:
if pattern.search(line):
lines += line
except IOError:
return ''
return lines
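# Hedged example (the report key is an illustrative assumption):
#     report['NetworkManagerSyslog'] = recent_syslog(re.compile(r'NetworkManager'))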
PCI_MASS_STORAGE = 0x01
PCI_NETWORK = 0x02
PCI_DISPLAY = 0x03
PCI_MULTIMEDIA = 0x04
PCI_MEMORY = 0x05
PCI_BRIDGE = 0x06
PCI_SIMPLE_COMMUNICATIONS = 0x07
PCI_BASE_SYSTEM_PERIPHERALS = 0x08
PCI_INPUT_DEVICES = 0x09
PCI_DOCKING_STATIONS = 0x0a
PCI_PROCESSORS = 0x0b
PCI_SERIAL_BUS = 0x0c
def pci_devices(*pci_classes):
'''Return a text dump of PCI devices attached to the system.'''
if not pci_classes:
return command_output(['lspci', '-vvnn'])
result = ''
output = command_output(['lspci','-vvmmnn'])
for paragraph in output.split(b'\n\n'):
pci_class = None
pci_subclass = None
slot = None
        for line in paragraph.split(b'\n'):
            try:
                key, value = line.split(b':', 1)
            except ValueError:
                continue
            value = value.strip()
            key = key.strip()
            # command_output() returns bytes, so compare against bytes
            # literals (this also works unchanged on Python 2)
            if key == b'Class':
                n = int(value[-5:-1], 16)
                pci_class = (n & 0xff00) >> 8
                pci_subclass = (n & 0x00ff)
            elif key == b'Slot':
                slot = value
        if pci_class and slot and pci_class in pci_classes:
            if result:
                result += '\n\n'
            result += command_output(
                ['lspci', '-vvnns', slot]).decode('UTF-8', 'replace').strip()
return result
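# Hedged example: dump only network and display adapters.
#     report['PciNetworkDisplay'] = pci_devices(PCI_NETWORK, PCI_DISPLAY)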
def attach_mac_events(report):
'''Attach MAC information and events to the report.'''
mac_regex = 'audit\(|apparmor|selinux|security'
mac_re = re.compile(mac_regex, re.IGNORECASE)
aa_denied_regex = 'apparmor="DENIED"'
aa_denied_re = re.compile(aa_denied_regex, re.IGNORECASE)
if os.path.exists('/var/log/kern.log'):
report['KernLog'] = recent_logfile('/var/log/kern.log', mac_re)
elif os.path.exists('/var/log/messages'):
report['KernLog'] = recent_logfile('/var/log/messages', mac_re)
if os.path.exists('/var/run/auditd.pid'):
attach_root_command_outputs(report, {'AuditLog': 'egrep "' + mac_regex + '" /var/log/audit/audit.log'})
attach_file(report, '/proc/version_signature', 'ProcVersionSignature')
attach_file(report, '/proc/cmdline', 'ProcCmdline')
if re.search(aa_denied_re, report.get('KernLog', '')) or re.search(aa_denied_re, report.get('AuditLog', '')):
tags = report.get('Tags', '')
if tags:
tags += ' '
report['Tags'] = tags + 'apparmor'
def shared_libraries(path):
    '''Returns a set of strings containing the sonames of shared libraries
with which the specified binary is linked.'''
libs = set()
for line in command_output(['ldd', path]).split('\n'):
try:
lib, rest = line.split('=>', 1)
except ValueError:
continue
lib = lib.strip()
libs.add(lib)
return libs
def links_with_shared_library(path, lib):
'''Returns True if the binary at path links with the library named lib.
path should be a fully qualified path (e.g. report['ExecutablePath'])
lib may be of the form 'lib<name>' or 'lib<name>.so.<version>'
'''
libs = shared_libraries(path)
if lib in libs: return True
for linked_lib in libs:
if linked_lib.startswith(lib + '.so.'): return True
return False
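# Hedged example ('/usr/bin/ssh' is an assumed path; the result depends on
# how the binary was linked):
#     links_with_shared_library('/usr/bin/ssh', 'libcrypto')  ->  likely True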
def __drm_con_info(con):
info = ''
for f in os.listdir(con):
path = os.path.join(con, f)
if f == 'uevent' or not os.path.isfile(path):
continue
val = open(path).read().strip()
# format some well-known attributes specially
if f == 'modes':
val = val.replace('\n', ' ')
if f == 'edid':
val = base64.b64encode(val)
f += '-base64'
info += '%s: %s\n' % (f, val)
return info
def attach_drm_info(report):
'''Add information about DRM hardware.
Collect information from /sys/class/drm/.
'''
drm_dir = '/sys/class/drm'
if not os.path.isdir(drm_dir):
return
for f in os.listdir(drm_dir):
con = os.path.join(drm_dir, f)
if os.path.exists(os.path.join(con, 'enabled')):
# DRM can set an arbitrary string for its connector paths.
report['DRM.' + path_to_key(f)] = __drm_con_info(con)
# end
| |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic wrapping of UI Automation elements"""
from __future__ import unicode_literals
from __future__ import print_function
import six
import time
import warnings
import comtypes
import threading
from .. import backend
from .. import WindowNotFoundError # noqa #E402
from ..timings import Timings
from .win_base_wrapper import WinBaseWrapper
from .hwndwrapper import HwndWrapper
from ..base_wrapper import BaseMeta
from ..windows.uia_defines import IUIA
from ..windows import uia_defines as uia_defs
from ..windows.uia_element_info import UIAElementInfo, elements_from_uia_array
# region PATTERNS
AutomationElement = IUIA().ui_automation_client.IUIAutomationElement
DockPattern = IUIA().ui_automation_client.IUIAutomationDockPattern
ExpandCollapsePattern = IUIA().ui_automation_client.IUIAutomationExpandCollapsePattern
GridItemPattern = IUIA().ui_automation_client.IUIAutomationGridItemPattern
GridPattern = IUIA().ui_automation_client.IUIAutomationGridPattern
InvokePattern = IUIA().ui_automation_client.IUIAutomationInvokePattern
ItemContainerPattern = IUIA().ui_automation_client.IUIAutomationItemContainerPattern
LegacyIAccessiblePattern = IUIA().ui_automation_client.IUIAutomationLegacyIAccessiblePattern
MultipleViewPattern = IUIA().ui_automation_client.IUIAutomationMultipleViewPattern
RangeValuePattern = IUIA().ui_automation_client.IUIAutomationRangeValuePattern
ScrollItemPattern = IUIA().ui_automation_client.IUIAutomationScrollItemPattern
ScrollPattern = IUIA().ui_automation_client.IUIAutomationScrollPattern
SelectionItemPattern = IUIA().ui_automation_client.IUIAutomationSelectionItemPattern
SelectionPattern = IUIA().ui_automation_client.IUIAutomationSelectionPattern
SynchronizedInputPattern = IUIA().ui_automation_client.IUIAutomationSynchronizedInputPattern
TableItemPattern = IUIA().ui_automation_client.IUIAutomationTableItemPattern
TablePattern = IUIA().ui_automation_client.IUIAutomationTablePattern
TextPattern = IUIA().ui_automation_client.IUIAutomationTextPattern
TogglePattern = IUIA().ui_automation_client.IUIAutomationTogglePattern
TransformPattern = IUIA().ui_automation_client.IUIAutomationTransformPattern
ValuePattern = IUIA().ui_automation_client.IUIAutomationValuePattern
VirtualizedItemPattern = IUIA().ui_automation_client.IUIAutomationVirtualizedItemPattern
WindowPattern = IUIA().ui_automation_client.IUIAutomationWindowPattern
# endregion
# =========================================================================
_friendly_classes = {
'Custom': None,
'DataGrid': 'ListView',
'DataItem': 'DataItem',
'Document': None, # TODO: this is RichTextBox
'Group': 'GroupBox',
'Header': None,
'HeaderItem': None,
'Hyperlink': None,
'Image': None,
'List': 'ListBox',
'ListItem': 'ListItem',
'MenuBar': 'Menu',
'Menu': 'Menu',
'MenuItem': 'MenuItem',
'Pane': None,
'ProgressBar': 'Progress',
'ScrollBar': None,
'Separator': None,
'Slider': None,
'Spinner': 'UpDown',
'SplitButton': None,
'Tab': 'TabControl',
'Table': None,
'Text': 'Static',
'Thumb': None,
'TitleBar': None,
'ToolBar': 'Toolbar',
'ToolTip': 'ToolTips',
'Tree': 'TreeView',
'TreeItem': 'TreeItem',
'Window': 'Dialog',
}
# =========================================================================
class LazyProperty(object):
"""
A lazy evaluation of an object attribute.
The property should represent immutable data, as it replaces itself.
Provided by: http://stackoverflow.com/a/6849299/1260742
"""
def __init__(self, fget):
"""Init the property name and method to calculate the property"""
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
"""Replace the property itself on a first access"""
if obj is None:
return None
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
lazy_property = LazyProperty
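# Minimal usage sketch (the class name is illustrative): the first attribute
# access runs fget, and the result then shadows the descriptor on the
# instance, so fget is never called again for that object:
#
#     class Demo(object):
#         @lazy_property
#         def answer(self):
#             print('computing...')
#             return 42
#
#     d = Demo()
#     d.answer   # prints 'computing...' and returns 42
#     d.answer   # returns the cached 42 without calling fget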
# =========================================================================
class UiaMeta(BaseMeta):
"""Metaclass for UiaWrapper objects"""
control_type_to_cls = {}
def __init__(cls, name, bases, attrs):
"""Register the control types"""
BaseMeta.__init__(cls, name, bases, attrs)
for t in cls._control_types:
UiaMeta.control_type_to_cls[t] = cls
@staticmethod
def find_wrapper(element):
"""Find the correct wrapper for this UIA element"""
# Check for a more specific wrapper in the registry
try:
wrapper_match = UiaMeta.control_type_to_cls[element.control_type]
except KeyError:
# Set a general wrapper by default
wrapper_match = UIAWrapper
return wrapper_match
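# Illustrative sketch (the subclass is hypothetical): declaring
# _control_types in a subclass is enough for UiaMeta.__init__ to register
# it, so find_wrapper() returns it for matching elements instead of the
# generic UIAWrapper:
#
#     class ButtonWrapper(UIAWrapper):   # metaclass is inherited
#         _control_types = ['Button']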
# =========================================================================
@six.add_metaclass(UiaMeta)
class UIAWrapper(WinBaseWrapper):
"""
Default wrapper for User Interface Automation (UIA) controls.
All other UIA wrappers are derived from this.
This class wraps a lot of functionality of underlying UIA features
for working with windows.
Most of the methods apply to every single element type. For example
you can click() on any element.
"""
_control_types = []
# ------------------------------------------------------------
def __new__(cls, element_info):
"""Construct the control wrapper"""
return super(UIAWrapper, cls)._create_wrapper(cls, element_info, UIAWrapper)
# -----------------------------------------------------------
def __init__(self, element_info):
"""
Initialize the control
* **element_info** is either a valid UIAElementInfo or it can be an
instance or subclass of UIAWrapper.
If the handle is not valid then an InvalidWindowHandle error
is raised.
"""
WinBaseWrapper.__init__(self, element_info, backend.registry.backends['uia'])
# ------------------------------------------------------------
@lazy_property
def iface_expand_collapse(self):
"""Get the element's ExpandCollapse interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ExpandCollapse")
# ------------------------------------------------------------
@lazy_property
def iface_selection(self):
"""Get the element's Selection interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Selection")
# ------------------------------------------------------------
@lazy_property
def iface_selection_item(self):
"""Get the element's SelectionItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "SelectionItem")
# ------------------------------------------------------------
@lazy_property
def iface_invoke(self):
"""Get the element's Invoke interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Invoke")
# ------------------------------------------------------------
@lazy_property
def iface_toggle(self):
"""Get the element's Toggle interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Toggle")
# ------------------------------------------------------------
@lazy_property
def iface_text(self):
"""Get the element's Text interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Text")
# ------------------------------------------------------------
@lazy_property
def iface_value(self):
"""Get the element's Value interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Value")
# ------------------------------------------------------------
@lazy_property
def iface_range_value(self):
"""Get the element's RangeValue interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "RangeValue")
# ------------------------------------------------------------
@lazy_property
def iface_grid(self):
"""Get the element's Grid interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Grid")
# ------------------------------------------------------------
@lazy_property
def iface_grid_item(self):
"""Get the element's GridItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "GridItem")
# ------------------------------------------------------------
@lazy_property
def iface_table(self):
"""Get the element's Table interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Table")
# ------------------------------------------------------------
@lazy_property
def iface_table_item(self):
"""Get the element's TableItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "TableItem")
# ------------------------------------------------------------
@lazy_property
def iface_scroll_item(self):
"""Get the element's ScrollItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ScrollItem")
# ------------------------------------------------------------
@lazy_property
def iface_scroll(self):
"""Get the element's Scroll interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Scroll")
# ------------------------------------------------------------
@lazy_property
def iface_transform(self):
"""Get the element's Transform interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Transform")
# ------------------------------------------------------------
@lazy_property
def iface_transformV2(self):
"""Get the element's TransformV2 interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "TransformV2")
# ------------------------------------------------------------
@lazy_property
def iface_window(self):
"""Get the element's Window interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Window")
# ------------------------------------------------------------
@lazy_property
def iface_item_container(self):
"""Get the element's ItemContainer interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ItemContainer")
# ------------------------------------------------------------
@lazy_property
def iface_virtualized_item(self):
"""Get the element's VirtualizedItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "VirtualizedItem")
# ------------------------------------------------------------
@lazy_property
def iface_legacy_iaccessible(self):
"""Get the element's LegacyIAccessible interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "LegacyIAccessible")
# ------------------------------------------------------------
@property
def writable_props(self):
"""Extend default properties list."""
props = super(UIAWrapper, self).writable_props
props.extend(['is_keyboard_focusable',
'has_keyboard_focus',
'automation_id',
])
return props
# ------------------------------------------------------------
def legacy_properties(self):
"""Get the element's LegacyIAccessible control pattern interface properties"""
elem = self.element_info.element
impl = uia_defs.get_elem_interface(elem, "LegacyIAccessible")
property_name_identifier = 'Current'
interface_properties = [prop for prop in dir(LegacyIAccessiblePattern)
if (isinstance(getattr(LegacyIAccessiblePattern, prop), property)
and property_name_identifier in prop)]
return {prop.replace(property_name_identifier, '') : getattr(impl, prop) for prop in interface_properties}
# ------------------------------------------------------------
def friendly_class_name(self):
"""
Return the friendly class name for the control
This differs from the class of the control in some cases.
class_name() is the actual 'Registered' window class of the control
while friendly_class_name() is hopefully something that will make
more sense to the user.
For example Checkboxes are implemented as Buttons - so the class
of a CheckBox is "Button" - but the friendly class is "CheckBox"
"""
if self.friendlyclassname is None:
if self.element_info.control_type not in IUIA().known_control_types.keys():
self.friendlyclassname = self.element_info.control_type
else:
ctrl_type = self.element_info.control_type
if (ctrl_type not in _friendly_classes) or (_friendly_classes[ctrl_type] is None):
self.friendlyclassname = ctrl_type
else:
self.friendlyclassname = _friendly_classes[ctrl_type]
return self.friendlyclassname
    # -----------------------------------------------------------
def automation_id(self):
"""Return the Automation ID of the control"""
return self.element_info.auto_id
# -----------------------------------------------------------
def is_keyboard_focusable(self):
"""Return True if the element can be focused with keyboard"""
return self.element_info.element.CurrentIsKeyboardFocusable == 1
# -----------------------------------------------------------
def has_keyboard_focus(self):
"""Return True if the element is focused with keyboard"""
return self.element_info.element.CurrentHasKeyboardFocus == 1
# -----------------------------------------------------------
def set_focus(self):
"""Set the focus to this element"""
try:
if self.is_minimized():
if self.was_maximized():
self.maximize()
else:
self.restore()
except uia_defs.NoPatternInterfaceError:
pass
try:
self.element_info.element.SetFocus()
# SetFocus() can return S_OK even if the element isn't focused actually
active_element = UIAElementInfo.get_active()
if self.element_info != active_element and self.element_info != active_element.top_level_parent:
if self.handle:
warnings.warn("Failed to set focus on element, trying win32 backend", RuntimeWarning)
HwndWrapper(self.element_info).set_focus()
else:
warnings.warn("The element has not been focused because UIA SetFocus() failed "
"and we can't use win32 backend instead because "
"the element doesn't have native handle", RuntimeWarning)
except comtypes.COMError as exc:
if self.handle:
warnings.warn("Failed to set focus on element due to COMError: {}, "
"trying win32 backend".format(exc), RuntimeWarning)
HwndWrapper(self.element_info).set_focus()
else:
warnings.warn("The element has not been focused due to COMError: {}, "
"and we can't use win32 backend instead because "
"the element doesn't have native handle".format(exc), RuntimeWarning)
return self
# TODO: figure out how to implement .has_focus() method (if no handle available)
# -----------------------------------------------------------
def set_value(self, value):
"""An interface to the SetValue method of the Value control pattern"""
self.iface_value.SetValue(value)
return self
# -----------------------------------------------------------
def close(self):
"""
Close the window
Only a control supporting Window pattern should answer.
If it doesn't (menu shadows, tooltips,...), try to send "Esc" key
"""
if not self.is_visible() or \
not self.is_enabled():
return
try:
name = self.element_info.name
control_type = self.element_info.control_type
iface = self.iface_window
iface.Close()
if name and control_type:
self.actions.log("Closed " + control_type.lower() + ' "' + name + '"')
        except uia_defs.NoPatternInterfaceError:
try:
self.type_keys("{ESC}")
except comtypes.COMError:
raise WindowNotFoundError
# -----------------------------------------------------------
def minimize(self):
"""
Minimize the window
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
if iface.CurrentCanMinimize:
iface.SetWindowVisualState(uia_defs.window_visual_state_minimized)
return self
# -----------------------------------------------------------
def maximize(self):
"""
Maximize the window
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
if iface.CurrentCanMaximize:
iface.SetWindowVisualState(uia_defs.window_visual_state_maximized)
return self
# -----------------------------------------------------------
def restore(self):
"""
Restore the window to normal size
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
iface.SetWindowVisualState(uia_defs.window_visual_state_normal)
return self
# -----------------------------------------------------------
def get_show_state(self):
"""Get the show state and Maximized/minimzed/restored state
Returns values as following
window_visual_state_normal = 0
window_visual_state_maximized = 1
window_visual_state_minimized = 2
"""
iface = self.iface_window
ret = iface.CurrentWindowVisualState
return ret
# -----------------------------------------------------------
def is_minimized(self):
"""Indicate whether the window is minimized or not"""
return self.get_show_state() == uia_defs.window_visual_state_minimized
# -----------------------------------------------------------
def is_maximized(self):
"""Indicate whether the window is maximized or not"""
return self.get_show_state() == uia_defs.window_visual_state_maximized
# -----------------------------------------------------------
def is_normal(self):
"""Indicate whether the window is normal (i.e. not minimized and not maximized)"""
return self.get_show_state() == uia_defs.window_visual_state_normal
# -----------------------------------------------------------
def invoke(self):
"""An interface to the Invoke method of the Invoke control pattern"""
name = self.element_info.name
control_type = self.element_info.control_type
invoke_pattern_iface = self.iface_invoke
# sometimes .Invoke() can hang, although an action (such as button click) was completed successfully
def watchdog():
thread = threading.Thread(target=invoke_pattern_iface.Invoke)
thread.daemon = True
thread.start()
thread.join(2.)
            if thread.is_alive():
warnings.warn('Timeout for InvokePattern.Invoke() call was exceeded', RuntimeWarning)
watchdog_thread = threading.Thread(target=watchdog)
watchdog_thread.start()
watchdog_thread.join(Timings.after_invoke_wait)
if name and control_type:
self.actions.log("Invoked " + control_type.lower() + ' "' + name + '"')
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def expand(self):
"""
Displays all child nodes, controls, or content of the control
An interface to Expand method of the ExpandCollapse control pattern.
"""
self.iface_expand_collapse.Expand()
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def collapse(self):
"""
        Hides all child nodes, controls, or content of the control
An interface to Collapse method of the ExpandCollapse control pattern.
"""
self.iface_expand_collapse.Collapse()
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def get_expand_state(self):
"""
Indicates the state of the control: expanded or collapsed.
An interface to CurrentExpandCollapseState property of the ExpandCollapse control pattern.
Values for enumeration as defined in uia_defines module:
expand_state_collapsed = 0
expand_state_expanded = 1
expand_state_partially = 2
expand_state_leaf_node = 3
"""
return self.iface_expand_collapse.CurrentExpandCollapseState
# -----------------------------------------------------------
def is_expanded(self):
"""Test if the control is expanded"""
state = self.get_expand_state()
return state == uia_defs.expand_state_expanded
# -----------------------------------------------------------
def is_collapsed(self):
"""Test if the control is collapsed"""
state = self.get_expand_state()
return state == uia_defs.expand_state_collapsed
# -----------------------------------------------------------
def get_selection(self):
"""
An interface to GetSelection of the SelectionProvider pattern
Retrieves a UI Automation provider for each child element
that is selected. Builds a list of UIAElementInfo elements
from all retrieved providers.
"""
ptrs_array = self.iface_selection.GetCurrentSelection()
return elements_from_uia_array(ptrs_array)
# -----------------------------------------------------------
def selected_item_index(self):
"""Return the index of a selected item"""
# Go through all children and look for an index
# of an item with the same text.
# Maybe there is another and more efficient way to do it
selection = self.get_selection()
if selection:
for i, c in enumerate(self.children()):
if c.window_text() == selection[0].name:
return i
return None
# -----------------------------------------------------------
def select(self):
"""Select the item
Only items supporting SelectionItem pattern should answer.
        Raise NoPatternInterfaceError if the pattern is not supported.
Usually applied for controls like: a radio button, a tree view item
or a list item.
"""
self.iface_selection_item.Select()
if not self.is_selected():
warnings.warn("SelectionItem.Select failed, trying LegacyIAccessible.DoDefaultAction", RuntimeWarning)
self.iface_legacy_iaccessible.DoDefaultAction()
name = self.element_info.name
control_type = self.element_info.control_type
if name and control_type:
self.actions.log("Selected " + control_type.lower() + ' "' + name + '"')
# Return itself so that action can be chained
return self
# -----------------------------------------------------------
def is_selected(self):
"""Indicate that the item is selected or not.
Only items supporting SelectionItem pattern should answer.
Raise NoPatternInterfaceError if the pattern is not supported
Usually applied for controls like: a radio button, a tree view item,
a list item.
"""
return self.iface_selection_item.CurrentIsSelected
# -----------------------------------------------------------
def children_texts(self):
"""Get texts of the control's children"""
return [c.window_text() for c in self.children()]
# -----------------------------------------------------------
def can_select_multiple(self):
"""
An interface to CanSelectMultiple of the SelectionProvider pattern
Indicates whether the UI Automation provider allows more than one
child element to be selected concurrently.
"""
return self.iface_selection.CurrentCanSelectMultiple
# -----------------------------------------------------------
def is_selection_required(self):
"""
An interface to IsSelectionRequired property of the SelectionProvider pattern.
This property can be dynamic. For example, the initial state of
a control might not have any items selected by default,
meaning that IsSelectionRequired is FALSE. However,
after an item is selected the control must always have
at least one item selected.
"""
return self.iface_selection.CurrentIsSelectionRequired
# -----------------------------------------------------------
def _select(self, item=None):
"""
Find a child item by the name or index and select
        The action can be applied to different controls with items:
ComboBox, TreeView, Tab control
"""
if isinstance(item, six.integer_types):
item_index = item
title = None
elif isinstance(item, six.string_types):
item_index = 0
title = item
else:
err_msg = u"unsupported {0} for item {1}".format(type(item), item)
raise ValueError(err_msg)
list_ = self.children(name=title)
if item_index < len(list_):
wrp = list_[item_index]
wrp.iface_selection_item.Select()
if not wrp.is_selected():
warnings.warn("SelectionItem.Select failed, trying LegacyIAccessible.DoDefaultAction", RuntimeWarning)
wrp.iface_legacy_iaccessible.DoDefaultAction()
else:
raise IndexError("item '{0}' not found".format(item))
# -----------------------------------------------------------
def is_active(self):
"""Whether the window is active or not"""
ae = IUIA().get_focused_element()
focused_wrap = UIAWrapper(UIAElementInfo(ae))
return (focused_wrap.top_level_parent() == self.top_level_parent())
# -----------------------------------------------------------
def is_dialog(self):
"""Return true if the control is a dialog window (WindowPattern interface is available)"""
try:
return self.iface_window is not None
except uia_defs.NoPatternInterfaceError:
return False
# -----------------------------------------------------------
    def menu_select(self, path, exact=False):
"""Select a menu item specified in the path
The full path syntax is specified in:
:py:meth:`pywinauto.menuwrapper.Menu.get_menu_path`
There are usually at least two menu bars: "System" and "Application"
System menu bar is a standard window menu with items like:
        'Restore', 'Move', 'Size', 'Minimize', etc.
This menu bar usually has a "Title Bar" control as a parent.
Application menu bar is often what we look for. In most cases,
its parent is the dialog itself so it should be found among the direct
        children of the dialog. Notice that we don't use the "Application"
        string as a title criterion because it wouldn't work on applications
        with a non-English localization.
        If no menu bar has been found, we fall back to looking up a Menu
        control, searching through all descendants of the dialog.
"""
self.verify_actionable()
cc = self.children(control_type="MenuBar")
if not cc:
cc = self.descendants(control_type="Menu")
if not cc:
raise AttributeError
menu = cc[0]
menu.item_by_path(path, exact).select()
# -----------------------------------------------------------
_scroll_types = {
"left": {
"line": (uia_defs.scroll_small_decrement, uia_defs.scroll_no_amount),
"page": (uia_defs.scroll_large_decrement, uia_defs.scroll_no_amount),
},
"right": {
"line": (uia_defs.scroll_small_increment, uia_defs.scroll_no_amount),
"page": (uia_defs.scroll_large_increment, uia_defs.scroll_no_amount),
},
"up": {
"line": (uia_defs.scroll_no_amount, uia_defs.scroll_small_decrement),
"page": (uia_defs.scroll_no_amount, uia_defs.scroll_large_decrement),
},
"down": {
"line": (uia_defs.scroll_no_amount, uia_defs.scroll_small_increment),
"page": (uia_defs.scroll_no_amount, uia_defs.scroll_large_increment),
},
}
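    # Each (horizontal, vertical) pair above is passed straight to
    # IUIAutomationScrollPattern.Scroll(); e.g. _scroll_types["down"]["page"]
    # scrolls one large increment vertically and none horizontally.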
def scroll(self, direction, amount, count=1, retry_interval=Timings.scroll_step_wait):
"""Ask the control to scroll itself
**direction** can be any of "up", "down", "left", "right"
**amount** can be only "line" or "page"
**count** (optional) the number of times to scroll
**retry_interval** (optional) interval between scroll actions
"""
def _raise_attrib_err(details):
control_type = self.element_info.control_type
name = self.element_info.name
msg = "".join([control_type.lower(), ' "', name, '" ', details])
raise AttributeError(msg)
try:
scroll_if = self.iface_scroll
if direction.lower() in ("up", "down"):
if not scroll_if.CurrentVerticallyScrollable:
_raise_attrib_err('is not vertically scrollable')
elif direction.lower() in ("left", "right"):
if not scroll_if.CurrentHorizontallyScrollable:
_raise_attrib_err('is not horizontally scrollable')
h, v = self._scroll_types[direction.lower()][amount.lower()]
# Scroll as often as we have been asked to
for _ in range(count, 0, -1):
scroll_if.Scroll(h, v)
time.sleep(retry_interval)
except uia_defs.NoPatternInterfaceError:
_raise_attrib_err('is not scrollable')
except KeyError:
raise ValueError("""Wrong arguments:
direction can be any of "up", "down", "left", "right"
amount can be only "line" or "page"
""")
return self
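    # Illustrative usage (the control names are hypothetical):
    #
    #     lst = dlg.child_window(control_type="List").wrapper_object()
    #     lst.scroll("down", "page", count=2)  # two large vertical steps
    #     lst.scroll("right", "line")          # one small horizontal step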
# -----------------------------------------------------------
def _texts_from_item_container(self):
"""Get texts through the ItemContainer interface"""
texts = []
try:
com_elem = self.iface_item_container.FindItemByProperty(0, 0, uia_defs.vt_empty)
while com_elem:
itm = UIAWrapper(UIAElementInfo(com_elem))
texts.append(itm.texts())
com_elem = self.iface_item_container.FindItemByProperty(com_elem, 0, uia_defs.vt_empty)
        except uia_defs.NoPatternInterfaceError:
pass
return texts
# -----------------------------------------------------------
def move_window(self, x=None, y=None, width=None, height=None):
"""Move the window to the new coordinates
The method should be implemented explicitly by controls that
support this action. The most obvious is the Window control.
Otherwise the method throws AttributeError
* **x** Specifies the new left position of the window.
Defaults to the current left position of the window.
* **y** Specifies the new top position of the window.
Defaults to the current top position of the window.
* **width** Specifies the new width of the window. Defaults to the
current width of the window.
        * **height** Specifies the new height of the window. Defaults to the
current height of the window.
"""
raise AttributeError("This method is not supported for {0}".format(self))
backend.register('uia', UIAElementInfo, UIAWrapper)
# -*- coding: utf-8 -*-
import os
import sys
import yaml
import logging
import json
import argparse
from datetime import datetime  # used by JSONEncoder below
from chefwrapper import ChefWrapper
import device42
from nodefilter import node_filter
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter('%(asctime)-15s\t%(levelname)s\t %(message)s'))
logger.addHandler(ch)
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(description="chefexplore")
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output')
parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode - outputs only errors')
parser.add_argument('-c', '--config', help='Config file', default='settings.yaml')
parser.add_argument('-f', '--nodefile', help='Get node info from JSON file instead of Chef server')
parser.add_argument('-S', '--savenodes', help='Save nodes info from Chef server to json file')
parser.add_argument('-n', '--onlynode', action='append', help='Process only selected nodes (fqdn or hostname)')
debugmode = False
# We have to restrict FS to only known types to avoid incorrect disk size calculations
# add more yourself
ALLOWED_FSTYPES = ['ntfs', 'ext2', 'ext3', 'ext4', 'ocfs2', 'xfs', 'zfs', 'jfs',
'vfat', 'msdos', 'reiser4', 'reiserfs']
def get_config(cfgpath):
config = {}
if not os.path.exists(cfgpath):
if not os.path.exists(os.path.join(CUR_DIR, cfgpath)):
raise ValueError("Config file %s is not found!" % cfgpath)
cfgpath = os.path.join(CUR_DIR, cfgpath)
with open(cfgpath, 'r') as cfgf:
        config = yaml.safe_load(cfgf.read())
return config
def d42_update(dev42, nodes, options, static_opt, mapping, chefhost=None):
# get customer info
customer_name = static_opt.get('customer')
customer_id = str(static_opt.get('customer_id') or '') or None
if (not customer_id and customer_name) or (customer_id and not customer_name):
allcustomers = dev42._get('customers')['Customers']
for cst in allcustomers:
if customer_id and str(cst['id']) == customer_id:
customer_name = cst['name']
break
if customer_name and cst['name'] == customer_name:
customer_id = str(cst['id'])
break
logger.debug("Customer %s: '%s'" % (customer_id, customer_name))
# processing all nodes
for node in nodes:
if 'hostname' not in node:
logger.debug("Skip node: no name found")
continue
if options.get('show_node'):
print node
node_name = node['hostname']
        if (options.get('as_node_name') or '').upper() == 'FQDN':
node_name = node.get('fqdn', node_name)
# filtering by attributes
if options.get('node_filter'):
if not node_filter(node, options['node_filter']):
logger.info("Skip node %s: filter not passed" % node_name)
continue # filter not passed
try:
# device = dev42.get_device_by_name(node_name)
# detect memory
totalmem = '0'
if 'memory' in node:
# linux
totalmem = node['memory']['total']
if totalmem.endswith('kB'):
totalmem = int(totalmem[:-2]) / 1024
elif totalmem.endswith('mB'):
totalmem = int(totalmem[:-2])
elif totalmem.endswith('gB'):
totalmem = int(totalmem[:-2]) * 1024
else:
totalmem = int(totalmem)
else:
# win
totalmem = node.get('kernel', {}).get('cs_info', {}).get('total_physical_memory') or '0'
totalmem = int(totalmem) / (1024 * 1024)
# detect HDD
hddcount = 0
hddsize = 0 # first in bytes, then should be converted to Gb
for devname, dev in node['filesystem'].items():
fstype = dev.get('fs_type') or dev.get('fstype')
if fstype not in ALLOWED_FSTYPES:
continue
hddcount += 1
size = int(dev.get('kb_size', 0)) * 1024
hddsize += size
            hddsize = hddsize >> 30  # convert to GB (hddsize / 1024**3)
virtual_subtype = None
if node.get('virtualization'):
# node['virtualization']['system']
pass
if node.get('kernel', {}).get('os_info', {}).get('registered_user') == 'EC2':
virtual_subtype = 'ec2'
data = {
'name': node_name,
'virtual_subtype': virtual_subtype,
'os': node['platform'],
'osver': node['platform_version'],
'memory': totalmem,
'cpucount': node['cpu']['total'],
'cpucore': node['cpu']['0'].get('cores', 0),
'cpupower': int(float(node['cpu']['0']['mhz'])),
'hddcount': hddcount,
'hddsize': hddsize,
'macaddress': node['macaddress'],
'customer': customer_name,
'service_level': static_opt.get('service_level'),
}
if options.get('hostname_precedence'):
data.update({'new_name': node_name})
logger.debug("Updating node %s" % node_name)
updateinfo = dev42.update_device(**data)
deviceid = updateinfo['msg'][1]
logger.info("Device %s updated/created (id %s)" % (node_name, deviceid))
if chefhost:
cfdata = {
'name': node_name,
'key': 'Chef Node ID',
'value': node_name,
'notes': 'Chef Server %s' % chefhost
}
updateinfo = dev42._put('device/custom_field', cfdata)
global depth
depth = []
res = []
def get_depth(obj):
global depth
for item in obj:
depth.append(item)
                    if isinstance(obj[item], (str, unicode)):
res.append({obj[item]: depth})
depth = []
else:
get_depth(obj[item])
return res
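            # Illustrative example (the mapping is hypothetical): get_depth
            # flattens {'cpu': {'mhz': 'CPU MHz'}, 'os': 'OS Name'} into
            # [{'CPU MHz': ['cpu', 'mhz']}, {'OS Name': ['os']}], i.e. one
            # {custom-field-key: path-into-node} entry per string leaf.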
if mapping:
full_depth = get_depth(mapping)
for element in full_depth:
for key in element:
value = None
step = node
try:
for x in element[key]:
step = step[x]
except KeyError:
continue
if type(step) in [unicode, str, int]:
value = step
elif type(step) in [list, tuple, dict]:
value = len(step)
cfdata = {
'name': node_name,
'key': key,
'value': value
}
updateinfo = dev42._put('device/custom_field', cfdata)
# Dealing with IPs
device_ips = dev42._get("ips", data={'device': node_name})['ips']
updated_ips = []
for ifsname, ifs in node['network']['interfaces'].items():
if ifsname == 'lo':
continue # filter out local interface
if [aip for aip, a in ifs['addresses'].items() if aip.startswith('127.0')]:
continue # local loopbacks
macs = [aip for aip, a in ifs['addresses'].items() if a['family'] == 'lladdr']
macaddr = None
if macs:
macaddr = macs[0]
for nodeip, addr in ifs['addresses'].items():
if addr['family'] == 'lladdr':
continue # filter out mac
# update IP
ipdata = {
'ipaddress': nodeip,
'tag': ifsname,
'device': node_name,
'macaddress': macaddr,
}
# logger.debug("IP data: %s" % ipdata)
try:
updateinfo = dev42._post('ips', ipdata)
updated_ips.append(updateinfo['msg'][1])
logger.info("IP %s for device %s updated/created (id %s)" % (nodeip, node_name, deviceid))
except device42.Device42HTTPError as e:
print e
# Delete other IPs from the device
for d_ip in device_ips:
if d_ip['id'] not in updated_ips:
dev42._delete('ips/%s' % d_ip['id'])
logger.debug("Deleted IP %s (id %s) for device %s (id %s)" %
(d_ip['ip'], d_ip['id'], node_name, deviceid))
except Exception as eee:
logger.exception("Error(%s) updating device %s" % (type(eee), node_name))
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.strftime("%Y %m %d %H:%M:%S")
return json.JSONEncoder.default(self, o)
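# Illustrative usage: json.dumps(nodes, cls=JSONEncoder) serializes datetime
# values as "YYYY mm dd HH:MM:SS" strings instead of raising TypeError; the
# --savenodes option below relies on this.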
def main():
global debugmode
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
debugmode = True
if args.quiet:
logger.setLevel(logging.ERROR)
debugmode = False
onlynodes = []
if args.onlynode:
onlynodes = args.onlynode
config = get_config(args.config)
if not args.nodefile:
chef = ChefWrapper(
host=config['chef_server']['host'],
user=config['chef_server']['user'],
key=config['chef_server'].get('key'),
key_file=config['chef_server'].get('key_file'),
version=config['chef_server'].get('version'),
organization=config['chef_server'].get('organization'),
logger=logger,
onlynodes=onlynodes,
)
chefnodes = chef.get_nodes()
logger.debug("Got %s nodes from chef" % len(chefnodes))
else:
with open(args.nodefile, 'r') as nf:
allchefnodes = json.loads(nf.read())
if isinstance(allchefnodes, dict):
allchefnodes = [allchefnodes]
chefnodes = allchefnodes
if onlynodes:
chefnodes = []
for node in allchefnodes:
if not (node.get('hostname') in onlynodes or
node.get('fqdn') in onlynodes or
node.get('ipaddress') in onlynodes):
continue
chefnodes.append(node)
if args.savenodes:
with open(args.savenodes, 'w') as wnf:
wnf.write(json.dumps(chefnodes, cls=JSONEncoder, indent=4, sort_keys=True, ensure_ascii=False))
dev42 = device42.Device42(
endpoint=config['device42']['host'],
user=config['device42']['user'],
password=config['device42']['pass'],
logger=logger,
debug=debugmode,
)
d42_update(dev42, chefnodes, config['options'], config.get('static', {}), config.get('mapping', {}), config['chef_server']['host'])
return 0
if __name__ == "__main__":
retval = main()
sys.exit(retval)
import platform
from conans.paths import CONANFILE, BUILD_INFO_CMAKE
conanfile_build_cmake = """ def build(self):
defs = {
"BUILD_SHARED_LIBS": not self.options.static,
"CONAN_LANGUAGE": self.options.language
}
cmake = CMake(self.settings)
cmake.configure(self, defs=defs)
cmake.build(self)"""
conanfile_build_new_env = """
def build(self):
import os
from conans import VisualStudioBuildEnvironment, AutoToolsBuildEnvironment
from conans.tools import environment_append, vcvars_command, save
if self.settings.compiler == "Visual Studio":
env_build = VisualStudioBuildEnvironment(self)
with environment_append(env_build.vars):
vcvars = vcvars_command(self.settings)
flags = " ".join("%s.lib" % lib for lib in self.deps_cpp_info.libs)
lang = '/DCONAN_LANGUAGE=%s' % self.options.language
if self.options.static:
self.run('{} && cl /c /EHsc hello.cpp {}'.format(vcvars, lang))
self.run('{} && lib hello.obj -OUT:hello{}.lib'.format(vcvars, self.name))
else:
self.run('{} && cl /EHsc /LD hello.cpp {} {} /link /IMPLIB:hello{}.lib '
'/link /OUT:hello{}.dll'.format(vcvars, lang, flags, self.name, self.name))
command = ('{} && cl /EHsc main.cpp hello{}.lib {}'.format(vcvars, self.name, flags))
self.run(command)
elif self.settings.compiler == "gcc" and self.settings.os == "Linux":
makefile_am = '''
bin_PROGRAMS = main
lib_LIBRARIES = libhello{}.a
libhello{}_a_SOURCES = hello.cpp
main_SOURCES = main.cpp
main_LDADD = libhello{}.a
'''.format(self.name, self.name, self.name)
configure_ac = '''
AC_INIT([main], [1.0], [luism@jfrog.com])
AM_INIT_AUTOMAKE([-Wall -Werror foreign])
AC_PROG_CXX
AC_PROG_RANLIB
AM_PROG_AR
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
'''
save("Makefile.am", makefile_am)
save("configure.ac", configure_ac)
self.run("aclocal")
self.run("autoconf")
self.run("automake --add-missing --foreign")
env_build = AutoToolsBuildEnvironment(self)
env_build.defines.append('CONAN_LANGUAGE=%s' % self.options.language)
with environment_append(env_build.vars):
self.run("./configure")
self.run("make")
elif self.settings.compiler == "gcc" or "clang" in str(self.settings.compiler):
lang = '-DCONAN_LANGUAGE=%s' % self.options.language
if self.options.static:
self.run("c++ -c hello.cpp {} @conanbuildinfo.gcc".format(lang))
self.run("ar rcs libhello{}.a hello.o".format(self.name))
else:
if self.settings.os == "Windows":
self.run("c++ -o libhello{}.dll -shared -fPIC hello.cpp {} @conanbuildinfo.gcc "
"-Wl,--out-implib,libhello{}.a".
format(self.name, lang, self.name))
else:
self.run("c++ -o libhello{}.so -shared -fPIC hello.cpp {} @conanbuildinfo.gcc".
format(self.name, lang))
self.run('c++ -o main main.cpp -L. -lhello{} @conanbuildinfo.gcc'.format(self.name))
elif self.settings.compiler == "sun-cc":
lang = '-DCONAN_LANGUAGE=%s' % self.options.language
if self.options.static:
self.run("CC -c hello.cpp {} @conanbuildinfo.gcc".format(lang))
self.run("ar rcs libhello{}.a hello.o".format(self.name))
else:
self.run("CC -o libhello{}.so -G -Kpic hello.cpp {} @conanbuildinfo.gcc".
format(self.name, lang))
self.run('CC -o main main.cpp -L. -lhello{} @conanbuildinfo.gcc'.format(self.name))
try:
os.makedirs("bin")
except:
pass
try:
if self.settings.os == "Windows":
os.rename("main.exe", "bin/say_hello.exe")
else:
os.rename("main", "bin/say_hello")
if not self.options.static:
os.rename("libhello.so", "bin/libhello.so")
except:
pass
"""
conanfile_template = """
from conans import ConanFile, CMake
from conans.tools import replace_in_file
import platform
class {name}Conan(ConanFile):
name = "{name}"
version = "{version}"
options = {{"language": [0, 1],
"static": [True, False]}}
default_options = '''language={language}
static= {static}'''
requires = ({requires})
settings = "os", "compiler", "arch"
generators = "cmake", "gcc"
exports = '*'
def config(self):
{libcxx_remove}
for name, req in self.requires.iteritems():
self.options[name].language = self.options.language
def source(self):
# Try-except necessary, not all tests have all files
try:
replace_in_file("CMakeLists.txt", "projct", "project")
except:
pass
try:
replace_in_file("main.cpp", "retunr", "return")
except:
pass
{build}
def package(self):
self.copy(pattern="*.h", dst="include", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*lib*.a", dst="lib", keep_path=False)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
self.copy(pattern="*.so", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello{name}"]
def imports(self):
self.copy(pattern="*.dylib", dst=".", src="lib")
self.copy(pattern="*.dll", dst=".", src="bin")
self.copy(pattern="*", dst="bin", src="bin")
"""
cmake_file = """
project(MyHello)
cmake_minimum_required(VERSION 2.8.12)
include(${{CMAKE_BINARY_DIR}}/%s)
add_definitions(-DCONAN_LANGUAGE=${{CONAN_LANGUAGE}})
message("HELLO LANGUAGE " ${{CONAN_LANGUAGE}})
conan_basic_setup()
add_library(hello{name} hello{ext})
target_link_libraries(hello{name} ${{CONAN_LIBS}})
set_target_properties(hello{name} PROPERTIES POSITION_INDEPENDENT_CODE ON)
add_executable(say_hello main{ext})
target_link_libraries(say_hello hello{name})
""" % BUILD_INFO_CMAKE
cmake_targets_file = """
project(MyHello)
cmake_minimum_required(VERSION 2.8.12)
include(${{CMAKE_BINARY_DIR}}/%s)
add_definitions(-DCONAN_LANGUAGE=${{CONAN_LANGUAGE}})
message("HELLO LANGUAGE " ${{CONAN_LANGUAGE}})
conan_basic_setup(TARGETS)
add_library(hello{name} hello{ext})
target_link_libraries(hello{name} PUBLIC {targets})
set_target_properties(hello{name} PROPERTIES POSITION_INDEPENDENT_CODE ON)
add_executable(say_hello main{ext})
target_link_libraries(say_hello hello{name})
""" % BUILD_INFO_CMAKE
body = r"""#include "hello{name}.h"
#include <iostream>
using namespace std;
{includes}
void hello{name}(){{
#if CONAN_LANGUAGE == 0
cout<<"Hello {msg}\n";
#elif CONAN_LANGUAGE == 1
cout<<"Hola {msg}\n";
#endif
{other_calls}
}}
"""
body_c = r"""#include "hello{name}.h"
#include <stdio.h>
{includes}
void hello{name}(){{
#if CONAN_LANGUAGE == 0
printf("Hello {msg}\n");
#elif CONAN_LANGUAGE == 1
printf("Hola {msg}\n");
#endif
{other_calls}
}}
"""
header = """
#pragma once
{includes}
{export}void hello{name}();
"""
main = """
#include "hello{name}.h"
int main(){{
hello{name}();
return 0;
}}
"""
executable = """
"""
def cpp_hello_source_files(name="Hello", deps=None, private_includes=False, msg=None,
dll_export=False, need_patch=False, pure_c=False, cmake_targets=False):
"""
    param name: string, defining the name of the package: Hello0, Hello1, HelloX
    param deps: [] list of package names, defining which dependencies this
    package depends on
param private_includes: includes will exist only in cpp, then hidden from
downstream consumers
param msg: the message to append to Hello/Hola, will be equal the number
by default
param dll_export: Adds __declspec(dllexport) to the .h declaration
(to be exported to lib with a dll)
    param need_patch: It will generate broken CMakeLists and main.cpp files,
    so they will need to be fixed/patched in the source() method.
    That method just has to call replace_in_file on those two files to have a
    correct "source" directory. This was introduced to be sure that the
source and build methods are executed using their respective folders
while packaging.
    e.g. (3, [4, 7]) means that a Hello3 package will be created, with message
    "Hello 3", that depends on both Hello4 and Hello7.
    The output of such a package's exe could be: Hello 3, Hello 4, Hello7
"""
assert deps is None or isinstance(deps, list)
deps = deps or []
if msg is None:
msg = name
ret = {}
ext = ".c" if pure_c else ".cpp"
ret["main%s" % ext] = main.format(name=name)
includes = "\n".join(['#include "hello%s.h"' % d for d in deps])
export = "__declspec(dllexport) " if dll_export else ""
ret["hello%s.h" % name] = header.format(name=name,
export=export,
includes=(includes if not private_includes else ""))
other_calls = "\n".join(["hello%s();" % d for d in deps])
body_content = body if not pure_c else body_c
ret["hello%s" % ext] = body_content.format(name=name,
includes=includes,
other_calls=other_calls,
msg=msg)
# Naive approximation, NO DEPS
if cmake_targets:
ret["CMakeLists.txt"] = cmake_targets_file.format(name=name, ext=ext,
targets=" ".join("CONAN_PKG::%s"
% d for d in deps))
else:
ret["CMakeLists.txt"] = cmake_file.format(name=name, ext=ext)
if pure_c:
ret["CMakeLists.txt"] = ret["CMakeLists.txt"].replace("project(MyHello)",
"project(MyHello C)")
if need_patch:
ret["CMakeLists.txt"] = ret["CMakeLists.txt"].replace("project", "projct")
ret["main%s" % ext] = ret["main%s" % ext].replace("return", "retunr")
ret["executable"] = executable
return ret
def cpp_hello_conan_files(name="Hello", version="0.1", deps=None, language=0, static=True,
private_includes=False, msg=None, dll_export=False, need_patch=False,
pure_c=False, config=True, build=True, collect_libs=False,
use_cmake=True, cmake_targets=False):
"""Generate hello_files, as described above, plus the necessary
CONANFILE to manage it
    param name: string, defining the name of the package: Hello0, Hello1, HelloX
    param version: string with the version of the current package, "0.1" by default
    param deps: [] list of strings of the form "0/0.1@user/channel"
param language: 0 = English, 1 = Spanish
param dll_export: Adds __declspec(dllexport) to the .h declaration
(to be exported to lib with a dll)
    e.g. (3, [4, 7]) means that a Hello3 package will be created, with message
    "Hello 3", that depends on both Hello4 and Hello7.
    The output of such a package's exe could be: Hello 3, Hello 4, Hello7"""
assert deps is None or isinstance(deps, list)
code_deps = []
requires = []
for d in deps or []:
if isinstance(d, str):
requires.append('"%s"' % d)
code_dep = d.split("/", 1)[0]
elif isinstance(d, tuple):
requires.append('(%s)' % (", ".join('"%s"' % e for e in d)))
code_dep = d[0].split("/", 1)[0]
else:
raise Exception("Wrong input %s %s" % (d, type(d)))
code_deps.append(code_dep)
requires.append("")
requires = ", ".join(requires)
base_files = cpp_hello_source_files(name, code_deps, private_includes, msg=msg,
dll_export=dll_export, need_patch=need_patch,
pure_c=pure_c, cmake_targets=cmake_targets)
libcxx_remove = "del self.settings.compiler.libcxx" if pure_c else ""
build_env = conanfile_build_cmake if use_cmake else conanfile_build_new_env
conanfile = conanfile_template.format(name=name,
version=version,
requires=requires,
language=language,
static=static,
libcxx_remove=libcxx_remove,
build=build_env)
if pure_c:
conanfile = conanfile.replace("hello.cpp", "hello.c").replace("main.cpp", "main.c")
conanfile = conanfile.replace("c++", "cc" if platform.system()!="Windows" else "gcc")
if not build:
conanfile = conanfile.replace("build(", "build2(")
if not config:
conanfile = conanfile.replace("config(", "config2(")
if collect_libs:
conanfile = conanfile.replace('["hello%s"]' % name, "self.collect_libs()")
base_files[CONANFILE] = conanfile
return base_files
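# Illustrative usage (the package names are hypothetical): generate the full
# file set for a Hello1 test package that requires Hello0:
#
#     files = cpp_hello_conan_files("Hello1", "0.1",
#                                   deps=["Hello0/0.1@user/channel"],
#                                   language=1, static=False)
#     # 'files' maps file names (conanfile.py, CMakeLists.txt, main.cpp, ...)
#     # to their generated contents, ready to be saved to a test folder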
"""
Setup script for data_api Python packages and scripts.
"""
import setuptools
import glob
import logging
import os
import subprocess
import shutil
import sys
# Logging
#logging.basicConfig()
_log = logging.getLogger('setup')
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter(
    '%(asctime)s %(levelname)-7s %(name)s: %(message)s'))
_log.addHandler(_h)
vb = sum([len(a) - 1 if a.startswith('-v') else 0 for a in sys.argv[1:]])
_log.setLevel([logging.WARN, logging.INFO, logging.DEBUG][min(vb, 2)])
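# e.g. no -v flag -> WARN, "-v" -> INFO, "-vv" (or "-v -v") -> DEBUG (capped at 2)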
# Globals
version = open('VERSION').read().strip()
packages = setuptools.find_packages("lib")
g_with_jupyter = False
server_languages = ["python_server"]
client_languages = ["python", "javascript", "perl", "java"]
class BuildAttr(object):
"""Attributes for building Thrift clients and servers.
This wrapper class greatly improves the aesthetics of the code.
"""
def __init__(self, values):
self.style = values['style']
self.generated_dir = values['generated_dir']
self.copy_files = values['copy_files']
self.rename_files = values.get('rename_files', {})
# There are two sets of python code generated.
# The python server code is twisted based and contains an async client.
# The default python client code is synchronous and not based on twisted.
# When building the server code, we generate both sets of code and then rename
# the generated synchronous code to thrift_client.py to not overwrite the
# twisted service code.
thrift_build = {
"python_server": BuildAttr({
"style": "py:twisted",
"generated_dir": "gen-py.twisted",
"copy_files": ["constants.py", "ttypes.py", "thrift_service.py"],
}),
"python": BuildAttr({
"style": "py:new_style",
"generated_dir": "gen-py",
"copy_files": ["constants.py", "ttypes.py", "thrift_service.py"],
"rename_files": {"thrift_service.py": "thrift_client.py"}
}),
"javascript": BuildAttr({
"style": "js:jquery",
"generated_dir": "gen-js",
"copy_files": ["*"]
}),
"perl": BuildAttr({
"style": "perl",
"generated_dir": "gen-perl",
"copy_files": ["Constants.pm", "Types.pm", "thrift_service.pm"]
}),
"java": BuildAttr({
"style": "java:sorted_containers",
"generated_dir": "gen-java",
"copy_files": ["*"]
})
}
# Functions
def filter_args():
global g_with_jupyter
setup_args = sys.argv[1:]
# TODO: Put this new option into --help output
if "--jupyter" in setup_args:
g_with_jupyter = True
setup_args.remove("--jupyter")
return setup_args
def get_dependencies():
def parse_requirements(filename):
pkg = list()
with open(filename, 'r') as req_file:
req_lines = req_file.read().splitlines()
for line in req_lines:
if line.strip() == "":
pass
elif line.startswith("-r"):
pkg.extend(parse_requirements(line.split(" ")[-1]))
else:
pkg.append(line)
return pkg
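    # Illustrative requirements file content: plain pins are collected and
    # pip-style "-r" includes are followed recursively, e.g.:
    #
    #     six>=1.9
    #     -r requirements-common.txt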
setup_args = sys.argv[1:]
if g_with_jupyter:
install_requires = parse_requirements(
os.path.join(os.path.dirname(__file__),"requirements-jupyter.txt"))
open(os.path.join(os.path.dirname(__file__),'exclude-tests.txt'), 'w').write('') # clear it
else:
        _log.warning("--jupyter not specified, so using minimal install "
                     "without packages in doekbase.data_api.interactive")
exclude_pkg = 'doekbase.data_api.interactive'
global packages
if exclude_pkg in packages:
packages.remove(exclude_pkg)
install_requires = parse_requirements(
os.path.join(os.path.dirname(__file__),"requirements.txt"))
open(os.path.join(os.path.dirname(__file__),'exclude-tests.txt'), 'w')\
.write('lib/' + exclude_pkg.replace('.', '/'))
# clear it
_log.debug('Install requirements: {}'.format(install_requires))
return install_requires
def call_command(command, is_thrift=False):
"""Better exception when calling a command."""
cstr = ' '.join(command) if isinstance(command, list) else command
_log.debug('Run command "{}"'.format(cstr))
try:
errno = subprocess.call(command)
except Exception as err:
if is_thrift:
sys.stderr.write("==\nUnable to run `thrift` executable, is Apache "
"Thrift installed? See https://thrift.apache.org/ "
"for help\n==\n")
            raise RuntimeError('Cannot run Thrift ({c}): {e}'.format(c=cstr, e=err))
else:
raise RuntimeError('Command "{c}" failed: {e}'.format(c=cstr,
e=err))
return errno
class BuildThriftClients(setuptools.Command):
"""Build command for generating Thrift client code"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
_log.info('Build Thrift code')
try:
self._try_run()
except Exception as err:
_log.error("error in BuildThriftClients.run: {}".format(err))
raise
def _try_run(self):
for dirpath, dirnames, filenames in os.walk("thrift/specs"):
for f in filter(lambda _: _.endswith('.thrift'), filenames):
spec_path = os.path.abspath(os.path.join(dirpath, f))
# Process each language
for lang in client_languages:
settings = thrift_build[lang] # settings for current lang.
# Remove old generated files, if any
if os.path.exists(settings.generated_dir):
shutil.rmtree(settings.generated_dir)
# Run Thrift compiler to generate new stubs
cmd = ["thrift", "-r", "--gen"]
cmd.append(settings.style)
cmd.append(spec_path)
_log.debug("{}: Thrift command = {}".format(lang, cmd))
errno = call_command(cmd, is_thrift=True)
if errno != 0:
raise Exception("Thrift build for {} failed with : {}"
.format(lang, errno))
# Get a list of all generated stub files
generated_files = glob.glob(settings.generated_dir + "/*/*")
if len(generated_files) == 0:
generated_files = glob.glob(thrift_build[
lang].generated_dir + "/*")
# Copy generated files to their final place in the tree
copied = False
if settings.copy_files == ["*"]:
destination = spec_path.rsplit("/",1)[0].replace(
"specs", "stubs/" + lang)
for name in generated_files:
source = os.path.basename(name)
shutil.copyfile(name, os.path.join(destination,
source))
copied = len(generated_files) > 0
else:
for x in generated_files:
for name in settings.copy_files:
if name == os.path.basename(x):
destination = spec_path.rsplit("/",1)[0]\
.replace("specs", "stubs/" + lang)
shutil.copyfile(x, os.path.join(destination,
name))
copied = True
if not copied:
raise Exception("Unable to find thrift-generated "
"files to copy!")
# Remove original generated directories
for lang in client_languages:
settings = thrift_build[lang]
shutil.rmtree(settings.generated_dir)
class BuildThriftServers(setuptools.Command):
"""Build command for generating Thrift server code"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
_log.info('Install Thrift code')
try:
self._try_run()
except Exception as err:
_log.error("error in CustomInstall.run: {}".format(err))
raise
def _try_run(self):
start_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),"thrift/specs")
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
if f.endswith(".thrift"):
# first generate all the twisted server code
settings = thrift_build['python_server']
#TODO - modify version string in .thrift before code generation
spec_path = os.path.abspath(os.path.join(dirpath, f))
command = ["thrift", "-r", "--gen"]
command.append(settings.style)
command.append(spec_path)
errno = call_command(command, is_thrift=True)
if errno != 0:
raise Exception("Thrift build for python service failed with : {}".format(errno))
generated_files = glob.glob(settings.generated_dir + "/*/*")
if len(generated_files) == 0:
generated_files = glob.glob(settings.generated_dir +
"/*")
# now copy the generated server files to the target
copied = False
for x in generated_files:
for name in settings.copy_files:
if name in x:
destination = os.path.join(dirpath.replace(
"thrift/specs", "lib/doekbase/data_api")
+ "/service/", name)
shutil.copyfile(x, destination)
copied = True
if not copied:
raise Exception("Unable to find thrift service generated"
" files to copy!")
shutil.rmtree(settings.generated_dir)
# generate the client code
settings = thrift_build['python']
command = ["thrift", "-r", "--gen"]
command.append(settings.style)
command.append(spec_path)
errno = call_command(command, is_thrift=True)
if errno != 0:
raise Exception("Thrift build for python client failed "
"with : {}".format(errno))
                    generated_files = glob.glob(settings.generated_dir + "/*/*")
                    if len(generated_files) == 0:
                        generated_files = glob.glob(settings.generated_dir + "/*")
# rename the thrift_service.py generated to thrift_client.py
renamed = False
for x in generated_files:
for name in settings.rename_files:
if name in x:
destination = os.path.join(dirpath.replace(
"thrift/specs", "lib/doekbase/data_api") +
"/service/",
settings.rename_files[name])
shutil.copyfile(x, destination)
renamed = True
if not renamed:
raise Exception("Unable to find thrift client generated"
" files to copy!")
shutil.rmtree(settings.generated_dir)
config = {
"description": "KBase Data API",
"author": "Matt Henderson",
"url": "https://github.com/kbase/data_api/",
"download_url": "https://github.com/kbase/data_api/stuff?download",
"author_email": "mhenderson@lbl.gov",
"version": version,
"setup_requires": ["six"],
"tests_require": ["nose", "nose-timer", "codecov"],
"packages": packages,
"scripts": ["bin/data_api_demo.py",
"bin/data_api_benchmark.py",
"bin/dump_wsfile",
"bin/data_api_start_service.py",
"bin/assembly_client_driver.py",
"bin/genome_annotation_client_driver.py",
"bin/taxon_client_driver.py",
"bin/test_api_service.py",
"bin/extract_thrift_docs"],
"name": "doekbase_data_api",
"entry_points": {
'nose.plugins.0.10': [
'wsurl = doekbase.data_api.tests.nose_plugin_wsurl:WorkspaceURL'
]
},
"zip_safe": True
}
setuptools.setup(package_dir = {'': 'lib'},
script_args = filter_args(),
install_requires = get_dependencies(),
cmdclass = {'build_thrift_servers': BuildThriftServers,
'build_thrift_clients': BuildThriftClients},
**config)
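# A minimal usage sketch (assuming the Apache Thrift compiler is on the PATH
# and the thrift/specs directory exists): the custom commands registered in
# cmdclass above are invoked through setup.py like any other distutils command:
#
#     python setup.py build_thrift_servers
#     python setup.py build_thrift_clients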
#!/usr/bin/env python
import MySQLdb as mdb
import sys
# Various strings used throughout the program
ver = "0.0.0b"
error_message = "There was an error when attempting to process your request. System returned:\n"
no_res = "No results found, response from server: "
no_args = "Proper arguments not fulfilled."
# Parent class
class CMS:
def __init__(self, hostname, username, password, db):
self.hostname = hostname
self.username = username
self.password = password
self.db = db
    # DB connection shortcut (fewer parameters, faster coding)
def connect(self):
return mdb.connect(self.hostname, self.username, self.password, self.db)
def set_hostname(self, hostname):
self.hostname = hostname
def get_hostname(self):
return self.hostname
def set_username(self, username):
self.username = username
def get_username(self):
return self.username
    def set_password(self, password):
self.password = password
def get_password(self):
return self.password
def set_db(self, db):
self.db = db
def get_db(self):
return self.db
# Returns how many entries are in the Posts table
def get_entry_count(self):
        con = None  # ensure 'con' exists for the finally clause
        try:
con = self.connect()
cur = con.cursor()
cur.execute("SELECT COUNT(*) FROM Posts;")
res = cur.fetchone()
return res[0]
except mdb.Error, e:
print error_message + str(e)
sys.exit(1)
finally:
if con:
con.close()
# Returns an array of all posts in the Posts table
def get_all_posts(self):
        con = None
        try:
con = self.connect()
cur = con.cursor()
cur.execute("SELECT * FROM Posts;")
res = cur.fetchall()
            if res is None:
return error_message + no_res
else:
posts = []
for i in res:
p = {}
p["pid"] = i[0]
p["title"] = i[1]
p["author"] = i[2]
p["content"] = i[3]
posts.append(p)
return posts
except mdb.Error, e:
print error_message + str(e)
sys.exit(1)
finally:
if con:
con.close()
# Returns a dictionary of a specified post in Posts table
def get_post_by_id(self, pid):
if pid:
req = "SELECT * FROM Posts WHERE pid=\"" + str(pid) + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
res = cur.fetchone()
                if res is None:
return error_message + no_res
else:
p = {}
p["pid"] = res[0]
p["title"] = res[1]
p["author"] = res[2]
p["content"] = res[3]
return p
except mdb.Error, e:
print error_message + str(e)
sys.exit(1)
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
# Returns an array of posts matching a specified title in Posts table
def get_posts_by_title(self, title):
if title:
req = "SELECT * FROM Posts WHERE title=\"" + title + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
res = cur.fetchall()
                if res is None:
                    return error_message + no_res
else:
posts = []
for i in res:
p = {}
p["pid"] = i[0]
p["title"] = i[1]
p["author"] = i[2]
p["content"] = i[3]
posts.append(p)
return posts
except mdb.Error, e:
print error_message + str(e)
sys.exit(1)
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
# Returns an array of posts by a specified author in Posts table
def get_posts_by_author(self, author):
if author:
req = "SELECT * FROM Posts WHERE author=\"" + author + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
res = cur.fetchall()
                if res is None:
return error_message + no_res
else:
posts = []
for i in res:
p = {}
p["pid"] = i[0]
p["title"] = i[1]
p["author"] = i[2]
p["content"] = i[3]
posts.append(p)
return posts
except mdb.Error, e:
print error_message + str(e)
sys.exit(1)
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
    # Method to create a new post, returns True if successful, otherwise returns False
def new_post(self, title, author, content):
if title and author and content:
req = "INSERT INTO Posts (title, author, content) VALUES (\"" + str(title) + "\", \"" + str(author) + "\", \"" + str(content) + "\");"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
con.commit()
return True
            except (mdb.IntegrityError, mdb.Error), e:
print error_message + str(e)
sys.exit(1)
return False
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
return False
# Updates the title of a specified post in Posts table, returns True if successful, otherwise False
def update_post_title(self, pid, new_title):
if new_title:
req = "UPDATE Posts SET title=\"" + new_title + "\" WHERE pid=\"" + str(pid) + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
con.commit()
return True
            except (mdb.IntegrityError, mdb.Error), e:
print error_message + str(e)
sys.exit(1)
return False
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
return False
# Updates the author of a specified post in Posts table, returns True if successful, otherwise False
def update_post_author(self, pid, new_author):
if new_author:
req = "UPDATE Posts SET author=\"" + new_author + "\" WHERE pid=\"" + str(pid) + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
con.commit()
return True
            except (mdb.IntegrityError, mdb.Error), e:
print error_message + str(e)
sys.exit(1)
return False
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
return False
# Updates the content of a specified post in Posts table, returns True if successful, otherwise False
def update_post_content(self, pid, new_content):
if new_content:
req = "UPDATE Posts SET content=\"" + new_content + "\" WHERE pid=\"" + str(pid) + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
con.commit()
return True
            except (mdb.IntegrityError, mdb.Error), e:
print error_message + str(e)
sys.exit(1)
return False
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
return False
    # Deletes a specified post from Posts table, returns True if successful, otherwise False
def remove_post(self, pid):
if pid:
req = "DELETE FROM Posts WHERE pid=\"" + str(pid) + "\";"
            con = None
            try:
con = self.connect()
cur = con.cursor()
cur.execute(req)
con.commit()
cur.execute("ALTER TABLE Posts AUTO_INCREMENT=1;")
return True
            except (mdb.IntegrityError, mdb.Error), e:
print error_message + str(e)
sys.exit(1)
return False
finally:
if con:
con.close()
else:
print error_message + no_args
sys.exit(1)
return False
# Returns CMS version
    def version(self):
        return ver
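# Minimal usage sketch; the hostname, credentials and database name below are
# hypothetical placeholders, not values shipped with this module.
if __name__ == "__main__":
    cms = CMS("localhost", "cms_user", "secret", "cms_db")
    print "Connected to", cms.get_hostname()
    print "Posts in database:", cms.get_entry_count()
    for post in cms.get_all_posts():
        print post["pid"], post["title"], "by", post["author"]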
###############################################################################
# TwoPowerSphericalPotential.py: General class for potentials derived from
# densities with two power-laws
#
# amp
# rho(r)= ------------------------------------
# (r/a)^\alpha (1+r/a)^(\beta-\alpha)
###############################################################################
import numpy
from scipy import special, optimize
from ..util import conversion
from .Potential import Potential, kms_to_kpcGyrDecorator, _APY_LOADED
if _APY_LOADED:
from astropy import units
class TwoPowerSphericalPotential(Potential):
"""Class that implements spherical potentials that are derived from
two-power density models
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(r/a)^\\alpha\\,(1+r/a)^{\\beta-\\alpha}}
"""
def __init__(self,amp=1.,a=5.,alpha=1.5,beta=3.5,normalize=False,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a two-power-density potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass
a - scale radius (can be Quantity)
alpha - inner power
beta - outer power
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2010-07-09 - Started - Bovy (NYU)
"""
# Instantiate
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='mass')
# _specialSelf for special cases (Dehnen class, Dehnen core, Hernquist, Jaffe, NFW)
self._specialSelf= None
if ((self.__class__ == TwoPowerSphericalPotential) &
(alpha == round(alpha)) & (beta == round(beta))):
if int(alpha) == 0 and int(beta) == 4:
self._specialSelf=\
DehnenCoreSphericalPotential(amp=1.,a=a,
normalize=False)
elif int(alpha) == 1 and int(beta) == 4:
self._specialSelf=\
HernquistPotential(amp=1.,a=a,normalize=False)
elif int(alpha) == 2 and int(beta) == 4:
self._specialSelf= JaffePotential(amp=1.,a=a,normalize=False)
elif int(alpha) == 1 and int(beta) == 3:
self._specialSelf= NFWPotential(amp=1.,a=a,normalize=False)
# correcting quantities
a= conversion.parse_length(a,ro=self._ro)
# setting properties
self.a= a
self._scale= self.a
self.alpha= alpha
self.beta= beta
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-09 - Started - Bovy (NYU)
"""
if self._specialSelf is not None:
return self._specialSelf._evaluate(R,z,phi=phi,t=t)
elif self.beta == 3.:
r= numpy.sqrt(R**2.+z**2.)
return (1./self.a)\
*(r-self.a*(r/self.a)**(3.-self.alpha)/(3.-self.alpha)\
*special.hyp2f1(3.-self.alpha,
2.-self.alpha,
4.-self.alpha,
-r/self.a))/(self.alpha-2.)/r
else:
r= numpy.sqrt(R**2.+z**2.)
return special.gamma(self.beta-3.)\
*((r/self.a)**(3.-self.beta)/special.gamma(self.beta-1.)\
*special.hyp2f1(self.beta-3.,
self.beta-self.alpha,
self.beta-1.,
-self.a/r)
-special.gamma(3.-self.alpha)/special.gamma(self.beta-self.alpha))/r
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
if self._specialSelf is not None:
return self._specialSelf._Rforce(R,z,phi=phi,t=t)
else:
r= numpy.sqrt(R**2.+z**2.)
return -R/r**self.alpha*self.a**(self.alpha-3.)/(3.-self.alpha)\
*special.hyp2f1(3.-self.alpha,
self.beta-self.alpha,
4.-self.alpha,
-r/self.a)
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
if self._specialSelf is not None:
return self._specialSelf._zforce(R,z,phi=phi,t=t)
else:
r= numpy.sqrt(R**2.+z**2.)
return -z/r**self.alpha*self.a**(self.alpha-3.)/(3.-self.alpha)\
*special.hyp2f1(3.-self.alpha,
self.beta-self.alpha,
4.-self.alpha,
-r/self.a)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
r= numpy.sqrt(R**2.+z**2.)
return (self.a/r)**self.alpha/(1.+r/self.a)**(self.beta-self.alpha)/4./numpy.pi/self.a**3.
def _ddensdr(self,r,t=0.):
"""
NAME:
_ddensdr
PURPOSE:
           evaluate the radial density derivative for this potential
INPUT:
r - spherical radius
t= time
OUTPUT:
the density derivative
HISTORY:
2021-02-05 - Written - Bovy (UofT)
"""
return -self._amp*(self.a/r)**(self.alpha-1.)\
*(1.+r/self.a)**(self.alpha-self.beta-1.)\
*(self.a*self.alpha+r*self.beta)/r**2/4./numpy.pi/self.a**3.
def _d2densdr2(self,r,t=0.):
"""
NAME:
_d2densdr2
PURPOSE:
evaluate the second radial density derivative for this potential
INPUT:
r - spherical radius
t= time
OUTPUT:
the 2nd density derivative
HISTORY:
2021-02-05 - Written - Bovy (UofT)
"""
return self._amp*(self.a/r)**(self.alpha-2.)\
*(1.+r/self.a)**(self.alpha-self.beta-2.)\
*(self.alpha*(self.alpha+1.)*self.a**2+
2.*self.alpha*self.a*(self.beta+1.)*r
+self.beta*(self.beta+1.)*r**2)/r**4/4./numpy.pi/self.a**3.
def _ddenstwobetadr(self,r,beta=0):
"""
NAME:
_ddenstwobetadr
PURPOSE:
evaluate the radial density derivative x r^(2beta) for this potential
INPUT:
r - spherical radius
beta= (0)
OUTPUT:
d (rho x r^{2beta} ) / d r
HISTORY:
2021-02-14 - Written - Bovy (UofT)
"""
return self._amp/4./numpy.pi/self.a**3.\
*r**(2.*beta-2.)*(self.a/r)**(self.alpha-1.)\
*(1.+r/self.a)**(self.alpha-self.beta-1.)\
*(self.a*(2.*beta-self.alpha)+r*(2.*beta-self.beta))
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second cylindrically radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second cylindrically radial derivative
HISTORY:
2020-11-23 - Written - Beane (CfA)
"""
r = numpy.sqrt(R**2.+z**2.)
A = self.a**(self.alpha-3.)/(3.-self.alpha)
hyper = special.hyp2f1(3.-self.alpha,
self.beta-self.alpha,
4.-self.alpha,
-r/self.a)
hyper_deriv = (3.-self.alpha) * (self.beta - self.alpha) / (4.-self.alpha) \
* special.hyp2f1(4.-self.alpha,
1.+self.beta-self.alpha,
5.-self.alpha,
-r/self.a)
term1 = A * r**(-self.alpha) * hyper
term2 = -self.alpha * A * R**2. * r**(-self.alpha-2.) * hyper
term3 = -A * R**2 * r**(-self.alpha-1.) / self.a * hyper_deriv
return term1 + term2 + term3
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
           _Rzderiv
PURPOSE:
evaluate the mixed radial/vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the mixed radial/vertical derivative
HISTORY:
2020-11-28 - Written - Beane (CfA)
"""
r = numpy.sqrt(R**2.+z**2.)
A = self.a**(self.alpha-3.)/(3.-self.alpha)
hyper = special.hyp2f1(3.-self.alpha,
self.beta-self.alpha,
4.-self.alpha,
-r/self.a)
hyper_deriv = (3.-self.alpha) * (self.beta - self.alpha) / (4.-self.alpha) \
* special.hyp2f1(4.-self.alpha,
1.+self.beta-self.alpha,
5.-self.alpha,
-r/self.a)
term1 = -self.alpha * A * R * r**(-self.alpha-2.) * z * hyper
term2 = -A * R * r**(-self.alpha-1.) * z / self.a * hyper_deriv
return term1 + term2
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2012-07-26 - Written - Bovy (IAS@MPIA)
"""
return self._R2deriv(numpy.fabs(z),R) #Spherical potential
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2014-04-01 - Written - Erkal (IoA)
"""
if z is not None: raise AttributeError # use general implementation
return (R/self.a)**(3.-self.alpha)/(3.-self.alpha)\
*special.hyp2f1(3.-self.alpha,-self.alpha+self.beta,
4.-self.alpha,-R/self.a)
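# A minimal usage sketch (values are hypothetical): a generic two-power profile
# is instantiated directly, while integer (alpha, beta) pairs with a known
# closed form are routed through _specialSelf to the matching subclass below.
#
#     tp = TwoPowerSphericalPotential(amp=1., a=5., alpha=1.5, beta=3.5,
#                                     normalize=1.)
#     tp(1., 0.)         # potential at R=1, z=0
#     tp.Rforce(1., 0.)  # radial force; -1. by the normalize=1. convention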
class DehnenSphericalPotential(TwoPowerSphericalPotential):
"""Class that implements the Dehnen Spherical Potential from `Dehnen (1993) <https://ui.adsabs.harvard.edu/abs/1993MNRAS.265..250D>`_
.. math::
\\rho(r) = \\frac{\\mathrm{amp}(3-\\alpha)}{4\\,\\pi\\,a^3}\\,\\frac{1}{(r/a)^{\\alpha}\\,(1+r/a)^{4-\\alpha}}
"""
def __init__(self,amp=1.,a=1.,alpha=1.5,normalize=False,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
           initialize a Dehnen Spherical Potential; note that the amplitude definition used here does NOT match that of Dehnen (1993)
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass
a - scale radius (can be Quantity)
alpha - inner power, restricted to [0, 3)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2019-10-07 - Started - Starkman (UofT)
"""
if (alpha < 0.) or (alpha >= 3.):
raise IOError('DehnenSphericalPotential requires 0 <= alpha < 3')
# instantiate
TwoPowerSphericalPotential.__init__(
self,amp=amp,a=a,alpha=alpha,beta=4,
normalize=normalize,ro=ro,vo=vo)
# make special-self and protect subclasses
self._specialSelf= None
if ((self.__class__ == DehnenSphericalPotential) &
(alpha == round(alpha))):
if round(alpha) == 0:
self._specialSelf=\
DehnenCoreSphericalPotential(amp=1.,a=a,
normalize=False)
elif round(alpha) == 1:
self._specialSelf=\
HernquistPotential(amp=1.,a=a,normalize=False)
elif round(alpha) == 2:
self._specialSelf= JaffePotential(amp=1.,a=a,normalize=False)
# set properties
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
if self._specialSelf is not None:
return self._specialSelf._evaluate(R,z,phi=phi,t=t)
else: # valid for alpha != 2, 3
r= numpy.sqrt(R**2.+z**2.)
return -(1.-1./(1.+self.a/r)**(2.-self.alpha))/\
(self.a * (2.-self.alpha) * (3.-self.alpha))
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
if self._specialSelf is not None:
return self._specialSelf._Rforce(R,z,phi=phi,t=t)
else:
r= numpy.sqrt(R**2.+z**2.)
return -R/r**self.alpha*(self.a+r)**(self.alpha-3.)/(3.-self.alpha)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second radial derivative
HISTORY:
2019-10-11 - Written - Starkman (UofT)
"""
if self._specialSelf is not None:
return self._specialSelf._R2deriv(R, z, phi=phi, t=t)
a, alpha = self.a, self.alpha
r = numpy.sqrt(R**2. + z**2.)
# formula not valid for alpha=2,3, (integers?)
return (numpy.power(r, -2.-alpha)*numpy.power(r+a, alpha-4.)*
(-a*r**2. + (2.*R**2.-z**2.)*r + a*alpha*R**2.)/
(alpha - 3.))
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2019-11-21 - Written - Starkman (UofT)
"""
if self._specialSelf is not None:
return self._specialSelf._zforce(R,z,phi=phi,t=t)
else:
r= numpy.sqrt(R**2.+z**2.)
return -z/r**self.alpha*(self.a+r)**(self.alpha-3.)/(3.-self.alpha)
def _z2deriv(self,R,z,phi=0.,t=0.):
r"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2019-10-20 - Written - Starkman (UofT)
"""
return self._R2deriv(z,R,phi=phi,t=t)
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
d2phi/dR/dz
HISTORY:
2019-10-11 - Written - Starkman (UofT)
"""
if self._specialSelf is not None:
return self._specialSelf._Rzderiv(R, z, phi=phi, t=t)
a, alpha= self.a, self.alpha
r= numpy.sqrt(R**2.+z**2.)
return ((R*z*numpy.power(r,-2.-alpha)*numpy.power(a+r,alpha-4.)
*(3*r+a*alpha))/(alpha-3))
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
return (self.a/r)**self.alpha/(1.+r/self.a)**(4.-self.alpha)/4./numpy.pi/self.a**3.
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
if z is not None: raise AttributeError # use general implementation
return 1./(1.+self.a/R)**(3.-self.alpha)/(3.-self.alpha) # written so it works for r=numpy.inf
class DehnenCoreSphericalPotential(DehnenSphericalPotential):
"""Class that implements the Dehnen Spherical Potential from `Dehnen (1993) <https://ui.adsabs.harvard.edu/abs/1993MNRAS.265..250D>`_ with alpha=0 (corresponding to an inner core)
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{12\\,\\pi\\,a^3}\\,\\frac{1}{(1+r/a)^{4}}
"""
def __init__(self,amp=1.,a=1.,normalize=False,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a cored Dehnen Spherical Potential; note that the amplitude definition used here does NOT match that of Dehnen (1993)
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass
a - scale radius (can be Quantity)
alpha - inner power, restricted to [0, 3)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2019-10-07 - Started - Starkman (UofT)
"""
DehnenSphericalPotential.__init__(
self,amp=amp,a=a,alpha=0,
normalize=normalize,ro=ro,vo=vo)
# set properties explicitly
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
return -(1.-1./(1.+self.a/r)**2.)/(6.*self.a)
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
return -R/numpy.power(numpy.sqrt(R**2.+z**2.)+self.a,3.)/3.
def _rforce_jax(self,r):
"""
NAME:
_rforce_jax
PURPOSE:
evaluate the spherical radial force for this potential using JAX
INPUT:
r - Galactocentric spherical radius
OUTPUT:
the radial force
HISTORY:
2021-02-25 - Written - Bovy (UofT)
"""
# No need for actual JAX!
return -self._amp*r/(r+self.a)**3./3.
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second radial derivative
HISTORY:
2019-10-11 - Written - Starkman (UofT)
"""
r = numpy.sqrt(R**2.+z**2.)
return -(((2.*R**2.-z**2.)-self.a*r)/(3.*r*numpy.power(r+self.a,4.)))
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2019-11-21 - Written - Starkman (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
return -z/numpy.power(self.a+r,3.)/3.
def _z2deriv(self,R,z,phi=0.,t=0.):
r"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2019-10-20 - Written - Starkman (UofT)
"""
return self._R2deriv(z,R,phi=phi,t=t)
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
d2phi/dR/dz
HISTORY:
2019-10-11 - Written - Starkman (UofT)
"""
a= self.a
r= numpy.sqrt(R**2.+z**2.)
return -(R * z/r/numpy.power(a+r,4.))
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
return 1./(1.+r/self.a)**4./4./numpy.pi/self.a**3.
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2019-11-20 - Written - Starkman (UofT)
"""
if z is not None: raise AttributeError # use general implementation
return 1./(1.+self.a/R)**3./3. # written so it works for r=numpy.inf
class HernquistPotential(DehnenSphericalPotential):
"""Class that implements the Hernquist potential
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(r/a)\\,(1+r/a)^{3}}
"""
def __init__(self,amp=1.,a=1.,normalize=False,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize a Hernquist potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass (note that amp is 2 x [total mass] for the chosen definition of the Hernquist potential)
a - scale radius (can be Quantity)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
DehnenSphericalPotential.__init__(
self,amp=amp,a=a,alpha=1,
normalize=normalize,ro=ro,vo=vo)
self._nemo_accname= 'Dehnen'
# set properties explicitly
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-09 - Started - Bovy (NYU)
"""
return -1./(1.+numpy.sqrt(R**2.+z**2.)/self.a)/2./self.a
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -R/self.a/sqrtRz/(1.+sqrtRz/self.a)**2./2./self.a
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -z/self.a/sqrtRz/(1.+sqrtRz/self.a)**2./2./self.a
def _rforce_jax(self,r):
"""
NAME:
_rforce_jax
PURPOSE:
evaluate the spherical radial force for this potential using JAX
INPUT:
r - Galactocentric spherical radius
OUTPUT:
the radial force
HISTORY:
2021-02-14 - Written - Bovy (UofT)
"""
# No need for actual JAX!
return -self._amp/2./(r+self.a)**2.
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second radial derivative
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return (self.a*z**2.+(z**2.-2.*R**2.)*sqrtRz)/sqrtRz**3.\
/(self.a+sqrtRz)**3./2.
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -R*z*(self.a+3.*sqrtRz)*(sqrtRz*(self.a+sqrtRz))**-3./2.
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
Rma= numpy.sqrt(R**2.-self.a**2.+0j)
if Rma == 0.:
return (-12.*self.a**3-5.*self.a*z**2
+numpy.sqrt(1.+z**2/self.a**2)\
*(12.*self.a**3-self.a*z**2+2/self.a*z**4))\
/30./numpy.pi*z**-5.
else:
return self.a*((2.*self.a**2.+R**2.)*Rma**-5\
*(numpy.arctan(z/Rma)-numpy.arctan(self.a*z/r/Rma))
+z*(5.*self.a**3.*r-4.*self.a**4
+self.a**2*(2.*r**2.+R**2)
-self.a*r*(5.*R**2.+3.*z**2.)+R**2.*r**2.)
/(self.a**2.-R**2.)**2.
/(r**2-self.a**2.)**2.).real/4./numpy.pi
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
calculate the mass out to a given radius
INPUT:
R - radius at which to return the enclosed mass
z - (don't specify this) vertical height
OUTPUT:
mass in natural units
HISTORY:
2014-01-29 - Written - Bovy (IAS)
"""
if z is not None: raise AttributeError # use general implementation
return 1./(1.+self.a/R)**2./2. # written so it works for r=numpy.inf
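    # Note: as R -> numpy.inf the enclosed mass above tends to 1/2, consistent
    # with amp being twice the total mass for this definition (see __init__).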
@kms_to_kpcGyrDecorator
def _nemo_accpars(self,vo,ro):
"""
NAME:
_nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2018-09-14 - Written - Bovy (UofT)
"""
GM= self._amp*vo**2.*ro/2.
return "0,1,%s,%s,0" % (GM,self.a*ro)
class JaffePotential(DehnenSphericalPotential):
"""Class that implements the Jaffe potential
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(r/a)^2\\,(1+r/a)^{2}}
"""
def __init__(self,amp=1.,a=1.,normalize=False,
ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize a Jaffe potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass
a - scale radius (can be Quantity)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='mass')
a= conversion.parse_length(a,ro=self._ro)
self.a= a
self._scale= self.a
self.alpha= 2
self.beta= 4
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-09 - Started - Bovy (NYU)
"""
return -numpy.log(1.+self.a/numpy.sqrt(R**2.+z**2.))/self.a
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -R/sqrtRz**3./(1.+self.a/sqrtRz)
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -z/sqrtRz**3./(1.+self.a/sqrtRz)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return (self.a*(z**2.-R**2.)+(z**2.-2.*R**2.)*sqrtRz)\
/sqrtRz**4./(self.a+sqrtRz)**2.
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
sqrtRz= numpy.sqrt(R**2.+z**2.)
return -R*z*(2.*self.a+3.*sqrtRz)*sqrtRz**-4.\
*(self.a+sqrtRz)**-2.
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
Rma= numpy.sqrt(R**2.-self.a**2.+0j)
if Rma == 0.:
return (3.*z**2.-2.*self.a**2.
+2.*numpy.sqrt(1.+(z/self.a)**2.)\
*(self.a**2.-2.*z**2.)
+3.*z**3./self.a*numpy.arctan(z/self.a))\
/self.a/z**3./6./numpy.pi
else:
return ((2.*self.a**2.-R**2.)*Rma**-3\
*(numpy.arctan(z/Rma)-numpy.arctan(self.a*z/r/Rma))
+numpy.arctan(z/R)/R
-self.a*z/(R**2-self.a**2)/(r+self.a)).real\
/self.a/2./numpy.pi
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
calculate the mass out to a given radius
INPUT:
R - radius at which to return the enclosed mass
z - (don't specify this) vertical height
OUTPUT:
mass in natural units
HISTORY:
2014-01-29 - Written - Bovy (IAS)
"""
if z is not None: raise AttributeError # use general implementation
return 1./(1.+self.a/R) # written so it works for r=numpy.inf
class NFWPotential(TwoPowerSphericalPotential):
"""Class that implements the NFW potential
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{4\\,\\pi\\,a^3}\\,\\frac{1}{(r/a)\\,(1+r/a)^{2}}
"""
def __init__(self,amp=1.,a=1.,normalize=False,
rmax=None,vmax=None,
conc=None,mvir=None,
vo=None,ro=None,
H=70.,Om=0.3,overdens=200.,wrtcrit=False):
"""
NAME:
__init__
PURPOSE:
Initialize a NFW potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units of mass or Gxmass
a - scale radius (can be Quantity)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
Alternatively, NFW potentials can be initialized in the following two manners:
a)
rmax= radius where the rotation curve peaks (can be a Quantity, otherwise assumed to be in internal units)
vmax= maximum circular velocity (can be a Quantity, otherwise assumed to be in internal units)
b)
conc= concentration
mvir= virial mass in 10^12 Msolar
in which case you also need to supply the following keywords
H= (default: 70) Hubble constant in km/s/Mpc
Om= (default: 0.3) Omega matter
overdens= (200) overdensity which defines the virial radius
wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2010-07-09 - Written - Bovy (NYU)
2014-04-03 - Initialization w/ concentration and mass - Bovy (IAS)
2020-04-29 - Initialization w/ rmax and vmax - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='mass')
a= conversion.parse_length(a,ro=self._ro)
if conc is None and rmax is None:
self.a= a
self.alpha= 1
self.beta= 3
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)):
self.normalize(normalize)
elif not rmax is None:
if _APY_LOADED and isinstance(rmax,units.Quantity):
rmax= conversion.parse_length(rmax,ro=self._ro)
self._roSet= True
if _APY_LOADED and isinstance(vmax,units.Quantity):
vmax= conversion.parse_velocity(vmax,vo=self._vo)
self._voSet= True
self.a= rmax/2.1625815870646098349
self._amp= vmax**2.*self.a/0.21621659550187311005
else:
if wrtcrit:
od= overdens/conversion.dens_in_criticaldens(self._vo,
self._ro,H=H)
else:
od= overdens/conversion.dens_in_meanmatterdens(self._vo,
self._ro,
H=H,Om=Om)
mvirNatural= mvir*100./conversion.mass_in_1010msol(self._vo,
self._ro)
rvir= (3.*mvirNatural/od/4./numpy.pi)**(1./3.)
self.a= rvir/conc
self._amp= mvirNatural/(numpy.log(1.+conc)-conc/(1.+conc))
# Turn on physical output, because mass is given in 1e12 Msun (see #465)
self._roSet= True
self._voSet= True
self._scale= self.a
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
self._nemo_accname= 'NFW'
return None
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-09 - Started - Bovy (NYU)
"""
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,(float,int)) and r == 0:
return -1./self.a
elif isinstance(r,(float,int)):
return -special.xlogy(1./r,1.+r/self.a) # stable as r -> infty
else:
out= -special.xlogy(1./r,1.+r/self.a) # stable as r -> infty
out[r == 0]= -1./self.a
return out
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return R*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz)
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return z*(1./Rz/(self.a+sqrtRz)-numpy.log(1.+sqrtRz/self.a)/sqrtRz/Rz)
def _rforce_jax(self,r):
"""
NAME:
_rforce_jax
PURPOSE:
evaluate the spherical radial force for this potential using JAX
INPUT:
r - Galactocentric spherical radius
OUTPUT:
the radial force
HISTORY:
2021-02-14 - Written - Bovy (UofT)
"""
try:
import jax.numpy as jnp
except ImportError: # pragma: no cover
raise ImportError("Making use of _rforce_jax function requires the google/jax library")
return self._amp*(1./r/(self.a+r)-jnp.log(1.+r/self.a)/r**2.)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return (3.*R**4.+2.*R**2.*(z**2.+self.a*sqrtRz)\
-z**2.*(z**2.+self.a*sqrtRz)\
-(2.*R**2.-z**2.)*(self.a**2.+R**2.+z**2.+2.*self.a*sqrtRz)\
*numpy.log(1.+sqrtRz/self.a))\
/Rz**2.5/(self.a+sqrtRz)**2.
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
Rz= R**2.+z**2.
sqrtRz= numpy.sqrt(Rz)
return -R*z*(-4.*Rz-3.*self.a*sqrtRz+3.*(self.a**2.+Rz+2.*self.a*sqrtRz)*numpy.log(1.+sqrtRz/self.a))*Rz**-2.5*(self.a+sqrtRz)**-2.
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
Rma= numpy.sqrt(R**2.-self.a**2.+0j)
if Rma == 0.:
za2= (z/self.a)**2
return self.a*(2.+numpy.sqrt(za2+1.)*(za2-2.))/6./numpy.pi/z**3
else:
return (self.a*Rma**-3\
*(numpy.arctan(self.a*z/r/Rma)-numpy.arctan(z/Rma))
+z/(r+self.a)/(R**2.-self.a**2.)).real/2./numpy.pi
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
calculate the mass out to a given radius
INPUT:
R - radius at which to return the enclosed mass
z - (don't specify this) vertical height
OUTPUT:
mass in natural units
HISTORY:
2014-01-29 - Written - Bovy (IAS)
"""
if z is not None: raise AttributeError # use general implementation
return numpy.log(1+R/self.a)-R/self.a/(1.+R/self.a)
@conversion.physical_conversion('position',pop=False)
def rvir(self,H=70.,Om=0.3,t=0.,overdens=200.,wrtcrit=False,ro=None,vo=None,
use_physical=False): # use_physical necessary bc of pop=False, does nothing inside
"""
NAME:
rvir
PURPOSE:
calculate the virial radius for this density distribution
INPUT:
H= (default: 70) Hubble constant in km/s/Mpc
Om= (default: 0.3) Omega matter
overdens= (200) overdensity which defines the virial radius
wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density
ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc))
vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s))
OUTPUT:
virial radius
HISTORY:
2014-01-29 - Written - Bovy (IAS)
"""
if ro is None: ro= self._ro
if vo is None: vo= self._vo
if wrtcrit:
od= overdens/conversion.dens_in_criticaldens(vo,ro,H=H)
else:
od= overdens/conversion.dens_in_meanmatterdens(vo,ro,
H=H,Om=Om)
dc= 12.*self.dens(self.a,0.,t=t,use_physical=False)/od
x= optimize.brentq(lambda y: (numpy.log(1.+y)-y/(1.+y))/y**3.-1./dc,
0.01,100.)
return x*self.a
@conversion.physical_conversion('position',pop=True)
def rmax(self):
"""
NAME:
rmax
PURPOSE:
calculate the radius at which the rotation curve peaks
INPUT:
(none)
OUTPUT:
Radius at which the rotation curve peaks
HISTORY:
2020-02-05 - Written - Bovy (UofT)
"""
# Magical number, solve(derivative (ln(1+x)-x/(1+x))/x wrt x=0,x)
return 2.1625815870646098349*self.a
@conversion.physical_conversion('velocity',pop=True)
def vmax(self):
"""
NAME:
vmax
PURPOSE:
calculate the maximum rotation curve velocity
INPUT:
(none)
OUTPUT:
Peak velocity in the rotation curve
HISTORY:
2020-02-05 - Written - Bovy (UofT)
"""
# 0.21621659550187311005 = (numpy.log(1.+rmax)-rmax/(1.+rmax))/rmax
return numpy.sqrt(0.21621659550187311005*self._amp/self.a)
@kms_to_kpcGyrDecorator
def _nemo_accpars(self,vo,ro):
"""
NAME:
_nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2014-12-18 - Written - Bovy (IAS)
"""
ampl= self._amp*vo**2.*ro
vmax= numpy.sqrt(ampl/self.a/ro*0.2162165954) #Take that factor directly from gyrfalcon
return "0,%s,%s" % (self.a*ro,vmax)
# Copyright (c) 2014 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2015 Alex Meade. All Rights Reserved.
# Copyright (c) 2015 Rushil Chugh. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client classes for web services.
"""
import copy
import json
from oslo_log import log as logging
import requests
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.netapp.eseries import utils
LOG = logging.getLogger(__name__)
class WebserviceClient(object):
"""Base client for e-series web services."""
def __init__(self, scheme, host, port, service_path, username,
password, **kwargs):
self._validate_params(scheme, host, port)
self._create_endpoint(scheme, host, port, service_path)
self._username = username
self._password = password
self._init_connection()
def _validate_params(self, scheme, host, port):
"""Does some basic validation for web service params."""
if host is None or port is None or scheme is None:
msg = _("One of the required inputs from host, port"
" or scheme not found.")
raise exception.InvalidInput(reason=msg)
if scheme not in ('http', 'https'):
raise exception.InvalidInput(reason=_("Invalid transport type."))
def _create_endpoint(self, scheme, host, port, service_path):
"""Creates end point url for the service."""
netloc = '%s:%s' % (host, port)
self._endpoint = urlparse.urlunparse((scheme, netloc, service_path,
None, None, None))
def _init_connection(self):
"""Do client specific set up for session and connection pooling."""
self.conn = requests.Session()
if self._username and self._password:
self.conn.auth = (self._username, self._password)
def invoke_service(self, method='GET', url=None, params=None, data=None,
headers=None, timeout=None, verify=False):
url = url or self._endpoint
try:
response = self.conn.request(method, url, params, data,
headers=headers, timeout=timeout,
verify=verify)
        # Catch any unexpected error so that only known exceptions
        # propagate back to the caller.
except Exception as e:
LOG.exception(_LE("Unexpected error while invoking web service."
" Error - %s."), e)
raise exception.NetAppDriverException(
_("Invoking web service failed."))
self._eval_response(response)
return response
def _eval_response(self, response):
"""Evaluates response before passing result to invoker."""
pass
class RestClient(WebserviceClient):
"""REST client specific to e-series storage service."""
def __init__(self, scheme, host, port, service_path, username,
password, **kwargs):
super(RestClient, self).__init__(scheme, host, port, service_path,
username, password, **kwargs)
kwargs = kwargs or {}
self._system_id = kwargs.get('system_id')
self._content_type = kwargs.get('content_type') or 'json'
def set_system_id(self, system_id):
"""Set the storage system id."""
self._system_id = system_id
def get_system_id(self):
"""Get the storage system id."""
return getattr(self, '_system_id', None)
def _get_resource_url(self, path, use_system=True, **kwargs):
"""Creates end point url for rest service."""
kwargs = kwargs or {}
if use_system:
if not self._system_id:
raise exception.NotFound(_('Storage system id not set.'))
kwargs['system-id'] = self._system_id
path = path.format(**kwargs)
if not self._endpoint.endswith('/'):
self._endpoint = '%s/' % self._endpoint
return urlparse.urljoin(self._endpoint, path.lstrip('/'))
def _invoke(self, method, path, data=None, use_system=True,
timeout=None, verify=False, **kwargs):
"""Invokes end point for resource on path."""
scrubbed_data = copy.deepcopy(data)
if scrubbed_data:
if 'password' in scrubbed_data:
scrubbed_data['password'] = "****"
if 'storedPassword' in scrubbed_data:
scrubbed_data['storedPassword'] = "****"
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
" verify: %(v)s, kwargs: %(k)s.",
{'m': method, 'p': path, 'd': scrubbed_data,
'sys': use_system, 't': timeout, 'v': verify, 'k': kwargs})
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
data = json.dumps(data) if data else None
res = self.invoke_service(method, url, data=data,
headers=headers,
timeout=timeout, verify=verify)
return res.json() if res.text else None
else:
raise exception.NetAppDriverException(
_("Content type not supported."))
def _eval_response(self, response):
"""Evaluates response before passing result to invoker."""
super(RestClient, self)._eval_response(response)
status_code = int(response.status_code)
# codes >= 300 are not ok and to be treated as errors
if status_code >= 300:
# Response code 422 returns error code and message
if status_code == 422:
msg = _("Response error - %s.") % response.text
else:
msg = _("Response error code - %s.") % status_code
raise exception.NetAppDriverException(msg)
def create_volume(self, pool, label, size, unit='gb', seg_size=0):
"""Creates volume on array."""
path = "/storage-systems/{system-id}/volumes"
data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
'size': int(size), 'segSize': seg_size}
return self._invoke('POST', path, data)
def delete_volume(self, object_id):
"""Deletes given volume from array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
def list_volumes(self):
"""Lists all volumes in storage array."""
path = "/storage-systems/{system-id}/volumes"
return self._invoke('GET', path)
def list_volume(self, object_id):
"""List given volume from array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
return self._invoke('GET', path, **{'object-id': object_id})
def update_volume(self, object_id, label):
"""Renames given volume in array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
data = {'name': label}
return self._invoke('POST', path, data, **{'object-id': object_id})
def get_volume_mappings(self):
"""Creates volume mapping on array."""
path = "/storage-systems/{system-id}/volume-mappings"
return self._invoke('GET', path)
def create_volume_mapping(self, object_id, target_id, lun):
"""Creates volume mapping on array."""
path = "/storage-systems/{system-id}/volume-mappings"
data = {'mappableObjectId': object_id, 'targetId': target_id,
'lun': lun}
return self._invoke('POST', path, data)
def delete_volume_mapping(self, map_object_id):
"""Deletes given volume mapping from array."""
path = "/storage-systems/{system-id}/volume-mappings/{object-id}"
return self._invoke('DELETE', path, **{'object-id': map_object_id})
def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id):
"""Moves a map from one host/host_group object to another."""
path = "/storage-systems/{system-id}/symbol/moveLUNMapping"
data = {'lunMappingRef': map_ref,
'lun': int(lun_id),
'mapRef': to_ref}
return_code = self._invoke('POST', path, data)
if return_code == 'ok':
return {'lun': lun_id}
msg = _("Failed to move LUN mapping. Return code: %s") % return_code
raise exception.NetAppDriverException(msg)
def list_hardware_inventory(self):
"""Lists objects in the hardware inventory."""
path = "/storage-systems/{system-id}/hardware-inventory"
return self._invoke('GET', path)
def create_host_group(self, label):
"""Creates a host group on the array."""
path = "/storage-systems/{system-id}/host-groups"
data = {'name': label}
return self._invoke('POST', path, data)
def get_host_group(self, host_group_ref):
"""Gets a single host group from the array."""
path = "/storage-systems/{system-id}/host-groups/{object-id}"
try:
return self._invoke('GET', path, **{'object-id': host_group_ref})
except exception.NetAppDriverException:
raise exception.NotFound(_("Host group with ref %s not found") %
host_group_ref)
def get_host_group_by_name(self, name):
"""Gets a single host group by name from the array."""
host_groups = self.list_host_groups()
matching = [host_group for host_group in host_groups
if host_group['label'] == name]
if len(matching):
return matching[0]
raise exception.NotFound(_("Host group with name %s not found") % name)
def list_host_groups(self):
"""Lists host groups on the array."""
path = "/storage-systems/{system-id}/host-groups"
return self._invoke('GET', path)
def list_hosts(self):
"""Lists host objects in the system."""
path = "/storage-systems/{system-id}/hosts"
return self._invoke('GET', path)
def create_host(self, label, host_type, ports=None, group_id=None):
"""Creates host on array."""
path = "/storage-systems/{system-id}/hosts"
data = {'name': label, 'hostType': host_type}
data.setdefault('groupId', group_id if group_id else None)
data.setdefault('ports', ports if ports else None)
return self._invoke('POST', path, data)
def create_host_with_port(self, label, host_type, port_id,
port_label, port_type='iscsi', group_id=None):
"""Creates host on array with given port information."""
port = {'type': port_type, 'port': port_id, 'label': port_label}
return self.create_host(label, host_type, [port], group_id)
def update_host(self, host_ref, data):
"""Updates host type for a given host."""
path = "/storage-systems/{system-id}/hosts/{object-id}"
return self._invoke('POST', path, data, **{'object-id': host_ref})
def get_host(self, host_ref):
"""Gets a single host from the array."""
path = "/storage-systems/{system-id}/hosts/{object-id}"
return self._invoke('GET', path, **{'object-id': host_ref})
def update_host_type(self, host_ref, host_type):
"""Updates host type for a given host."""
data = {'hostType': host_type}
return self.update_host(host_ref, data)
def set_host_group_for_host(self, host_ref, host_group_ref=utils.NULL_REF):
"""Sets or clears which host group a host is in."""
data = {'groupId': host_group_ref}
self.update_host(host_ref, data)
def list_host_types(self):
"""Lists host types in storage system."""
path = "/storage-systems/{system-id}/host-types"
return self._invoke('GET', path)
def list_snapshot_groups(self):
"""Lists snapshot groups."""
path = "/storage-systems/{system-id}/snapshot-groups"
return self._invoke('GET', path)
def create_snapshot_group(self, label, object_id, storage_pool_id,
repo_percent=99, warn_thres=99, auto_del_limit=0,
full_policy='failbasewrites'):
"""Creates snapshot group on array."""
path = "/storage-systems/{system-id}/snapshot-groups"
data = {'baseMappableObjectId': object_id, 'name': label,
'storagePoolId': storage_pool_id,
'repositoryPercentage': repo_percent,
'warningThreshold': warn_thres,
'autoDeleteLimit': auto_del_limit, 'fullPolicy': full_policy}
return self._invoke('POST', path, data)
def delete_snapshot_group(self, object_id):
"""Deletes given snapshot group from array."""
path = "/storage-systems/{system-id}/snapshot-groups/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
def create_snapshot_image(self, group_id):
"""Creates snapshot image in snapshot group."""
path = "/storage-systems/{system-id}/snapshot-images"
data = {'groupId': group_id}
return self._invoke('POST', path, data)
def delete_snapshot_image(self, object_id):
"""Deletes given snapshot image in snapshot group."""
path = "/storage-systems/{system-id}/snapshot-images/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
def list_snapshot_images(self):
"""Lists snapshot images."""
path = "/storage-systems/{system-id}/snapshot-images"
return self._invoke('GET', path)
def create_snapshot_volume(self, image_id, label, base_object_id,
storage_pool_id,
repo_percent=99, full_thres=99,
view_mode='readOnly'):
"""Creates snapshot volume."""
path = "/storage-systems/{system-id}/snapshot-volumes"
data = {'snapshotImageId': image_id, 'fullThreshold': full_thres,
'storagePoolId': storage_pool_id,
'name': label, 'viewMode': view_mode,
'repositoryPercentage': repo_percent,
'baseMappableObjectId': base_object_id,
'repositoryPoolId': storage_pool_id}
return self._invoke('POST', path, data)
def delete_snapshot_volume(self, object_id):
"""Deletes given snapshot volume."""
path = "/storage-systems/{system-id}/snapshot-volumes/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
def list_storage_pools(self):
"""Lists storage pools in the array."""
path = "/storage-systems/{system-id}/storage-pools"
return self._invoke('GET', path)
def list_drives(self):
"""Lists drives in the array."""
path = "/storage-systems/{system-id}/drives"
return self._invoke('GET', path)
def list_storage_systems(self):
"""Lists managed storage systems registered with web service."""
path = "/storage-systems"
return self._invoke('GET', path, use_system=False)
def list_storage_system(self):
"""List current storage system registered with web service."""
path = "/storage-systems/{system-id}"
return self._invoke('GET', path)
def register_storage_system(self, controller_addresses, password=None,
wwn=None):
"""Registers storage system with web service."""
path = "/storage-systems"
data = {'controllerAddresses': controller_addresses}
data.setdefault('wwn', wwn if wwn else None)
data.setdefault('password', password if password else None)
return self._invoke('POST', path, data, use_system=False)
def update_stored_system_password(self, password):
"""Update array password stored on web service."""
path = "/storage-systems/{system-id}"
data = {'storedPassword': password}
return self._invoke('POST', path, data)
def create_volume_copy_job(self, src_id, tgt_id, priority='priority4',
tgt_wrt_protected='true'):
"""Creates a volume copy job."""
path = "/storage-systems/{system-id}/volume-copy-jobs"
data = {'sourceId': src_id, 'targetId': tgt_id,
'copyPriority': priority,
'targetWriteProtected': tgt_wrt_protected}
return self._invoke('POST', path, data)
def control_volume_copy_job(self, obj_id, control='start'):
"""Controls a volume copy job."""
path = ("/storage-systems/{system-id}/volume-copy-jobs-control"
"/{object-id}?control={String}")
return self._invoke('PUT', path, **{'object-id': obj_id,
'String': control})
def list_vol_copy_job(self, object_id):
"""List volume copy job."""
path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}"
return self._invoke('GET', path, **{'object-id': object_id})
def delete_vol_copy_job(self, object_id):
"""Delete volume copy job."""
path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
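# Hedged usage sketch (illustrative only): driving a volume copy with this
# client, assuming `client` is an initialized instance of this class and
# that the REST response carries the job's object id under 'id'.
#   job = client.create_volume_copy_job(src_id, tgt_id)
#   client.control_volume_copy_job(job['id'], control='start')
#   status = client.list_vol_copy_job(job['id'])
#   client.delete_vol_copy_job(job['id'])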
| |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import math
import appleseed as asr
from .textures import TextureTranslator
from .translator import Translator
from ..logger import get_logger
logger = get_logger()
class WorldTranslator(Translator):
"""
This class translates a Blender world block into an appleseed environment.
"""
# Constructor.
def __init__(self, world, asset_handler):
super().__init__(world, asset_handler)
self.__as_colors = list()
self.__as_env_type = None
self.__as_env = None
self.__as_env_shader = None
self.__as_env_edf = None
self.__as_edf_params = dict()
self._bl_obj.appleseed_sky.obj_name = self._bl_obj.name_full
@property
def bl_world(self):
return self._bl_obj
def create_entities(self, depsgraph):
logger.debug("appleseed: Creating world entity")
as_world = self.bl_world.appleseed_sky
self.__as_env_type = as_world.env_type
if self.__as_env_type != 'none':
self.__as_env_type = as_world.env_type if as_world.env_type != 'sunsky' else as_world.sun_model
self.__set_colors()
self.__as_edf_params = self.__create_params()
self.__as_env_edf = asr.EnvironmentEDF(
self.__as_env_type + "_environment_edf",
"sky_edf",
self.__as_edf_params)
self.__as_env_shader = asr.EnvironmentShader("edf_environment_shader",
"sky_shader",
{'environment_edf': 'sky_edf', 'alpha_value': as_world.env_alpha})
self.__as_env = asr.Environment("sky",
{"environment_edf": "sky_edf", "environment_shader": "sky_shader"})
else:
self.__as_env = asr.Environment("environment", {})
def flush_entities(self, as_scene, as_assembly, as_project):
logger.debug("appleseed: Flushing world entity to project")
if self.__as_env_type != 'none':
self.__as_env_edf.transform_sequence().set_transform(0.0, asr.Transformd(self._convert_matrix(asr.Matrix4d.identity())))
for index, color in enumerate(self.__as_colors):
color_name = color.get_name()
as_scene.colors().insert(color)
self.__as_colors[index] = as_scene.colors().get_by_name(color_name)
as_env_edf_name = self.__as_env_edf.get_name()
as_scene.environment_edfs().insert(self.__as_env_edf)
self.__as_env_edf = as_scene.environment_edfs().get_by_name(as_env_edf_name)
as_env_shader_name = self.__as_env_shader.get_name()
as_scene.environment_shaders().insert(self.__as_env_shader)
self.__as_env_shader = as_scene.environment_shaders().get_by_name(as_env_shader_name)
as_scene.set_environment(self.__as_env)
def update_world(self, as_scene, depsgraph):
logger.debug("appleseed: Updating world")
as_world = self.bl_world.appleseed_sky
current_env_type = self.__as_env_type
self.__as_env_type = as_world.env_type if as_world.env_type != 'sunsky' else as_world.sun_model
if current_env_type == 'none': # Create new environment entities.
self.create_entities(depsgraph)
self.flush_entities(as_scene, None, None)
elif self.__as_env_type == 'none': # Delete current world entities.
self.delete_world(as_scene)
elif current_env_type == self.__as_env_type:
self.__as_edf_params = self.__create_params()
self.__as_env_edf.set_parameters(self.__as_edf_params)
env_shader_params = self.__as_env_shader.get_parameters()
env_shader_params['alpha_value'] = as_world.env_alpha
self.__as_env_shader.set_parameters(env_shader_params)
for color in self.__as_colors:
as_scene.colors().remove(color)
self.__as_colors.clear()
self.__set_colors()
for index, color in enumerate(self.__as_colors):
color_name = color.get_name()
as_scene.colors().insert(color)
self.__as_colors[index] = as_scene.colors().get_by_name(color_name)
else: # World still exists but needs to be changed.
self.delete_world(as_scene)
self.create_entities(depsgraph)
self.flush_entities(as_scene, None, None)
def delete_world(self, as_scene):
logger.debug("appleseed: Deleting world")
for color in self.__as_colors:
as_scene.colors().remove(color)
self.__as_colors.clear()
if self.__as_env_edf is not None:
as_scene.environment_edfs().remove(self.__as_env_edf)
self.__as_env_edf = None
if self.__as_env_shader is not None:
as_scene.environment_shaders().remove(self.__as_env_shader)
self.__as_env_shader = None
as_scene.set_environment(asr.Environment("environment", {}))
# Internal methods.
def __set_colors(self):
as_world = self.bl_world.appleseed_sky
if self.__as_env_type == 'constant':
self.__as_colors.append(asr.ColorEntity('horizon_radiance_color',
{'color_space': 'linear_rgb'},
self._convert_color(as_world.horizon_color)))
elif self.__as_env_type in ('gradient', 'constant_hemisphere'):
self.__as_colors.append(asr.ColorEntity('horizon_radiance_color',
{'color_space': 'linear_rgb'},
self._convert_color(as_world.horizon_color)))
self.__as_colors.append(asr.ColorEntity('zenith_radiance_color',
{'color_space': 'linear_rgb'},
self._convert_color(as_world.zenith_color)))
def _convert_matrix(self, m):
as_world = self.bl_world.appleseed_sky
vertical_shift = asr.Matrix4d.make_rotation(asr.Vector3d(1.0, 0.0, 0.0), math.radians(as_world.vertical_shift))
horizontal_shift = asr.Matrix4d.make_rotation(asr.Vector3d(0.0, 1.0, 0.0), math.radians(as_world.horizontal_shift))
m = vertical_shift * horizontal_shift * m
return m
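# Note: the product above is vertical_shift * horizontal_shift * m, so when
# the result is applied to a vector the base transform acts first, then the
# rotation about Y (horizontal shift), then the rotation about X (vertical
# shift).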
def __create_params(self):
as_world = self.bl_world.appleseed_sky
params = dict()
if self.__as_env_type == 'latlong_map':
if as_world.env_tex is not None:
tex_name = f"{as_world.env_tex.name_full}_inst"
params = {'radiance': tex_name, 'radiance_multiplier': as_world.env_tex_mult, 'exposure': as_world.env_exposure}
else:
return params
elif self.__as_env_type == 'mirrorball_map':
if as_world.env_tex is not None:
tex_name = f"{as_world.env_tex.name_full}_inst"
params = {'radiance': tex_name, 'exposure': as_world.env_exposure, 'radiance_multiplier': as_world.env_tex_mult}
else:
return params
elif self.__as_env_type == 'constant':
params = {'radiance': 'horizon_radiance_color'}
elif self.__as_env_type == 'gradient':
params = {'horizon_radiance': "horizon_radiance_color",
'zenith_radiance': "zenith_radiance_color"}
elif self.__as_env_type == 'constant_hemisphere':
params = {'lower_hemi_radiance': "horizon_radiance_color",
'upper_hemi_radiance': "zenith_radiance_color"}
else:
params = {'ground_albedo': as_world.ground_albedo,
'sun_phi': as_world.sun_phi,
'sun_theta': as_world.sun_theta,
'turbidity': as_world.turbidity,
'turbidity_multiplier': as_world.turbidity_multiplier,
'luminance_multiplier': as_world.luminance_multiplier,
'luminance_gamma': as_world.luminance_gamma,
'saturation_multiplier': as_world.saturation_multiplier,
'horizon_shift': as_world.horizon_shift}
return params
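# Hedged usage sketch (not part of the add-on): the lifecycle implied by the
# methods above; `world`, `asset_handler`, `depsgraph` and `as_scene` are
# assumed to come from the surrounding Blender/appleseed session code.
#   translator = WorldTranslator(world, asset_handler)
#   translator.create_entities(depsgraph)
#   translator.flush_entities(as_scene, None, None)
#   translator.update_world(as_scene, depsgraph)   # on edits
#   translator.delete_world(as_scene)              # on removal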
| |
"""
Discrete Fourier Transforms - basic.py
"""
# Created by Pearu Peterson, August,September 2002
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2']
from numpy import zeros, swapaxes
import numpy
import _fftpack
import atexit
atexit.register(_fftpack.destroy_zfft_cache)
atexit.register(_fftpack.destroy_zfftnd_cache)
atexit.register(_fftpack.destroy_drfft_cache)
atexit.register(_fftpack.destroy_cfft_cache)
atexit.register(_fftpack.destroy_cfftnd_cache)
atexit.register(_fftpack.destroy_rfft_cache)
del atexit
def istype(arr, typeclass):
return issubclass(arr.dtype.type, typeclass)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
# XXX: single precision FFTs partially disabled due to accuracy issues
# for large prime-sized inputs.
#
# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834
# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010,
# @ scipy-dev)
#
# These should be re-enabled once the problems are resolved
def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
Numbers whose only prime factors are 2, 3, and 5 are accepted, as FFTPACK has efficient routines for those.
"""
n = int(n)
for c in (2, 3, 5):
while n % c == 0:
n //= c
return (n <= 1)
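# For example, _is_safe_size(60) is True (60 = 2*2*3*5), while
# _is_safe_size(14) is False (14 carries the prime factor 7).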
def _fake_crfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.crfft(x, n, *a, **kw)
else:
return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_cfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.cfft(x, n, *a, **kw)
else:
return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_rfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.rfft(x, n, *a, **kw)
else:
return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32)
def _fake_cfftnd(x, shape, *a, **kw):
if all(_is_safe_size(dim) for dim in shape):
return _fftpack.cfftnd(x, shape, *a, **kw)
else:
return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64)
_DTYPE_TO_FFT = {
# numpy.dtype(numpy.float32): _fftpack.crfft,
numpy.dtype(numpy.float32): _fake_crfft,
numpy.dtype(numpy.float64): _fftpack.zrfft,
# numpy.dtype(numpy.complex64): _fftpack.cfft,
numpy.dtype(numpy.complex64): _fake_cfft,
numpy.dtype(numpy.complex128): _fftpack.zfft,
}
_DTYPE_TO_RFFT = {
# numpy.dtype(numpy.float32): _fftpack.rfft,
numpy.dtype(numpy.float32): _fake_rfft,
numpy.dtype(numpy.float64): _fftpack.drfft,
}
_DTYPE_TO_FFTN = {
# numpy.dtype(numpy.complex64): _fftpack.cfftnd,
numpy.dtype(numpy.complex64): _fake_cfftnd,
numpy.dtype(numpy.complex128): _fftpack.zfftnd,
# numpy.dtype(numpy.float32): _fftpack.cfftnd,
numpy.dtype(numpy.float32): _fake_cfftnd,
numpy.dtype(numpy.float64): _fftpack.zfftnd,
}
def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
already an array with a float dtype, and do not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
return x
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if not ret.dtype.char in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret
def _fix_shape(x, n, axis):
""" Internal auxiliary function for _raw_fft, _raw_fftnd."""
s = list(x.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
x = x[index]
return x, False
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s,x.dtype.char)
z[index] = x
return z, True
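# For example, with x of shape (5,), _fix_shape(x, 3, 0) truncates x to its
# first 3 elements and returns (x[:3], False), while _fix_shape(x, 8, 0)
# zero-pads to length 8 and returns the padded copy with True.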
def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r
def fft(x, n=None, axis=-1, overwrite_x=0):
"""
Return discrete Fourier transform of arbitrary type sequence x.
Parameters
----------
x : array-like
array to fourier transform.
n : int, optional
Length of the Fourier transform. If n<x.shape[axis],
x is truncated. If n>x.shape[axis], x is zero-padded.
(Default n=x.shape[axis]).
axis : int, optional
Axis along which the fft's are computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : complex ndarray
with the elements:
[y(0),y(1),..,y(n/2-1),y(-n/2),...,y(-1)] if n is even
[y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd
where
y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
Note that y(-j) = y(n-j).conjugate().
See Also
--------
ifft : Inverse FFT
rfft : FFT of a real sequence
Notes
-----
The packing of the result is "standard": If A = fft(a, n), then A[0]
contains the zero-frequency term, A[1:n/2+1] contains the
positive-frequency terms, and A[n/2+1:] contains the negative-frequency
terms, in order of decreasingly negative frequency. So for an 8-point
transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1].
This is most efficient for n a power of two.
Examples
--------
>>> x = np.arange(5)
>>> np.all(np.abs(x-fft(ifft(x))<1.e-15) #within numerical accuracy.
True
"""
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
if n is None:
n = tmp.shape[axis]
elif n != tmp.shape[axis]:
tmp, copy_made = _fix_shape(tmp,n,axis)
overwrite_x = overwrite_x or copy_made
if axis == -1 or axis == len(tmp.shape) - 1:
return work_function(tmp,n,1,0,overwrite_x)
tmp = swapaxes(tmp, axis, -1)
tmp = work_function(tmp,n,1,0,overwrite_x)
return swapaxes(tmp, axis, -1)
def ifft(x, n=None, axis=-1, overwrite_x=0):
"""
Return discrete inverse Fourier transform of real or complex sequence.
The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
Parameters
----------
x : array_like
Transformed data to invert.
n : int, optional
Length of the inverse Fourier transform. If ``n < x.shape[axis]``,
`x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded.
The default results in ``n = x.shape[axis]``.
axis : int, optional
Axis along which the ifft's are computed; the default is over the
last axis (i.e., ``axis=-1``).
overwrite_x : bool, optional
If True the contents of `x` can be destroyed; the default is False.
"""
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
if n is None:
n = tmp.shape[axis]
elif n != tmp.shape[axis]:
tmp, copy_made = _fix_shape(tmp,n,axis)
overwrite_x = overwrite_x or copy_made
if axis == -1 or axis == len(tmp.shape) - 1:
return work_function(tmp,n,-1,1,overwrite_x)
tmp = swapaxes(tmp, axis, -1)
tmp = work_function(tmp,n,-1,1,overwrite_x)
return swapaxes(tmp, axis, -1)
def rfft(x, n=None, axis=-1, overwrite_x=0):
"""
Discrete Fourier transform of a real sequence.
The returned real array contains::
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd
where
::
y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
j = 0..n-1
Note that ``y(-j) == y(n-j).conjugate()``.
Parameters
----------
x : array_like, real-valued
The data to transform.
n : int, optional
Defines the length of the Fourier transform. If `n` is not specified
(the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``,
`x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
axis : int, optional
The axis along which the transform is applied. The default is the
last axis.
overwrite_x : bool, optional
If set to true, the contents of `x` can be overwritten. Default is
False.
See also
--------
fft, irfft, scipy.fftpack.basic
Notes
-----
Within numerical accuracy, ``y == rfft(irfft(y))``.
"""
tmp = _asfarray(x)
if not numpy.isrealobj(tmp):
raise TypeError("1st argument must be real sequence")
try:
work_function = _DTYPE_TO_RFFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fft(tmp,n,axis,1,overwrite_x,work_function)
def irfft(x, n=None, axis=-1, overwrite_x=0):
""" irfft(x, n=None, axis=-1, overwrite_x=0) -> y
Return inverse discrete Fourier transform of real sequence x.
The contents of x are interpreted as the output of the rfft(..)
function.
The returned real array contains
[y(0),y(1),...,y(n-1)]
where for even n
y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0] + (-1)**(j) x[n-1])
and for odd n
y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0])
c.c. denotes the complex conjugate of the preceding expression.
Optional input: see rfft.__doc__
"""
tmp = _asfarray(x)
if not numpy.isrealobj(tmp):
raise TypeError("1st argument must be real sequence")
try:
work_function = _DTYPE_TO_RFFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function)
def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
""" Internal auxiliary function for fftnd, ifftnd."""
if s is None:
if axes is None:
s = x.shape
else:
s = numpy.take(x.shape, axes)
s = tuple(s)
if axes is None:
noaxes = True
axes = range(-x.ndim, 0)
else:
noaxes = False
if len(axes) != len(s):
raise ValueError("when given, axes and shape arguments "\
"have to be of the same length")
# No need to swap axes, array is in C order
if noaxes:
for i in axes:
x, copy_made = _fix_shape(x, s[i], i)
overwrite_x = overwrite_x or copy_made
return work_function(x,s,direction,overwrite_x=overwrite_x)
# We sort the axes because the code below, which pushes the requested axes
# to the end of the array, assumes the axes argument is in ascending order.
id = numpy.argsort(axes)
axes = [axes[i] for i in id]
s = [s[i] for i in id]
# Swap the requested axes, last first (i.e. first swap the axis which ends
# up at -1, then the one at -2, etc.), so that the requested axes on which
# the operation is carried out become the last ones.
for i in range(1, len(axes)+1):
x = numpy.swapaxes(x, axes[-i], -i)
# We can now operate on waxes, the last p axes (p = len(axes)), by fixing
# the shape of the input array to 1 for any axis the FFT is not carried
# out on.
waxes = range(x.ndim - len(axes), x.ndim)
shape = numpy.ones(x.ndim)
shape[waxes] = s
for i in range(len(waxes)):
x, copy_made = _fix_shape(x, s[i], waxes[i])
overwrite_x = overwrite_x or copy_made
r = work_function(x, shape, direction, overwrite_x=overwrite_x)
# reswap in the reverse order (first axis first, etc...) to get original
# order
for i in range(len(axes), 0, -1):
r = numpy.swapaxes(r, -i, axes[-i])
return r
def fftn(x, shape=None, axes=None, overwrite_x=0):
""" fftn(x, shape=None, axes=None, overwrite_x=0) -> y
Return multi-dimensional discrete Fourier transform of arbitrary
type sequence x.
The returned array contains
y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
where d = len(x.shape) and n = x.shape.
Note that y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate().
Optional input:
shape
Defines the shape of the Fourier transform. If shape is not
specified then shape=take(x.shape,axes,axis=0).
If shape[i]>x.shape[i] then the i-th dimension is padded with
zeros. If shape[i]<x.shape[i], then the i-th dimension is
truncated to desired length shape[i].
axes
The transform is applied along the given axes of the input
array (or the newly constructed array if shape argument was
used).
overwrite_x
If set to true, the contents of x can be destroyed.
Notes:
y == fftn(ifftn(y)) within numerical accuracy.
"""
return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1)
def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction):
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFTN[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fftnd(tmp,shape,axes,direction,overwrite_x,work_function)
def ifftn(x, shape=None, axes=None, overwrite_x=0):
"""
Return inverse multi-dimensional discrete Fourier transform of
arbitrary type sequence x.
The returned array contains::
y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
For description of parameters see `fftn`.
See Also
--------
fftn : for detailed information.
"""
return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1)
def fft2(x, shape=None, axes=(-2,-1), overwrite_x=0):
"""
2-D discrete Fourier transform.
Return the two-dimensional discrete Fourier transform of the 2-D argument
`x`.
See Also
--------
fftn : for detailed information.
"""
return fftn(x,shape,axes,overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=0):
"""
2-D discrete inverse Fourier transform of real or complex sequence.
Return inverse two-dimensional discrete Fourier transform of
arbitrary type sequence x.
See `ifft` for more information.
See also
--------
fft2, ifft
"""
return ifftn(x,shape,axes,overwrite_x)
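# Minimal round-trip smoke test, a sketch assuming the compiled _fftpack
# extension module is importable (as the imports at the top require).
if __name__ == "__main__":
    a = numpy.arange(8.0)
    # fft followed by ifft should recover the input (up to rounding).
    assert numpy.allclose(a, ifft(fft(a)).real)
    # The real-transform pair should round-trip as well.
    assert numpy.allclose(a, irfft(rfft(a)))
    print("fft/ifft and rfft/irfft round-trips OK")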
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - Frank Lin
import re
from skrutil.skr_logger import skr_log_warning
from skrutil import string_utils
_OBJC_BR = '\n\n'
_OBJC_SPACE = ' '
class ObjcManager:
def __init__(self, manager_name):
self.manager_name = manager_name
self.save_commands = []
self.delete_commands = []
self.fetch_commands = []
self.apis = []
self.object_name = ''
self.plural_object_name = ''
self.objc_variable_list = []
def set_object_name(self, class_name, plural_class_name):
self.object_name = class_name
self.plural_object_name = plural_class_name
def set_objc_variable_list(self, objc_variable_list):
self.objc_variable_list = objc_variable_list
def add_save_command(self, save_command):
self.save_commands.append(save_command)
def add_fetch_command(self, fetch_command):
self.fetch_commands.append(fetch_command)
def add_delete_command(self, delete_command):
self.delete_commands.append(delete_command)
def add_api_description(self, api_description):
self.apis.append(api_description)
def class_name(self):
return self.manager_name
def generate_fetch_declarations(self, config):
"""Generates Objective-C++ fetch declarations.
Args:
config: A <Config> object represents user-defined info.
Returns:
A string which is Objective-C++ fetch declarations.
"""
declaration = ''
for fetch_command in self.fetch_commands:
by_list = []
if fetch_command.where != '':
by_list = re.split(',', fetch_command.where)
if not fetch_command.is_plural:
if len(by_list) == 0:
skr_log_warning('Singular often comes with at least one by parameter')
declaration += '- (nullable {2}{0} *)fetch{0}FromCache{1};\n\n'\
.format(self.object_name, self.__convert_bys_to_string(by_list), config.objc_prefix)
else:
declaration += '- (NSArray<LCC{0} *> *)fetch{1}FromCache{2};\n\n'\
.format(self.object_name, self.plural_object_name, self.__convert_bys_to_string(by_list))
return declaration
def generate_fetch_implementations(self, config):
"""Generates Objective-C++ fetch implementations.
Args:
config: A <Config> object represents user-defined info.
Returns:
Objective-C++ fetch implementations.
"""
impl = ''
for fetch_command in self.fetch_commands:
impl += self.__fetch_implementation(fetch_command, config)
impl += _OBJC_BR
return impl
def generate_web_api_declarations(self, config):
"""Generates Objective-C++ web api declarations.
Args:
config: A <Config> object represents user-defined info.
Returns:
A string which is Objective-C++ web api declarations.
"""
declaration = ''
for api in self.apis:
declaration += self.__web_api_declaration(api, config)
declaration += ';'
declaration += _OBJC_BR
return declaration
def generate_web_api_implementations(self, config):
"""Generates Objective-C++ web api implementations.
Args:
config: A <Config> object represents user-defined info.
Returns:
A string which is Objective-C++ web api implementations.
"""
impl = ''
for api in self.apis:
impl += self.__web_api_declaration(api, config)
impl += ' {\n'
impl += string_utils.indent(2)
impl += '_coreManagerHandler->\n'
impl += string_utils.indent(2)
impl += api.alias + '('
for input_var in api.input_var_list:
impl += input_var.cast_to_cpp_parameter()
impl += ', '
impl += '[successBlock, failureBlock](bool success, const std::string& errorUTF8String'
for output_var in api.output_var_list:
impl += ', {0}'.format(output_var.objc_wrapper_from_cpp_parameter(config))
impl += ') {\n'
impl += string_utils.indent(4)
impl += 'if (success) {\n'
for output_var in api.output_var_list:
impl += output_var.objc_form_cpp_parameter(6, config)
impl += _OBJC_BR
impl += string_utils.indent(6)
impl += 'successBlock('
for i, output_var in enumerate(api.output_var_list):
if i != 0:
impl += ', '
impl += string_utils.to_objc_property_name(output_var.name)
impl += ');\n'
impl += string_utils.indent(4)
impl += '} else {\n'
impl += string_utils.indent(6)
impl += 'NSString *error = [NSString stringWithUTF8String:errorUTF8String.c_str()];\n'
impl += string_utils.indent(6)
impl += 'failureBlock({0}(error));\n'.format(config.objc_error_method)
impl += string_utils.indent(4)
impl += '}\n'
impl += string_utils.indent(2)
impl += '});\n}'
impl += _OBJC_BR
return impl
def generate_constructor_implementation(self, config):
"""Generates Objective-C++ init method.
Args:
config: A <Config> object represents user-defined info.
Returns:
Objective-C++ init method.
"""
impl = '- (instancetype)init {\n'
impl += string_utils.indent(2)
impl += 'if (self = [super init]) {\n'
impl += string_utils.indent(4)
impl += '_coreManagerHandler = {1}::{0}Manager::DefaultManager();\n'.format(self.object_name,
config.cpp_namespace)
impl += string_utils.indent(2)
impl += '}\n'
impl += string_utils.indent(2)
impl += 'return self;\n'
impl += '}'
return impl
def generate_default_manager_implementation(self, config):
"""Generates Objective-C++ default manager method.
Args:
config: A <Config> object represents user-defined info.
Returns:
Objective-C++ default manager method.
"""
impl = '+ (instancetype)defaultManager {\n'
impl += _OBJC_SPACE
impl += 'return [{1}Director defaultDirector].{0}Manager;\n'.format(
string_utils.first_char_to_lower(self.object_name), config.objc_prefix)
impl += '}'
return impl
def __convert_bys_to_string(self, by_string_list):
"""Returns "ById:(NSString *)id name:(NSString *)name" or ""
"""
if len(by_string_list) == 0: # empty string
return ''
else: # "(const std::string& id, const std::string& username)"
bys_string = 'By'
it = 0
for by_string in by_string_list:
objc_var = self.__objc_var_by_name(by_string)
if objc_var is not None:
if it == 0:
bys_string += string_utils.first_char_to_upper(objc_var.parameter()) + ' '
else:
bys_string += objc_var.parameter() + ' '
it += 1
else:
print 'Unknown "{0}" in "by"'.format(by_string)
return ''
bys_string = bys_string[:-1]
return bys_string
def __objc_var_by_name(self, name_string):
"""Returns None if not found.
"""
for objc_var in self.objc_variable_list:
if objc_var.name == name_string:
return objc_var
return None
def __fetch_implementation(self, fetch_command, config):
"""Generates Objective-C++ fetch implementation.
Args:
fetch_command: A <FetchCommand> object represents necessary info for generating fetch implementation.
config: A <Config> object represents user-defined info.
Returns:
A string which is Objective-C++ fetch implementation.
"""
by_list = []
if fetch_command.where != '':
by_list = re.split(',', fetch_command.where)
if not fetch_command.is_plural:
impl = '- (nullable {2}{0} *)fetch{0}FromCache{1} {{\n'\
.format(self.object_name, self.__convert_bys_to_string(by_list), config.objc_prefix)
impl += string_utils.indent(2)
impl += 'std::unique_ptr<{2}::{0}> core{0} = _coreManagerHandler->{1};\n'\
.format(self.object_name, self.__cpp_fetch_method_name(fetch_command), config.cpp_namespace)
impl += string_utils.indent(2)
impl += 'if (core{0}) {{\n'.format(self.object_name)
impl += string_utils.indent(4)
impl += 'return [{2}{0} {1}WithCore{0}:*core{0}];\n'\
.format(self.object_name, string_utils.first_char_to_lower(self.object_name), config.objc_prefix)
impl += string_utils.indent(2)
impl += '}\n'
impl += string_utils.indent(2)
impl += 'return nil;\n'
impl += '}'
return impl
else:
impl = '- (NSArray<LCC{0} *> *)fetch{1}FromCache{2} {{\n'\
.format(self.object_name, self.plural_object_name, self.__convert_bys_to_string(by_list))
impl += string_utils.indent(2)
impl += 'NSMutableArray *{0} = [NSMutableArray array];\n'.format(string_utils.first_char_to_lower(self.plural_object_name))
impl += string_utils.indent(2)
impl += 'std::vector<std::unique_ptr<{3}::{0}>> core{1} = _coreManagerHandler->{2};\n'\
.format(self.object_name,
self.plural_object_name,
self.__cpp_fetch_method_name(fetch_command),
config.objc_prefix)
impl += string_utils.indent(2)
impl += 'for (auto it = core{0}.begin(); it != core{0}.end(); ++it) {{\n'.format(self.plural_object_name)
impl += string_utils.indent(4)
impl += '[{0} addObject:[LCC{1} {2}WithCore{1}:(**it)]];\n'\
.format(string_utils.first_char_to_lower(self.plural_object_name),
self.object_name,
string_utils.first_char_to_lower(self.object_name))
impl += string_utils.indent(2)
impl += '}\n'
impl += string_utils.indent(2)
impl += 'return [{0} copy];\n'.format(string_utils.first_char_to_lower(self.plural_object_name))
impl += '}\n'
self.impl = impl
return self.impl
def __cpp_fetch_method_name(self, fetch_command):
by_list = []
if fetch_command.where != '':
by_list = re.split(',', fetch_command.where)
if not fetch_command.is_plural:
if len(by_list) == 0:
skr_log_warning('Singular often comes with at least one by parameter')
return 'Fetch{0}FromCache{1}'\
.format(self.object_name, self.__convert_bys_to_cpp_string(by_list))
else:
return 'Fetch{0}FromCache{1}'\
.format(self.plural_object_name, self.__convert_bys_to_cpp_string(by_list))
def __convert_bys_to_cpp_string(self, by_string_list):
"""Returns "ById([id UTF8String])" or "([id UTF8String], [username UTF8String])" or "()".
"""
if len(by_string_list) == 0: # ()
return '()'
elif len(by_string_list) == 1: # "ById(const std::string& id)"
by_string = by_string_list[0]
objc_var = self.__objc_var_by_name(by_string)
if objc_var is not None:
return 'By{0}({1})'.format(objc_var.to_title_style_name(), objc_var.cast_to_cpp_parameter())
else:
print 'Unknown "{0}" in "by"'.format(by_string)
return ''
else: # "([id UTF8String], [username UTF8String])"
bys_string = '('
for by_string in by_string_list:
objc_var = self.__objc_var_by_name(by_string)
if objc_var is not None:
bys_string += objc_var.cast_to_cpp_parameter() + ', '
else:
print 'Unknown "{0}" in "by"'.format(by_string)
return ''
bys_string = bys_string[:-2] # remove last 2 chars
bys_string += ')'
return bys_string
def __web_api_declaration(self, api, config):
declaration = ''
declaration += '- (void){0}'.format(string_utils.first_char_to_lower(api.alias))
if len(api.input_var_list) > 0:
if len(api.input_var_list) == 1:
declaration += 'By'
else:
declaration += 'With'
for i, input_var in enumerate(api.input_var_list):
input_name = string_utils.to_objc_property_name(input_var.name)
if i == 0:
input_name = string_utils.first_char_to_upper(input_name)
declaration += '{0}:({1}){2} '.format(input_name,
input_var.var_type.to_objc_getter_string(config),
string_utils.first_char_to_lower(input_name))
declaration += 'success:(void (^)('
else:
declaration += 'Success:(void (^)('
if len(api.output_var_list) > 0:
for i, output_var in enumerate(api.output_var_list):
declaration += output_var.var_type.to_objc_getter_string(config)
declaration += string_utils.to_objc_property_name(output_var.name)
if i != len(api.output_var_list) - 1:
declaration += ', '
declaration += '))successBlock failure:(void (^)(NSError *error))failureBlock'
return declaration
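# Hedged usage sketch (illustrative only): a generator script would drive
# this class roughly as follows; `config`, `fetch_cmd` and `objc_vars` are
# assumed to come from skrutil's parsing stage.
#   manager = ObjcManager('UserManager')
#   manager.set_object_name('User', 'Users')
#   manager.set_objc_variable_list(objc_vars)
#   manager.add_fetch_command(fetch_cmd)
#   header = manager.generate_fetch_declarations(config)
#   source = manager.generate_fetch_implementations(config)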
| |
# -----------------------------
# Author: Haoxi Zhang
# Version: 0.01 beta
# Date: 2016.06.09
# -----------------------------
import tensorflow as tf
import numpy as np
import random
from collections import deque
# Hyper Parameters:
GAMMA = 0.99 # decay rate of past observations
OBSERVE = 1000. # timesteps to observe before training
EXPLORE = 50000. # frames over which to anneal epsilon
FINAL_EPSILON = 0 #0.001 # final value of epsilon
INITIAL_EPSILON = 1.0 #0.01 # starting value of epsilon
REPLAY_MEMORY = 5000 # number of previous transitions to remember
BATCH_SIZE = 64 # size of minibatch
UPDATE_TIME = 10
"""
I found it hard (particularly with this code structure)
to attach an op like 'tf.scalar_summary("accuracy", self.accuracy)'
to track 'accuracy' while training.
So I decided to write it out to an external file: 'temp.txt'.
If anyone knows how to use a tensorboard op in this code for accuracy,
please let me know. THANKS!
"""
# 'temp.txt' is a file used to store the accuracy change during training
filename = "temp.txt"
f = open(filename,"w")
# Network Parameters
n_input = 8 # one-hot array of initial state : 8 total states
n_classes = 4 # four actions the agent can choose to act
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
class BrainDQN:
def __init__(self, actions):
# init replay memory
self.replayMemory = deque()
# init some parameters
self.timeStep = 0
self.epsilon = INITIAL_EPSILON
self.actions = actions
# init Q network
self.stateInput, self.QValue, self.W_fc1, self.b_fc1, self.W_fc2, self.b_fc2 = self.createQNetwork()
# init Target Q Network
self.stateInputT,self.QValueT,self.W_fc1T,self.b_fc1T,self.W_fc2T,self.b_fc2T = self.createQNetwork()
self.copyTargetQNetworkOperation = [self.W_fc1T.assign(self.W_fc1),self.b_fc1T.assign(self.b_fc1),self.W_fc2T.assign(self.W_fc2),self.b_fc2T.assign(self.b_fc2)]
self.createTrainingMethod()
# saving and loading networks
self.saver = tf.train.Saver()
self.session = tf.InteractiveSession()
self.session.run(tf.initialize_all_variables())
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
print "Successfully loaded:", checkpoint.model_checkpoint_path
else:
print "Could not find old network weights"
# Add histogram summaries for weights
tf.histogram_summary("w_h1_summ", self.W_fc1)
tf.histogram_summary("w_h2_summ", self.W_fc2)
# Add histogram summaries for biases
tf.histogram_summary("b_h1_summ", self.b_fc1)
tf.histogram_summary("b_h2_summ", self.b_fc2)
#self.accuracy = self.getAccuracy()
# Add scalar summary for cost
tf.scalar_summary("cost", self.cost)
# create a log writer. run 'tensorboard --logdir=${PWD}'
self.writer = tf.train.SummaryWriter("./logs", self.session.graph)
self.merged = tf.merge_all_summaries()
def createQNetwork(self):
# input layer
stateInput = tf.placeholder("float",[None,8])
W_fc1 = self.weight_variable([n_input,16])
b_fc1 = self.bias_variable([16])
W_fc2 = self.weight_variable([16, self.actions])
b_fc2 = self.bias_variable([self.actions])
# Q Value layer
h_fc1 = tf.nn.relu(tf.matmul(stateInput,W_fc1) + b_fc1)
QValue = tf.matmul(h_fc1,W_fc2) + b_fc2
#QValue = tf.nn.relu(tf.matmul(stateInput, weights) + biases)
return stateInput, QValue, W_fc1, b_fc1, W_fc2, b_fc2
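# Note: the Q network above is a small two-layer MLP: an 8-unit one-hot
# state input, a 16-unit ReLU hidden layer, and one linear output per
# action (its Q value).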
def copyTargetQNetwork(self):
self.session.run(self.copyTargetQNetworkOperation)
def createTrainingMethod(self):
self.actionInput = tf.placeholder("float",[None,self.actions])
self.yInput = tf.placeholder("float", [None])
Q_Action = tf.reduce_sum(tf.mul(self.QValue, self.actionInput), reduction_indices = 1)
self.cost = tf.reduce_mean(tf.square(self.yInput - Q_Action))
self.trainStep = tf.train.AdamOptimizer(1e-2).minimize(self.cost)
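# The loss above is the mean squared Bellman error:
#   cost = mean((y - sum(Q(s) * onehot(a)))^2)
# where y = r for terminal transitions and
# y = r + GAMMA * max_a' targetQ(s', a') otherwise (see trainQNetwork).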
def trainQNetwork(self):
# Step 1: obtain random minibatch from replay memory
minibatch = random.sample(self.replayMemory,BATCH_SIZE)
state_batch = [data[0] for data in minibatch]
action_batch = [data[1] for data in minibatch]
reward_batch = [data[2] for data in minibatch]
nextState_batch = [data[3] for data in minibatch]
# Step 2: calculate y
y_batch = []
QValue_batch = self.QValueT.eval(feed_dict={self.stateInputT:nextState_batch})
for i in range(0,BATCH_SIZE):
terminal = minibatch[i][4]
if terminal:
y_batch.append(reward_batch[i])
else:
y_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))
self.trainStep.run(feed_dict={
self.yInput : y_batch,
self.actionInput : action_batch,
self.stateInput : state_batch
})
if self.timeStep % 20 == 0:
summary_str = self.session.run(self.merged,feed_dict={self.yInput : y_batch, self.actionInput : action_batch, self.stateInput : state_batch})
self.writer.add_summary(summary_str, self.timeStep)
# save the network every 10000 timesteps
if self.timeStep % 10000 == 0:
self.saver.save(self.session, 'saved_networks/' + 'network' + '-dqn', global_step = self.timeStep)
if self.timeStep % UPDATE_TIME == 0:
self.copyTargetQNetwork()
if self.timeStep == 50001:
print '50001'
self.printExit()
def setPerception(self,observation,action,reward,terminal):
self.replayMemory.append((self.currentState,action,reward,observation,terminal))
if len(self.replayMemory) > REPLAY_MEMORY:
self.replayMemory.popleft()
if self.timeStep > OBSERVE:
# Train the network
self.trainQNetwork()
# print info
state = ""
if self.timeStep <= OBSERVE:
state = "observe"
elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print "TIMESTEP", self.timeStep, "/ STATE", state, \
"/ EPSILON", self.epsilon
self.currentState = observation
self.timeStep += 1
if self.timeStep % 20 == 0:
self.accuracy = self.getAccuracy()
# Add scalar summary for accuracy
#tf.scalar_summary("accuracy", self.accuracy)
print 'Accuracy at step %s: %s' % (self.timeStep, self.accuracy)
f.write('Accuracy at step %s: %s \n' % (self.timeStep, self.accuracy))
def getAction(self,actions):
QValue = self.QValue.eval(feed_dict= {self.stateInput:[self.currentState]})[0]
action = np.zeros(self.actions)
action_index = 0
if random.random() <= self.epsilon:
action_index = actions[ random.randrange(4) ]
action[action_index] = 1
else:
action_index = np.argmax(QValue)
action[action_index] = 1
# anneal epsilon
if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/EXPLORE
return action
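# getAction is epsilon-greedy: with probability epsilon a random action is
# taken, otherwise the argmax of the predicted Q values; epsilon is
# annealed linearly from INITIAL_EPSILON to FINAL_EPSILON over EXPLORE
# timesteps once the observation phase (OBSERVE steps) has passed.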
def setInitState(self,observation):
self.currentState = observation
def getAccuracy(self):
accuracy = 0.0
predict = [0,0,0,0,0,0,0]
count = 0.0
y = [3,0,3,3,1,3,0]
for i in xrange(7):
sInput = [0,0,0,0,0,0,0,0]
sInput[i] = 1
QValue = self.QValue.eval(feed_dict= {self.stateInput:[sInput]})[0]
predict[i] = np.argmax(QValue)
print predict
for i in xrange(7):
if y[i] == predict[i]:
count +=1.0
accuracy = count/7.0
return accuracy
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev = 0.01)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.constant(0.01, shape = shape)
return tf.Variable(initial)
def getPrintAction(self,a):
action = 'none'
if a == 0:
action ='up'
elif a == 1:
action = 'down'
elif a == 2:
action = 'left'
else:
action = 'right'
return action
def printExit(self):
print '\n Results: actions based on the QValue: '
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[1,0,0,0,0,0,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 1, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,1,0,0,0,0,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 2, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,0,1,0,0,0,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 3, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,0,0,1,0,0,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 4, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,0,0,0,1,0,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 5, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,0,0,0,0,1,0,0]]})[0]
x = np.argmax(QValue)
print 'in room 6, go: ' + self.getPrintAction(x)
QValue = self.QValue.eval(feed_dict= {self.stateInput:[[0,0,0,0,0,0,1,0]]})[0]
x = np.argmax(QValue)
print 'in room 7, go: ' + self.getPrintAction(x)
f.close()
exit()
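# Hedged usage sketch (illustrative only): a driving loop for this agent,
# where `env` is assumed to be a simple room-navigation environment that
# supplies one-hot states and a (state, reward, terminal) step result.
#   brain = BrainDQN(actions=4)
#   brain.setInitState(env.reset())
#   while True:
#       action = brain.getAction(range(4))
#       observation, reward, terminal = env.step(np.argmax(action))
#       brain.setPerception(observation, action, reward, terminal)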
| |
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <erikz@il.ibm.com>
# Avishay Traeger <avishay@il.ibm.com>
import copy
import mox
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import xiv_ds8k
from cinder.volume import volume_types
FAKE = "fake"
VOLUME = {'size': 16,
'name': FAKE,
'id': 1}
MANAGED_FAKE = "managed_fake"
MANAGED_VOLUME = {'size': 16,
'name': MANAGED_FAKE,
'id': 2}
REPLICA_FAKE = "repicated_fake"
REPLICATED_VOLUME = {'size': 64,
'name': REPLICA_FAKE,
'id': 2}
CONTEXT = {}
CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
CONF = cfg.CONF
class XIVDS8KFakeProxyDriver(object):
"""Fake IBM XIV and DS8K Proxy Driver."""
def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
"""Initialize Proxy."""
self.xiv_ds8k_info = xiv_ds8k_info
self.logger = logger
self.exception = expt
self.xiv_ds8k_portal = \
self.xiv_ds8k_iqn = FAKE
self.volumes = {}
self.driver = driver
def setup(self, context):
if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
.configuration.san_login:
raise self.exception.NotAuthorized()
if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
.configuration.san_ip:
raise self.exception.HostNotFound(host='fake')
def create_volume(self, volume):
if volume['size'] > 100:
raise self.exception.VolumeBackendAPIException(data='blah')
self.volumes[volume['name']] = volume
def volume_exists(self, volume):
return self.volumes.get(volume['name'], None) is not None
def delete_volume(self, volume):
if self.volumes.get(volume['name'], None) is not None:
del self.volumes[volume['name']]
def manage_volume_get_size(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return self.volumes[existing_ref['source-name']]['size']
def manage_volume(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
volume['size'] = MANAGED_VOLUME['size']
return {}
def unmanage_volume(self, volume):
pass
def initialize_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
lun_id = volume['id']
self.volumes[volume['name']]['attached'] = connector
return {'driver_volume_type': 'iscsi',
'data': {'target_discovered': True,
'target_portal': self.xiv_ds8k_portal,
'target_iqn': self.xiv_ds8k_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_ds8k_portal,
self.xiv_ds8k_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
if not self.is_volume_attached(volume, connector):
raise self.exception.NotFound(_('Volume not found for '
'instance %(instance_id)s.')
% {'instance_id': 'fake'})
del self.volumes[volume['name']]['attached']
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return (self.volumes[volume['name']].get('attached', None)
== connector)
def reenable_replication(self, context, volume):
model_update = {}
if volume['replication_status'] == 'inactive':
model_update['replication_status'] = 'active'
elif volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
model_update['replication_extended_status'] = 'some_status'
model_update['replication_driver_data'] = 'some_data'
return model_update
def get_replication_status(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'active'}
def promote_replica(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'inactive'}
def create_replica_test_volume(self, volume, src_vref):
if volume['size'] != src_vref['size']:
raise exception.InvalidVolume(
reason="Target and source volumes have different size.")
return
def retype(self, ctxt, volume, new_type, diff, host):
volume['easytier'] = new_type['extra_specs']['easytier']
return True, volume
class XIVDS8KVolumeDriverTest(test.TestCase):
"""Test IBM XIV and DS8K volume driver."""
def setUp(self):
"""Initialize IBM XIV and DS8K Driver."""
super(XIVDS8KVolumeDriverTest, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.san_is_local = False
configuration.xiv_ds8k_proxy = \
'cinder.tests.test_ibm_xiv_ds8k.XIVDS8KFakeProxyDriver'
configuration.xiv_ds8k_connection_type = 'iscsi'
configuration.xiv_chap = 'disabled'
configuration.san_ip = FAKE
configuration.san_login = FAKE
configuration.san_clustername = FAKE
configuration.san_password = FAKE
configuration.append_config_values(mox.IgnoreArg())
self.driver = xiv_ds8k.XIVDS8KDriver(
configuration=configuration)
def test_initialized_should_set_xiv_ds8k_info(self):
"""Test that the san flags are passed to the IBM proxy."""
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'],
self.driver.configuration.san_login)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'],
self.driver.configuration.san_password)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'],
self.driver.configuration.san_ip)
self.assertEqual(
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'],
self.driver.configuration.san_clustername)
def test_setup_should_fail_if_credentials_are_invalid(self):
"""Test that the xiv_ds8k_proxy validates credentials."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid'
self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)
def test_setup_should_fail_if_connection_is_invalid(self):
"""Test that the xiv_ds8k_proxy validates connection."""
self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \
'invalid'
self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)
def test_create_volume(self):
"""Test creating a volume."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertTrue(has_volume)
self.driver.delete_volume(VOLUME)
def test_volume_exists(self):
"""Test the volume exist method with a volume that doesn't exist."""
self.driver.do_setup(None)
self.assertFalse(
self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE}))
def test_delete_volume(self):
"""Verify that a volume is deleted."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.delete_volume(VOLUME)
has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
self.assertFalse(has_volume)
def test_delete_volume_should_fail_for_not_existing_volume(self):
"""Verify that deleting a non-existing volume is OK."""
self.driver.do_setup(None)
self.driver.delete_volume(VOLUME)
def test_create_volume_should_fail_if_no_pool_space_left(self):
"""Vertify that the xiv_ds8k_proxy validates volume pool space."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
{'name': FAKE,
'id': 1,
'size': 12000})
def test_initialize_connection(self):
"""Test that inititialize connection attaches volume to host."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.assertTrue(
self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR))
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver.delete_volume(VOLUME)
def test_initialize_connection_should_fail_for_non_existing_volume(self):
"""Verify that initialize won't work for non-existing volume."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
VOLUME,
CONNECTOR)
def test_terminate_connection(self):
"""Test terminating a connection."""
self.driver.do_setup(None)
self.driver.create_volume(VOLUME)
self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.terminate_connection(VOLUME, CONNECTOR)
self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached(
VOLUME,
CONNECTOR))
self.driver.delete_volume(VOLUME)
def test_terminate_connection_should_fail_on_non_existing_volume(self):
"""Test that terminate won't work for non-existing volumes."""
self.driver.do_setup(None)
self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
VOLUME,
CONNECTOR)
def test_manage_existing_get_size(self):
"""Test that manage_existing_get_size returns the expected size. """
self.driver.do_setup(None)
self.driver.create_volume(MANAGED_VOLUME)
existing_ref = {'source-name': MANAGED_VOLUME['name']}
return_size = self.driver.manage_existing_get_size(
VOLUME,
existing_ref)
self.assertEqual(return_size, MANAGED_VOLUME['size'])
# cover both case, whether driver renames the volume or not
self.driver.delete_volume(VOLUME)
self.driver.delete_volume(MANAGED_VOLUME)
def test_manage_existing_get_size_should_fail_on_non_existing_volume(self):
"""Test that manage_existing_get_size fails on non existing volume. """
self.driver.do_setup(None)
# on purpose - do NOT create managed volume
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.assertRaises(exception.VolumeNotFound,
self.driver.manage_existing_get_size,
VOLUME,
existing_ref)
def test_manage_existing(self):
"""Test that manage_existing returns successfully. """
self.driver.do_setup(None)
self.driver.create_volume(MANAGED_VOLUME)
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.driver.manage_existing(VOLUME, existing_ref)
self.assertEqual(VOLUME['size'], MANAGED_VOLUME['size'])
# cover both case, whether driver renames the volume or not
self.driver.delete_volume(VOLUME)
self.driver.delete_volume(MANAGED_VOLUME)
def test_manage_existing_should_fail_on_non_existing_volume(self):
"""Test that manage_existing fails on non existing volume. """
self.driver.do_setup(None)
# on purpose - do NOT create managed volume
existing_ref = {'source-name': MANAGED_VOLUME['name']}
self.assertRaises(exception.VolumeNotFound,
self.driver.manage_existing,
VOLUME,
existing_ref)
def test_reenable_replication(self):
"""Test that reenable_replication returns successfully. """
self.driver.do_setup(None)
# assume the replicated volume is inactive
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
replicated_volume['replication_status'] = 'inactive'
model_update = self.driver.reenable_replication(
CONTEXT,
replicated_volume
)
self.assertEqual(
model_update['replication_status'],
'active'
)
self.assertIn('replication_extended_status', model_update)
self.assertIn('replication_driver_data', model_update)
def test_reenable_replication_fail_on_cinder_exception(self):
"""Test that reenable_replication fails on driver raising exception."""
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.reenable_replication,
CONTEXT,
replicated_volume
)
def test_get_replication_status(self):
"""Test that get_replication_status return successfully. """
self.driver.do_setup(None)
# assume the replicated volume is inactive
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
replicated_volume['replication_status'] = 'inactive'
model_update = self.driver.get_replication_status(
CONTEXT,
replicated_volume
)
self.assertEqual(
model_update['replication_status'],
'active'
)
def test_get_replication_status_fail_on_exception(self):
"""Test that get_replication_status fails on exception"""
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.get_replication_status,
CONTEXT,
replicated_volume
)
def test_promote_replica(self):
"""Test that promote_replica returns successfully. """
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# assume the replication_status should be active
replicated_volume['replication_status'] = 'active'
model_update = self.driver.promote_replica(
CONTEXT,
replicated_volume
)
# after promoting, replication_status should be inactive
self.assertEqual(
model_update['replication_status'],
'inactive'
)
def test_promote_replica_fail_on_cinder_exception(self):
"""Test that promote_replica fails on CinderException. """
self.driver.do_setup(None)
replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
# on purpose - set invalid value to replication_status
# expect an exception.
replicated_volume['replication_status'] = 'invalid_status_val'
self.assertRaises(
exception.CinderException,
self.driver.promote_replica,
CONTEXT,
replicated_volume
)
def test_create_replica_test_volume(self):
"""Test that create_replica_test_volume returns successfully."""
self.driver.do_setup(None)
tgt_volume = copy.deepcopy(VOLUME)
src_volume = copy.deepcopy(REPLICATED_VOLUME)
tgt_volume['size'] = src_volume['size']
model_update = self.driver.create_replica_test_volume(
tgt_volume,
src_volume
)
self.assertIsNone(model_update)
def test_create_replica_test_volume_fail_on_diff_size(self):
"""Test that create_replica_test_volume fails on diff size."""
self.driver.do_setup(None)
tgt_volume = copy.deepcopy(VOLUME)
src_volume = copy.deepcopy(REPLICATED_VOLUME)
self.assertRaises(
exception.InvalidVolume,
self.driver.create_replica_test_volume,
tgt_volume,
src_volume
)
def test_retype(self):
"""Test that retype returns successfully."""
self.driver.do_setup(None)
# prepare parameters
ctxt = context.get_admin_context()
host = {
'host': 'foo',
'capabilities': {
'location_info': 'xiv_ds8k_fake_1',
'extent_size': '1024'
}
}
key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(
ctxt,
old_type_ref['id'],
new_type_ref['id'],
)
volume = copy.deepcopy(VOLUME)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
ret = self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertTrue(ret)
self.assertTrue(volume['easytier'])
def test_retype_fail_on_exception(self):
"""Test that retype fails on exception."""
self.driver.do_setup(None)
# prepare parameters
ctxt = context.get_admin_context()
host = {
'host': 'foo',
'capabilities': {
'location_info': 'xiv_ds8k_fake_1',
'extent_size': '1024'
}
}
key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new')
diff, equal = volume_types.volume_types_diff(
ctxt,
old_type_ref['id'],
new_type_ref['id'],
)
volume = copy.deepcopy(VOLUME)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
self.assertRaises(
KeyError,
self.driver.retype,
ctxt, volume, new_type, diff, host
)
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
#
# Copyright 2015 The Android Open Source Project
# Copyright (C) 2016 The Khronos Group Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
from collections import OrderedDict
from build_caselists import Module, getModuleByName, DEFAULT_BUILD_DIR, DEFAULT_TARGET
from mustpass import Project, Package, Mustpass, Configuration, include, exclude, genMustpassLists
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))
from build.common import DEQP_DIR
from build.config import ANY_GENERATOR, BuildConfig
COPYRIGHT_DECLARATION = """\
/* Copyright (C) 2016-2017 The Khronos Group Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/"""
buildPath = DEFAULT_BUILD_DIR.format(targetName = DEFAULT_TARGET, buildType = "Release")
#-------------------------------------------------- ES MUSTPASS ---------------------------------------------------------------------
CTS_AOSP_MP_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gles", "aosp_mustpass")
CTS_AOSP_MP_DEVICE_DIR = "gl_cts/data/mustpass/gles/aosp_mustpass"
CTS_MP_INC_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "modules", "runner")
CTS_AOSP_MP_ES_PROJECT = Project(name = "AOSP Mustpass ES", path = CTS_AOSP_MP_DATA_DIR, incpath = CTS_MP_INC_DIR, devicepath = CTS_AOSP_MP_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
CTS_KHR_MP_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gles", "khronos_mustpass")
CTS_KHR_MP_DEVICE_DIR = "gl_cts/data/mustpass/gles/khronos_mustpass"
CTS_KHR_MP_ES_PROJECT = Project(name = "Khronos Mustpass ES", path = CTS_KHR_MP_DATA_DIR, incpath = CTS_MP_INC_DIR, devicepath = CTS_KHR_MP_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
CTS_AOSP_MP_EGL_DEVICE_DIR = "gl_cts/data/mustpass/egl/aosp_mustpass"
CTS_AOSP_MP_EGL_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "egl", "aosp_mustpass")
CTS_AOSP_MP_EGL_PROJECT = Project(name = "AOSP Mustpass EGL", path = CTS_AOSP_MP_EGL_DATA_DIR, incpath = CTS_MP_INC_DIR, devicepath = CTS_AOSP_MP_EGL_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
CTS_KHR_MP_NOCTX_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gles", "khronos_mustpass_noctx")
CTS_KHR_MP_NOCTX_DEVICE_DIR = "gl_cts/data/mustpass/gles/khronos_mustpass_noctx"
CTS_KHR_MP_NOCTX_ES_PROJECT = Project(name = "Khronos Mustpass ES NoContext", path = CTS_KHR_MP_NOCTX_DATA_DIR, incpath = CTS_MP_INC_DIR, devicepath = CTS_KHR_MP_NOCTX_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
CTS_KHR_MP_SINGLE_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gles", "khronos_mustpass_single")
CTS_KHR_MP_SINGLE_DEVICE_DIR = "gl_cts/data/mustpass/gles/khronos_mustpass_single"
CTS_KHR_MP_SINGLE_ES_PROJECT = Project(name = "Khronos Mustpass ES Single Config", path = CTS_KHR_MP_SINGLE_DATA_DIR, incpath = CTS_MP_INC_DIR, devicepath = CTS_KHR_MP_SINGLE_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
EGL_MODULE = getModuleByName("dEQP-EGL")
ES2CTS_MODULE = getModuleByName("dEQP-GLES2")
ES3CTS_MODULE = getModuleByName("dEQP-GLES3")
ES31CTS_MODULE = getModuleByName("dEQP-GLES31")
ES2KHR_MODULE = getModuleByName("KHR-GLES2")
ES3KHR_MODULE = getModuleByName("KHR-GLES3")
ES31KHR_MODULE = getModuleByName("KHR-GLES31")
ES32KHR_MODULE = getModuleByName("KHR-GLES32")
NOCTX_ES2_KHR_MODULE = getModuleByName("KHR-NOCTX-ES2")
NOCTX_ES32_KHR_MODULE = getModuleByName("KHR-NOCTX-ES32")
SINGLE_ES32_KHR_MODULE = getModuleByName("KHR-Single-GLES32")
ES2GTF_MODULE = getModuleByName("GTF-GLES2")
ES3GTF_MODULE = getModuleByName("GTF-GLES3")
ES31GTF_MODULE = getModuleByName("GTF-GLES31")
GLCTS_GLES2_PKG = Package(module = ES2CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles2-master.txt")]),
])
GLCTS_3_2_2_GLES3_PKG = Package(module = ES3CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles3-master.txt")]),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles3-master.txt"), include("gles3-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles3-master.txt"), include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles3-master.txt"), include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles3-master.txt"), include("gles3-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles3-master.txt"),
include("gles3-multisample.txt"),
exclude("gles3-multisample-issues.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles3-master.txt"),
include("gles3-pixelformat.txt"),
exclude("gles3-pixelformat-issues.txt")]),
])
GLCTS_3_2_2_GLES31_PKG = Package(module = ES31CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles31-master.txt")]),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles31-master.txt"), include("gles31-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles31-master.txt"), include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles31-master.txt"), include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles31-master.txt"), include("gles31-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles31-master.txt"), include("gles31-multisample.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = [include("gles31-master.txt"), include("gles31-pixelformat.txt")]),
])
# 3.2.3.x
GLCTS_3_2_3_EGL_COMMON_FILTERS = [include("egl-master.txt"),
exclude("egl-test-issues.txt"),
exclude("egl-internal-api-tests.txt"),
exclude("egl-driver-issues.txt")
]
GLCTS_3_2_3_EGL_PKG = Package(module = EGL_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = GLCTS_3_2_3_EGL_COMMON_FILTERS),
])
GLCTS_3_2_3_GLES2_COMMON_FILTERS = [
include("gles2-master.txt"),
exclude("gles2-test-issues.txt"),
exclude("gles2-spec-issues.txt"),
exclude("gles2-driver-issues.txt"),
exclude("gles2-hw-issues.txt")
]
GLCTS_3_2_3_GLES2_PKG = Package(module = ES2CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = GLCTS_3_2_3_GLES2_COMMON_FILTERS),
])
GLCTS_3_2_3_GLES3_COMMON_FILTERS = [
include("gles3-master.txt"),
exclude("gles3-test-issues.txt"),
exclude("gles3-spec-issues.txt"),
exclude("gles3-driver-issues.txt"),
]
GLCTS_3_2_3_GLES3_PKG = Package(module = ES3CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [exclude("gles3-hw-issues.txt")]),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-multisample.txt"), exclude("gles3-multisample-hw-issues.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES3_COMMON_FILTERS + [include("gles3-pixelformat.txt")]),
])
GLCTS_3_2_3_GLES31_COMMON_FILTERS = [
include("gles31-master.txt"),
exclude("gles31-test-issues.txt"),
exclude("gles31-spec-issues.txt"),
exclude("gles31-driver-issues.txt"),
exclude("gles31-hw-issues.txt")
]
GLCTS_3_2_3_GLES31_PKG = Package(module = ES31CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = [include("gles31-master.txt"),
include("gles31-multisample.txt"),
exclude("gles31-multisample-test-issues.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = GLCTS_3_2_3_GLES31_COMMON_FILTERS + [include("gles31-pixelformat.txt")]),
])
GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS = [
include("gles32-khr-master.txt"),
exclude("gles32-khr-test-issues.txt"),
exclude("gles32-khr-spec-issues.txt")
]
GLCTS_3_2_3_GLES32_KHR_PKG_1CFG = Package(module = ES32KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "-1",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "-1",
surfaceheight = "64",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
])
GLCTS_3_2_3_GLES32_KHR_PKG_N1CFG = Package(module = ES32KHR_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = GLCTS_3_2_3_GLES32_KHR_COMMON_FILTERS),
])
# master
MASTER_EGL_COMMON_FILTERS = [include("egl-master.txt"),
exclude("egl-test-issues.txt"),
exclude("egl-internal-api-tests.txt")]
MASTER_EGL_PKG = Package(module = EGL_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_EGL_COMMON_FILTERS),
])
MASTER_GLES2_COMMON_FILTERS = [
include("gles2-master.txt"),
exclude("gles2-test-issues.txt"),
exclude("gles2-spec-issues.txt")
]
MASTER_GLES2_PKG = Package(module = ES2CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_GLES2_COMMON_FILTERS),
])
MASTER_GLES3_COMMON_FILTERS = [
include("gles3-master.txt"),
exclude("gles3-test-issues.txt"),
exclude("gles3-spec-issues.txt")
]
MASTER_GLES3_PKG = Package(module = ES3CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_GLES3_COMMON_FILTERS),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-multisample.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES3_COMMON_FILTERS + [include("gles3-pixelformat.txt")]),
])
MASTER_GLES31_COMMON_FILTERS = [
include("gles31-master.txt"),
exclude("gles31-test-issues.txt"),
exclude("gles31-spec-issues.txt")
]
MASTER_GLES31_PKG = Package(module = ES31CTS_MODULE, configurations = [
# Master
Configuration(name = "master",
glconfig = "rgba8888d24s8ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_GLES31_COMMON_FILTERS),
# Rotations
Configuration(name = "rotate-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "0",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "90",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-portrait",
glconfig = "rgba8888d24s8ms0",
rotation = "180",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
Configuration(name = "rotate-reverse-landscape",
glconfig = "rgba8888d24s8ms0",
rotation = "270",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-rotation.txt")]),
# MSAA
Configuration(name = "multisample",
glconfig = "rgba8888d24s8ms4",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-multisample.txt")]),
# Pixel format
Configuration(name = "565-no-depth-no-stencil",
glconfig = "rgb565d0s0ms0",
rotation = "unspecified",
surfacewidth = "256",
surfaceheight = "256",
os = "android",
filters = MASTER_GLES31_COMMON_FILTERS + [include("gles31-pixelformat.txt")]),
])
GLCTS_GLES2_KHR_PKG_1CFG = Package(module = ES2KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-khr-master.txt")]),
])
GLCTS_GLES2_DEQP_PKG_1CFG = Package(module = ES2CTS_MODULE, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-deqp-master.txt")]),
])
GLCTS_GLES2_GTF_PKG_1CFG = Package(module = ES2GTF_MODULE, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles2-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "-1",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles2-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "-1",
surfaceheight = "64",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles2-gtf-master.txt")]),
Configuration(name = "gtf-egl",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-gtf-egl.txt")]),
Configuration(name = "gtf-egl",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles2-gtf-egl.txt")]),
])
GLCTS_GLES2_KHR_PKG_N1CFG = Package(module = ES2KHR_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-khr-master.txt")]),
])
GLCTS_GLES2_DEQP_PKG_N1CFG = Package(module = ES2CTS_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-deqp-master.txt")]),
])
GLCTS_GLES2_GTF_PKG_N1CFG = Package(module = ES2GTF_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles2-gtf-master.txt")]),
])
GLCTS_GLES3_DEQP_PKG_1CFG = Package(module = ES3CTS_MODULE, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-deqp-master.txt")]),
])
GLCTS_GLES3_KHR_PKG_1CFG = Package(module = ES3KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-khr-master.txt")]),
])
GLCTS_GLES3_GTF_PKG_1CFG = Package(module = ES3GTF_MODULE, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles3-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "-1",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles3-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "-1",
surfaceheight = "64",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles3-gtf-master.txt")]),
])
GLCTS_GLES3_DEQP_PKG_N1CFG = Package(module = ES3CTS_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-deqp-master.txt")]),
])
GLCTS_GLES3_KHR_PKG_N1CFG = Package(module = ES3KHR_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-khr-master.txt")]),
])
GLCTS_GLES3_GTF_PKG_N1CFG = Package(module = ES3GTF_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles3-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles3-gtf-master.txt")]),
])
GLCTS_GLES31_DEQP_PKG_1CFG = Package(module = ES31CTS_MODULE, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-deqp-master.txt")]),
])
GLCTS_GLES31_KHR_PKG_1CFG = Package(module = ES31KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-khr-master.txt")]),
])
GLCTS_GLES31_GTF_PKG_1CFG = Package(module = ES31GTF_MODULE, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles31-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "-1",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles31-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "-1",
surfaceheight = "64",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = [include("gles31-gtf-master.txt")]),
])
GLCTS_GLES31_KHR_PKG_N1CFG = Package(module = ES31KHR_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-khr-master.txt")]),
])
GLCTS_GLES31_DEQP_PKG_N1CFG = Package(module = ES31CTS_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "deqp-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-deqp-master.txt")]),
])
GLCTS_GLES31_GTF_PKG_N1CFG = Package(module = ES31GTF_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "gtf-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles31-gtf-master.txt")]),
Configuration(name = "gtf-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = [include("gles31-gtf-master.txt")]),
])
MASTER_GLES32_COMMON_FILTERS = [
include("gles32-khr-master.txt"),
exclude("gles32-khr-test-issues.txt"),
exclude("gles32-khr-spec-issues.txt")
]
GLCTS_GLES32_KHR_PKG_1CFG = Package(module = ES32KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = MASTER_GLES32_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = MASTER_GLES32_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "-1",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = MASTER_GLES32_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "-1",
surfaceheight = "64",
baseseed = "3",
fboconfig = "rgba8888d24s8",
filters = MASTER_GLES32_COMMON_FILTERS),
])
GLCTS_GLES32_KHR_PKG_N1CFG = Package(module = ES32KHR_MODULE, useforfirsteglconfig = False, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = MASTER_GLES32_COMMON_FILTERS),
Configuration(name = "khr-master",
surfacewidth = "113",
surfaceheight = "47",
baseseed = "2",
filters = MASTER_GLES32_COMMON_FILTERS),
])
GLCTS_NOCTX_ES2_KHR_PKG = Package(module = NOCTX_ES2_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-noctx-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles2-khr-master.txt")]),
])
GLCTS_NOCTX_ES32_KHR_PKG = Package(module = NOCTX_ES32_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-noctx-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = MASTER_GLES32_COMMON_FILTERS),
])
GLCTS_SINGLE_ES32_KHR_PKG = Package(module = SINGLE_ES32_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-single",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gles32-khr-single.txt")]),
])
ES_MUSTPASS_LISTS = [
# 3.2.2.x
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "3.2.2.x", isCurrent=False,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_DEQP_PKG_1CFG,
GLCTS_GLES2_GTF_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES2_DEQP_PKG_N1CFG,
GLCTS_GLES2_GTF_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_DEQP_PKG_1CFG,
GLCTS_GLES3_GTF_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES3_DEQP_PKG_N1CFG,
GLCTS_GLES3_GTF_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_DEQP_PKG_1CFG,
GLCTS_GLES31_GTF_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_GLES31_DEQP_PKG_N1CFG,
GLCTS_GLES31_GTF_PKG_N1CFG,
GLCTS_GLES32_KHR_PKG_1CFG,
GLCTS_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "3.2.2.x", isCurrent=False,
packages = [GLCTS_GLES2_PKG, GLCTS_3_2_2_GLES3_PKG, GLCTS_3_2_2_GLES31_PKG]),
# 3.2.3.x
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "3.2.3.x", isCurrent=False,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_GTF_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES2_GTF_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_GTF_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES3_GTF_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_GTF_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_GLES31_GTF_PKG_N1CFG,
GLCTS_3_2_3_GLES32_KHR_PKG_1CFG,
GLCTS_3_2_3_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "3.2.3.x", isCurrent=False,
packages = [GLCTS_3_2_3_GLES2_PKG, GLCTS_3_2_3_GLES3_PKG, GLCTS_3_2_3_GLES31_PKG]),
Mustpass(project = CTS_AOSP_MP_EGL_PROJECT, version = "3.2.3.x", isCurrent=False,
packages = [GLCTS_3_2_3_EGL_PKG]),
# 3.2.4.x
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "3.2.4.x", isCurrent=False,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_3_2_3_GLES32_KHR_PKG_1CFG,
GLCTS_3_2_3_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_KHR_MP_NOCTX_ES_PROJECT, version = "3.2.4.x", isCurrent=False,
packages = [GLCTS_NOCTX_ES2_KHR_PKG, GLCTS_NOCTX_ES32_KHR_PKG]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "3.2.4.x", isCurrent=False,
packages = [GLCTS_3_2_3_GLES2_PKG, GLCTS_3_2_3_GLES3_PKG, GLCTS_3_2_3_GLES31_PKG]),
Mustpass(project = CTS_AOSP_MP_EGL_PROJECT, version = "3.2.4.x", isCurrent=False,
packages = [GLCTS_3_2_3_EGL_PKG]),
# 3.2.5.x
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "3.2.5.x", isCurrent=False,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_GLES32_KHR_PKG_1CFG,
GLCTS_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_KHR_MP_NOCTX_ES_PROJECT, version = "3.2.5.x", isCurrent=False,
packages = [GLCTS_NOCTX_ES2_KHR_PKG, GLCTS_NOCTX_ES32_KHR_PKG]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "3.2.5.x", isCurrent=False,
packages = [GLCTS_3_2_3_GLES2_PKG, GLCTS_3_2_3_GLES3_PKG, GLCTS_3_2_3_GLES31_PKG]),
Mustpass(project = CTS_AOSP_MP_EGL_PROJECT, version = "3.2.5.x", isCurrent=False,
packages = [GLCTS_3_2_3_EGL_PKG]),
# 3.2.6.x
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "3.2.6.x", isCurrent=True,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_GLES32_KHR_PKG_1CFG,
GLCTS_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_KHR_MP_NOCTX_ES_PROJECT, version = "3.2.6.x", isCurrent=True,
packages = [GLCTS_NOCTX_ES2_KHR_PKG, GLCTS_NOCTX_ES32_KHR_PKG]),
Mustpass(project = CTS_KHR_MP_SINGLE_ES_PROJECT, version = "3.2.6.x", isCurrent=True,
packages = [GLCTS_SINGLE_ES32_KHR_PKG]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "3.2.6.x", isCurrent=True,
packages = [GLCTS_3_2_3_GLES2_PKG, GLCTS_3_2_3_GLES3_PKG, GLCTS_3_2_3_GLES31_PKG]),
Mustpass(project = CTS_AOSP_MP_EGL_PROJECT, version = "3.2.6.x", isCurrent=True,
packages = [GLCTS_3_2_3_EGL_PKG]),
# master
Mustpass(project = CTS_KHR_MP_ES_PROJECT, version = "master", isCurrent=False,
packages = [GLCTS_GLES2_KHR_PKG_1CFG,
GLCTS_GLES2_KHR_PKG_N1CFG,
GLCTS_GLES3_KHR_PKG_1CFG,
GLCTS_GLES3_KHR_PKG_N1CFG,
GLCTS_GLES31_KHR_PKG_1CFG,
GLCTS_GLES31_KHR_PKG_N1CFG,
GLCTS_GLES32_KHR_PKG_1CFG,
GLCTS_GLES32_KHR_PKG_N1CFG,
]),
Mustpass(project = CTS_KHR_MP_NOCTX_ES_PROJECT, version = "master", isCurrent=False,
packages = [GLCTS_NOCTX_ES2_KHR_PKG, GLCTS_NOCTX_ES32_KHR_PKG]),
Mustpass(project = CTS_KHR_MP_SINGLE_ES_PROJECT, version = "master", isCurrent=False,
packages = [GLCTS_SINGLE_ES32_KHR_PKG]),
Mustpass(project = CTS_AOSP_MP_ES_PROJECT, version = "master", isCurrent=False,
packages = [MASTER_GLES2_PKG, MASTER_GLES3_PKG, MASTER_GLES31_PKG]),
Mustpass(project = CTS_AOSP_MP_EGL_PROJECT, version = "master", isCurrent=False,
packages = [MASTER_EGL_PKG])
]
ES_BUILD_CONFIG = BuildConfig(buildPath, "Debug", ["-DDEQP_TARGET=%s" % DEFAULT_TARGET, "-DGLCTS_GTF_TARGET=gles32"])
#-------------------------------------------------- GL MUSTPASS ---------------------------------------------------------------------
GL_CTS_MP_INC_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "modules", "runner")
GL_CTS_KHR_MP_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gl", "khronos_mustpass")
GL_CTS_KHR_MP_DEVICE_DIR = "gl_cts/data/mustpass/gl/khronos_mustpass"
GL_CTS_KHR_MP_PROJECT = Project(name = "Khronos Mustpass GL", path = GL_CTS_KHR_MP_DATA_DIR, incpath = GL_CTS_MP_INC_DIR, devicepath = GL_CTS_KHR_MP_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
GL_CTS_KHR_MP_NOCTX_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gl", "khronos_mustpass_noctx")
GL_CTS_KHR_MP_NOCTX_DEVICE_DIR = "gl_cts/data/mustpass/gl/khronos_mustpass_noctx"
GL_CTS_NOCTX_PROJECT = Project(name = "Khronos Mustpass GL NoContext", path = GL_CTS_KHR_MP_NOCTX_DATA_DIR, incpath = GL_CTS_MP_INC_DIR, devicepath = GL_CTS_KHR_MP_NOCTX_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
GL_CTS_KHR_MP_SINGLE_DATA_DIR = os.path.join(DEQP_DIR, "external", "openglcts", "data", "mustpass", "gl", "khronos_mustpass_single")
GL_CTS_KHR_MP_SINGLE_DEVICE_DIR = "gl_cts/data/mustpass/gl/khronos_mustpass_single"
GL_CTS_KHR_SINGLE_PROJECT = Project(name = "Khronos Mustpass GL Single Config", path = GL_CTS_KHR_MP_SINGLE_DATA_DIR, incpath = GL_CTS_MP_INC_DIR, devicepath = GL_CTS_KHR_MP_SINGLE_DEVICE_DIR, copyright = COPYRIGHT_DECLARATION)
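# Maps each GL module name to its [configuration name, filter list]; consumed
# by generateGLMustpass() below.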
GL_MODULES = OrderedDict([
('KHR-GL46', ['master', [include('gl46-master.txt'), exclude('gl46-test-issues.txt')]]),
('KHR-GL45', ['master', [include('gl45-master.txt'), exclude('gl45-test-issues.txt')]]),
('KHR-GL44', ['master', [include('gl44-master.txt'), exclude('gl44-test-issues.txt')]]),
('KHR-GL43', ['master', [include('gl43-master.txt'), exclude('gl43-test-issues.txt')]]),
('KHR-GL42', ['master', [include('gl42-master.txt'), exclude('gl42-test-issues.txt')]]),
('KHR-GL41', ['master', [include('gl41-master.txt'), exclude('gl41-test-issues.txt')]]),
('KHR-GL40', ['master', [include('gl40-master.txt'), exclude('gl40-test-issues.txt')]]),
('KHR-GL33', ['master', [include('gl33-master.txt'), exclude('gl33-test-issues.txt')]]),
('KHR-GL32', ['master', [include('gl32-master.txt'), exclude('gl32-test-issues.txt')]]),
('KHR-GL31', ['master', [include('gl31-master.txt'), exclude('gl31-test-issues.txt')]]),
('KHR-GL30', ['master', [include('gl30-master.txt'), exclude('gl30-test-issues.txt')]]),
('GTF-GL46', ['gtf-master', [include('gl46-gtf-master.txt')]]),
('GTF-GL45', ['gtf-master', [include('gl45-gtf-master.txt')]]),
('GTF-GL44', ['gtf-master', [include('gl44-gtf-master.txt')]]),
('GTF-GL43', ['gtf-master', [include('gl43-gtf-master.txt')]]),
('GTF-GL42', ['gtf-master', [include('gl42-gtf-master.txt')]]),
('GTF-GL41', ['gtf-master', [include('gl41-gtf-master.txt')]]),
('GTF-GL40', ['gtf-master', [include('gl40-gtf-master.txt')]]),
('GTF-GL33', ['gtf-master', [include('gl33-gtf-master.txt')]]),
('GTF-GL32', ['gtf-master', [include('gl32-gtf-master.txt')]]),
('GTF-GL31', ['gtf-master', [include('gl31-gtf-master.txt')]]),
('GTF-GL30', ['gtf-master', [include('gl30-gtf-master.txt')]])
])
NOCTX_GL30_KHR_MODULE = getModuleByName("KHR-NOCTX-GL30")
NOCTX_GL40_KHR_MODULE = getModuleByName("KHR-NOCTX-GL40")
NOCTX_GL43_KHR_MODULE = getModuleByName("KHR-NOCTX-GL43")
NOCTX_GL45_KHR_MODULE = getModuleByName("KHR-NOCTX-GL45")
SINGLE_GL45_KHR_MODULE = getModuleByName("KHR-Single-GL45")
SINGLE_GL46_KHR_MODULE = getModuleByName("KHR-Single-GL46")
GLCTS_NOCTX_GL30_KHR_PKG = Package(module = NOCTX_GL30_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl30-khr-master.txt")]),
])
GLCTS_NOCTX_GL40_KHR_PKG = Package(module = NOCTX_GL40_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl40-khr-master.txt")]),
])
GLCTS_NOCTX_GL43_KHR_PKG = Package(module = NOCTX_GL43_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl43-khr-master.txt")]),
])
GLCTS_NOCTX_GL45_KHR_PKG = Package(module = NOCTX_GL45_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-master",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl45-khr-master.txt")]),
])
GLCTS_SINGLE_GL45_KHR_PKG = Package(module = SINGLE_GL45_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-single",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl45-khr-single.txt")]),
])
GLCTS_SINGLE_GL46_KHR_PKG = Package(module = SINGLE_GL46_KHR_MODULE, configurations = [
# Master
Configuration(name = "khr-single",
surfacewidth = "64",
surfaceheight = "64",
baseseed = "1",
filters = [include("gl46-khr-single.txt")]),
])
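# For each module in GL_MODULES, generateGLMustpass() builds four window/FBO
# configurations and pairs them into a package used for the first EGL config
# plus a reduced two-configuration package for the remaining EGL configs.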
def generateGLMustpass():
gl_packages = []
for packageName in GL_MODULES:
cfgName = GL_MODULES[packageName][0]
cfgFilter = GL_MODULES[packageName][1]
config_w64xh64 = Configuration(name = cfgName, surfacewidth = "64", surfaceheight = "64", baseseed = "1", filters = cfgFilter)
config_w113xh47 = Configuration(name = cfgName, surfacewidth = "113", surfaceheight = "47", baseseed = "2", filters = cfgFilter)
config_w64 = Configuration(name = cfgName, surfacewidth = "64", surfaceheight = "-1", baseseed = "3", fboconfig = "rgba8888d24s8", filters = cfgFilter)
config_h64 = Configuration(name = cfgName, surfacewidth = "-1", surfaceheight = "64", baseseed = "3", fboconfig = "rgba8888d24s8", filters = cfgFilter)
pkgModule = getModuleByName(packageName)
pkg0 = Package(module = pkgModule,
useforfirsteglconfig = True,
configurations = [
config_w64xh64, config_w113xh47, config_w64, config_h64
]
)
pkg1 = Package(module = pkgModule,
useforfirsteglconfig = False,
configurations = [
config_w64xh64, config_w113xh47,
]
)
gl_packages.append(pkg0)
gl_packages.append(pkg1)
mustpass = [Mustpass(project = GL_CTS_KHR_MP_PROJECT, version = "4.6.0.x", isCurrent=False, packages = gl_packages),
Mustpass(project = GL_CTS_NOCTX_PROJECT, version = "4.6.0.x", isCurrent=False, packages = [GLCTS_NOCTX_GL30_KHR_PKG, GLCTS_NOCTX_GL40_KHR_PKG, GLCTS_NOCTX_GL43_KHR_PKG, GLCTS_NOCTX_GL45_KHR_PKG]),
Mustpass(project = GL_CTS_KHR_MP_PROJECT, version = "4.6.1.x", isCurrent=True, packages = gl_packages),
Mustpass(project = GL_CTS_NOCTX_PROJECT, version = "4.6.1.x", isCurrent=True, packages = [GLCTS_NOCTX_GL30_KHR_PKG, GLCTS_NOCTX_GL40_KHR_PKG, GLCTS_NOCTX_GL43_KHR_PKG, GLCTS_NOCTX_GL45_KHR_PKG]),
Mustpass(project = GL_CTS_KHR_SINGLE_PROJECT, version = "4.6.1.x", isCurrent=True, packages = [GLCTS_SINGLE_GL45_KHR_PKG, GLCTS_SINGLE_GL46_KHR_PKG]),
]
return mustpass
GL_BUILD_CONFIG = BuildConfig(buildPath, "Debug", ["-DDEQP_TARGET=%s" % DEFAULT_TARGET, "-DGLCTS_GTF_TARGET=gl"])
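# Entry point: verify that the GTF sources are present, then generate the ES
# and GL mustpass lists.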
if __name__ == "__main__":
gtfCMakeLists = os.path.join(DEQP_DIR, "external", "kc-cts", "src", "GTF_ES", "CMakeLists.txt")
	if not os.path.isfile(gtfCMakeLists):
		raise Exception("GTF sources not found. The GTF module is required to build the mustpass files; fetch it with 'cd external && python fetch_kc_cts.py'.")
genMustpassLists(ES_MUSTPASS_LISTS, ANY_GENERATOR, ES_BUILD_CONFIG)
gl_mustpass_lists = generateGLMustpass()
genMustpassLists(gl_mustpass_lists, ANY_GENERATOR, GL_BUILD_CONFIG)
import os
import sys
from types import ModuleType
import unittest
import warnings
from django.conf import LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (SimpleTestCase, TransactionTestCase, TestCase,
modify_settings, override_settings, signals)
from django.utils import six
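# Tests for Django's settings override machinery: override_settings swaps in
# whole new values, while modify_settings applies prepend/append/remove edits
# to list-valued settings.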
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3],
TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
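    # available_apps limits the installed apps visible to this
    # TransactionTestCase; an empty list keeps per-test setup and teardown
    # cheap.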
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertListEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.TEST, 'override')
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
@override_settings(TEST='override2')
def test_method_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
def test_override(self):
self.assertEqual(settings.TEST, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
try:
super(ClassDecoratedTestCase, self).test_max_recursion_error()
except RuntimeError:
self.fail()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
def test_override_settings_inheritance(self):
self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(TestCase):
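    # The setting_changed signal fires whenever a setting is overridden or
    # restored; the callback records TEST's value so the signal tests below
    # can assert on it.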
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
self.assertRaises(AttributeError, getattr, settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override = override_settings(TEST='override')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_class_decorator(self):
        # SimpleTestCase can be decorated by override_settings, but not
        # unittest.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with six.assertRaisesRegex(self, Exception,
"Only subclasses of Django SimpleTestCase*"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
        self.assertIsNone(self.testvalue)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
with self.settings(USE_I18N=False):
del settings.USE_I18N
self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
self.assertEqual(settings.USE_I18N, previous_i18n)
def test_override_settings_nested(self):
"""
Test that override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
def test_allowed_include_roots_string(self):
"""
ALLOWED_INCLUDE_ROOTS is not allowed to be incorrectly set to a string
rather than a tuple.
"""
self.assertRaises(ValueError, setattr, settings,
'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TestComplexSettingOverride(TestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
        self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with override_settings(TEST_WARN='override'):
self.assertEqual(settings.TEST_WARN, 'override')
self.assertEqual(len(w), 1)
            # File extension may be .py, .pyc, etc. Compare only the basename.
self.assertEqual(os.path.splitext(w[0].filename)[0],
os.path.splitext(__file__)[0])
self.assertEqual(str(w[0].message),
'Overriding setting TEST_WARN can lead to unexpected behavior.')
class TrailingSlashURLTests(TestCase):
"""
Tests for the MEDIA_URL and STATIC_URL settings.
They must end with a slash to ensure there's a deterministic way to build
paths in templates.
"""
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
self._original_static_url = self.settings_module.STATIC_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
self.settings_module.STATIC_URL = self._original_static_url
def test_blank(self):
"""
The empty string is accepted, even though it doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = ''
self.assertEqual('', self.settings_module.STATIC_URL)
def test_end_slash(self):
"""
It works if the value ends in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/'
self.assertEqual('http://static.foo.com/',
self.settings_module.STATIC_URL)
def test_no_end_slash(self):
"""
An ImproperlyConfigured exception is raised if the value doesn't end
in a slash.
"""
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = 'http://media.foo.com'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = 'http://static.foo.com'
def test_double_slash(self):
"""
If the value ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
self.assertEqual('http://media.foo.com/stupid//',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/stupid//'
self.assertEqual('http://static.foo.com/stupid//',
self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
settings_module = settings
def setUp(self):
self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
def tearDown(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
def test_none(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = None
req = HttpRequest()
        self.assertFalse(req.is_secure())
def test_set_without_xheader(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
        self.assertFalse(req.is_secure())
def test_set_with_xheader_wrong(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
        self.assertFalse(req.is_secure())
def test_set_with_xheader_right(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
        self.assertTrue(req.is_secure())
class IsOverriddenTest(TestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY='foo')
self.assertTrue(s.is_overridden('SECRET_KEY'))
def test_module(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
sys.modules['fake_settings_module'] = settings_module
try:
s = Settings('fake_settings_module')
self.assertTrue(s.is_overridden('SECRET_KEY'))
self.assertFalse(s.is_overridden('TEMPLATE_LOADERS'))
finally:
del sys.modules['fake_settings_module']
def test_override(self):
self.assertFalse(settings.is_overridden('TEMPLATE_LOADERS'))
with override_settings(TEMPLATE_LOADERS=[]):
self.assertTrue(settings.is_overridden('TEMPLATE_LOADERS'))
"""
Functions for querying and modifying a user account and the groups to which it
belongs.
"""
import ctypes
import getpass
import logging
import os
import sys
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.decorators.jinja import jinja_filter
# Conditional imports
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
try:
import grp
HAS_GRP = True
except ImportError:
HAS_GRP = False
try:
import pysss
HAS_PYSSS = True
except ImportError:
HAS_PYSSS = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
log = logging.getLogger(__name__)
def get_user():
"""
Get the current user
"""
if HAS_PWD:
ret = pwd.getpwuid(os.geteuid()).pw_name
elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:
ret = salt.utils.win_functions.get_current_user()
else:
raise CommandExecutionError(
"Required external library (pwd or win32api) not installed"
)
return salt.utils.stringutils.to_unicode(ret)
@jinja_filter("get_uid")
def get_uid(user=None):
"""
    Get the uid for a given user name. If no user is given, the current euid
    is returned. None is returned if the user does not exist, or on systems
    that do not support pwd or os.geteuid.
"""
if not HAS_PWD:
return None
elif user is None:
try:
return os.geteuid()
except AttributeError:
return None
else:
try:
return pwd.getpwnam(user).pw_uid
except KeyError:
return None
def _win_user_token_is_admin(user_token):
"""
Using the win32 api, determine if the user with token 'user_token' has
administrator rights.
See MSDN entry here:
http://msdn.microsoft.com/en-us/library/aa376389(VS.85).aspx
"""
class SID_IDENTIFIER_AUTHORITY(ctypes.Structure):
_fields_ = [
("byte0", ctypes.c_byte),
("byte1", ctypes.c_byte),
("byte2", ctypes.c_byte),
("byte3", ctypes.c_byte),
("byte4", ctypes.c_byte),
("byte5", ctypes.c_byte),
]
nt_authority = SID_IDENTIFIER_AUTHORITY()
nt_authority.byte5 = 5
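    # byte5 == 5 selects SECURITY_NT_AUTHORITY, the authority under which the
    # BUILTIN\Administrators alias SID is defined.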
SECURITY_BUILTIN_DOMAIN_RID = 0x20
DOMAIN_ALIAS_RID_ADMINS = 0x220
administrators_group = ctypes.c_void_p()
if (
ctypes.windll.advapi32.AllocateAndInitializeSid(
ctypes.byref(nt_authority),
2,
SECURITY_BUILTIN_DOMAIN_RID,
DOMAIN_ALIAS_RID_ADMINS,
0,
0,
0,
0,
0,
0,
ctypes.byref(administrators_group),
)
== 0
):
raise Exception("AllocateAndInitializeSid failed")
try:
is_admin = ctypes.wintypes.BOOL()
if (
ctypes.windll.advapi32.CheckTokenMembership(
user_token, administrators_group, ctypes.byref(is_admin)
)
== 0
):
raise Exception("CheckTokenMembership failed")
return is_admin.value != 0
finally:
ctypes.windll.advapi32.FreeSid(administrators_group)
def _win_current_user_is_admin():
"""
ctypes.windll.shell32.IsUserAnAdmin() is intentionally avoided due to this
function being deprecated.
"""
return _win_user_token_is_admin(0)
def get_specific_user():
"""
    Get a user name for publishing. If the current user is "root", attempt to
    be more specific (e.g. the sudo invoker).
"""
user = get_user()
if salt.utils.platform.is_windows():
if _win_current_user_is_admin():
return "sudo_{}".format(user)
else:
env_vars = ("SUDO_USER",)
if user == "root":
for evar in env_vars:
if evar in os.environ:
return "sudo_{}".format(os.environ[evar])
return user
def chugid(runas, group=None):
"""
Change the current process to belong to the specified user (and the groups
to which it belongs)
"""
uinfo = pwd.getpwnam(runas)
supgroups = []
supgroups_seen = set()
if group:
try:
target_pw_gid = grp.getgrnam(group).gr_gid
except KeyError as err:
raise CommandExecutionError(
"Failed to fetch the GID for {}. Error: {}".format(group, err)
)
else:
target_pw_gid = uinfo.pw_gid
# The line below used to exclude the current user's primary gid.
# However, when root belongs to more than one group
    # this causes root's primary group of '0' to be dropped from
    # its grouplist. On FreeBSD, at least, this makes some
# command executions fail with 'access denied'.
#
# The Python documentation says that os.setgroups sets only
# the supplemental groups for a running process. On FreeBSD
# this does not appear to be strictly true.
group_list = get_group_dict(runas, include_default=True)
if sys.platform == "darwin":
group_list = {k: v for k, v in group_list.items() if not k.startswith("_")}
    for group_name in group_list:
        gid = group_list[group_name]
        if gid not in supgroups_seen:
            supgroups_seen.add(gid)
            supgroups.append(gid)
if os.getgid() != target_pw_gid:
try:
os.setgid(target_pw_gid)
except OSError as err:
raise CommandExecutionError(
"Failed to change from gid {} to {}. Error: {}".format(
os.getgid(), target_pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
"Failed to set supplemental groups to {}. Error: {}".format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
"Failed to change from uid {} to {}. Error: {}".format(
os.getuid(), uinfo.pw_uid, err
)
)
def chugid_and_umask(runas, umask, group=None):
"""
    Helper method for subprocess.Popen to initialise uid/gid and umask
for the new process.
"""
set_runas = False
set_grp = False
current_user = getpass.getuser()
current_grp = grp.getgrgid(pwd.getpwnam(getpass.getuser()).pw_gid).gr_name
if runas and runas != current_user:
set_runas = True
runas_user = runas
else:
runas_user = current_user
if group:
runas_grp = group
if group != current_grp:
set_grp = True
else:
if runas and runas != current_user:
runas_grp = grp.getgrgid(pwd.getpwnam(runas_user).pw_gid).gr_name
set_grp = True
else:
runas_grp = current_grp
if set_runas or set_grp:
chugid(runas_user, runas_grp)
if umask is not None:
os.umask(umask) # pylint: disable=blacklisted-function
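# A minimal sketch of the intended use: chugid_and_umask is passed to
# subprocess.Popen as ``preexec_fn`` so the child drops privileges before
# exec. The command and user below are hypothetical.
def _example_run_as(cmd, runas="nobody", umask=0o077):  # hypothetical helper
    import functools
    import subprocess

    return subprocess.Popen(
        cmd, preexec_fn=functools.partial(chugid_and_umask, runas, umask)
    )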
def get_default_group(user):
"""
Returns the specified user's default group. If the user doesn't exist, a
KeyError will be raised.
"""
return (
grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name if HAS_GRP and HAS_PWD else None
)
def get_group_list(user, include_default=True):
"""
Returns a list of all of the system group names of which the user
is a member.
"""
if HAS_GRP is False or HAS_PWD is False:
return []
group_names = None
ugroups = set()
if hasattr(os, "getgrouplist"):
# Try os.getgrouplist, available in python >= 3.3
log.trace("Trying os.getgrouplist for '%s'", user)
try:
group_names = [
grp.getgrgid(grpid).gr_name
for grpid in os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception: # pylint: disable=broad-except
pass
elif HAS_PYSSS:
# Try pysss.getgrouplist
log.trace("Trying pysss.getgrouplist for '%s'", user)
try:
group_names = list(pysss.getgrouplist(user))
except Exception: # pylint: disable=broad-except
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to match behavior of
# os.getgrouplist() and pysss.getgrouplist()
log.trace("Trying generic group list for '%s'", user)
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = get_default_group(user)
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
if group_names is not None:
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
            # supplemental groups, so include_default=False omits the user's
            # default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace("Group list for user '%s': %s", user, sorted(ugroups))
return sorted(ugroups)
def get_group_dict(user=None, include_default=True):
"""
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
"""
if HAS_GRP is False or HAS_PWD is False:
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict
def get_gid_list(user, include_default=True):
"""
Returns a list of all of the system group IDs of which the user
is a member.
"""
if HAS_GRP is False or HAS_PWD is False:
return []
gid_list = list(get_group_dict(user, include_default=include_default).values())
return sorted(set(gid_list))
def get_gid(group=None):
"""
Get the gid for a given group name. If no group given, the current egid
will be returned. If the group does not exist, None will be returned. On
systems which do not support grp or os.getegid it will return None.
"""
if not HAS_GRP:
return None
if group is None:
try:
return os.getegid()
except AttributeError:
return None
else:
try:
return grp.getgrnam(group).gr_gid
except KeyError:
return None
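# A small sketch tying the group helpers together (user name hypothetical):
# get_group_list() returns names, get_group_dict() maps names to gids, and
# get_gid_list() returns the sorted, de-duplicated gids.
def _example_group_lookups(user="nobody"):  # hypothetical helper
    names = get_group_list(user)  # e.g. ['nobody']
    mapping = get_group_dict(user)  # e.g. {'nobody': 65534}
    gids = get_gid_list(user)  # e.g. [65534]
    return names, mapping, gids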
| |
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoding utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import os
# Dependency imports
import numpy as np
import six
from six.moves import input # pylint: disable=redefined-builtin
from DLT2T.data_generators import text_encoder
from DLT2T.utils import devices
from DLT2T.utils import input_fn_builder
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# Number of samples to draw for an image input (in such cases as captioning)
IMAGE_DECODE_LENGTH = 100
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = tf.contrib.training.HParams(
use_last_position_only=False,
save_images=False,
problem_idx=0,
extra_length=50,
batch_size=0,
beam_size=4,
alpha=0.6,
return_beams=False,
max_input_size=-1,
identity_output=False,
num_samples=-1,
      delimiter="\n",
      shards=1)
hp = hp.parse(overrides)
return hp
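# Usage sketch: overrides are parsed from a comma-separated string, so a
# greedy decode could be requested like this (values are illustrative):
#
#   hp = decode_hparams("beam_size=1,alpha=0.0,extra_length=100")
#   assert hp.beam_size == 1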
def log_decode_results(inputs,
outputs,
problem_name,
prediction_idx,
inputs_vocab,
targets_vocab,
targets=None,
save_images=False,
model_dir=None,
identity_output=False):
"""Log inference results."""
if "image" in problem_name and save_images:
save_path = os.path.join(model_dir, "%s_prediction_%d.jpg" %
(problem_name, prediction_idx))
show_and_save_image(inputs / 255., save_path)
elif inputs_vocab:
if identity_output:
decoded_inputs = " ".join(map(str, inputs.flatten()))
else:
decoded_inputs = inputs_vocab.decode(_save_until_eos(inputs.flatten()))
tf.logging.info("Inference results INPUT: %s" % decoded_inputs)
decoded_targets = None
if identity_output:
decoded_outputs = " ".join(map(str, outputs.flatten()))
if targets is not None:
decoded_targets = " ".join(map(str, targets.flatten()))
  else:
    decoded_outputs = targets_vocab.decode(_save_until_eos(outputs.flatten()))
    if targets is not None:
      decoded_targets = targets_vocab.decode(_save_until_eos(targets.flatten()))
tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
if targets is not None:
tf.logging.info("Inference results TARGET: %s" % decoded_targets)
return decoded_outputs, decoded_targets
def decode_from_dataset(estimator,
problem_names,
decode_hp,
decode_to_file=None,
dataset_split=None):
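  """Perform decoding on a dataset, optionally writing results to files."""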
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_names))
hparams = estimator.params
for problem_idx, problem_name in enumerate(problem_names):
# Build the inference input function
infer_input_fn = input_fn_builder.build_input_fn(
mode=tf.estimator.ModeKeys.PREDICT,
train_mode=FLAGS.train_mode,
infer_mode=FLAGS.infer_mode,
hparams=hparams,
data_dir=hparams.data_dir,
num_datashards=devices.data_parallelism().n,
fixed_problem=problem_idx,
batch_size=decode_hp.batch_size,
dataset_split=dataset_split)
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn)
# Prepare output file writers if decode_to_file passed
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name,
decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
problem_hparams = hparams.problems[problem_idx]
inputs_vocab = problem_hparams.vocabulary.get("inputs", None)
targets_vocab = problem_hparams.vocabulary["targets"]
for num_predictions, prediction in enumerate(predictions):
num_predictions += 1
inputs = prediction["inputs"]
targets = prediction["targets"]
outputs = prediction["outputs"]
# Log predictions
decoded_outputs = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
model_dir=estimator.model_dir,
identity_output=decode_hp.identity_output,
targets=targets)
decoded_outputs.append(decoded)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
model_dir=estimator.model_dir,
identity_output=decode_hp.identity_output,
targets=targets)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for decoded_output, decoded_target in decoded_outputs:
          output_file.write(str(decoded_output) + decode_hp.delimiter)
          target_file.write(str(decoded_target) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
if decode_to_file:
output_file.close()
target_file.close()
tf.logging.info("Completed inference on %d samples." % num_predictions) # pylint: disable=undefined-loop-variable
def decode_from_file(estimator, filename, decode_hp, decode_to_file=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
hparams = estimator.params
problem_id = decode_hp.problem_idx
inputs_vocab = hparams.problems[problem_id].vocabulary["inputs"]
targets_vocab = hparams.problems[problem_id].vocabulary["targets"]
problem_name = FLAGS.problems.split("-")[problem_id]
tf.logging.info("Performing decoding from a file.")
sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.shards,
decode_hp.delimiter)
num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1
def input_fn():
input_gen = _decode_batch_input_fn(
problem_id, num_decode_batches, sorted_inputs, inputs_vocab,
decode_hp.batch_size, decode_hp.max_input_size)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn)
for result in result_iter:
if decode_hp.return_beams:
beam_decodes = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
decoded_outputs, _ = log_decode_results(result["inputs"], beam,
problem_name, None,
inputs_vocab, targets_vocab)
beam_decodes.append(decoded_outputs)
decodes.append("\t".join(beam_decodes))
else:
decoded_outputs, _ = log_decode_results(result["inputs"],
result["outputs"], problem_name,
None, inputs_vocab, targets_vocab)
decodes.append(decoded_outputs)
# Reversing the decoded inputs and outputs because they were reversed in
# _decode_batch_input_fn
sorted_inputs.reverse()
decodes.reverse()
# Dumping inputs and outputs to file filename.decodes in
# format result\tinput in the same order as original inputs
if decode_to_file:
output_filename = decode_to_file
else:
output_filename = filename
if decode_hp.shards > 1:
base_filename = output_filename + ("%.2d" % FLAGS.worker_id)
else:
base_filename = output_filename
decode_filename = _decode_filename(base_filename, problem_name, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s\n" % (decodes[sorted_keys[index]], decode_hp.delimiter))
def _decode_filename(base_filename, problem_name, decode_hp):
return "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha))
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator."""
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn
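# Sketch of how make_input_fn_from_generator is used (hypothetical data):
# the first generator element fixes the dtypes and ranks, and each call to
# the returned input_fn pulls the next element through tf.py_func.
#
#   def gen():
#     while True:
#       yield {"inputs": np.zeros((1, 4), dtype=np.int32)}
#
#   input_fn = make_input_fn_from_generator(gen())
#   features = input_fn()  # dict of Tensors mirroring the generator output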
def decode_interactively(estimator, decode_hp):
"""Interactive decoding."""
hparams = estimator.params
def input_fn():
gen_fn = make_input_fn_from_generator(_interactive_input_fn(hparams))
example = gen_fn()
example = _interactive_input_tensor_to_features_dict(example, hparams)
return example
result_iter = estimator.predict(input_fn)
for result in result_iter:
problem_idx = result["problem_choice"]
targets_vocab = hparams.problems[problem_idx].vocabulary["targets"]
if decode_hp.return_beams:
beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = targets_vocab.decode(_save_until_eos(beam.flatten()))
if scores is not None:
tf.logging.info("%s\tScore:%f" % (beam_string, scores[k]))
else:
tf.logging.info(beam_string)
else:
if decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
tf.logging.info(
targets_vocab.decode(_save_until_eos(result["outputs"].flatten())))
def _decode_batch_input_fn(problem_id, num_decode_batches, sorted_inputs,
vocabulary, batch_size, max_input_size):
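  """Yield batches of padded, encoded inputs, longest inputs first."""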
tf.logging.info(" batch %d" % num_decode_batches)
# First reverse all the input sentences so that if you're going to get OOMs,
# you'll see it in the first batch
sorted_inputs.reverse()
for b in range(num_decode_batches):
tf.logging.info("Decoding batch %d" % b)
batch_length = 0
batch_inputs = []
for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
input_ids = vocabulary.encode(inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
input_ids.append(text_encoder.EOS_ID)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
x = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(x)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
"problem_choice": np.array(problem_id).astype(np.int32),
}
def _interactive_input_fn(hparams):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
"""
num_samples = 1
decode_length = 100
input_type = "text"
problem_id = 0
p_hparams = hparams.problems[problem_id]
has_input = "inputs" in p_hparams.input_modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" pr=<problem_num> (set the problem number, default: 0)\n"
" in=<input_problem> (set the input problem number)\n"
" ou=<output_problem> (set the output problem number)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length, "source_string"
if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "pr=":
problem_id = int(input_string[3:])
p_hparams = hparams.problems[problem_id]
has_input = "inputs" in p_hparams.input_modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
elif input_string[:3] == "in=":
problem = int(input_string[3:])
p_hparams.input_modality = hparams.problems[problem].input_modality
p_hparams.input_space_id = hparams.problems[problem].input_space_id
elif input_string[:3] == "ou=":
problem = int(input_string[3:])
p_hparams.target_modality = hparams.problems[problem].target_modality
p_hparams.target_space_id = hparams.problems[problem].target_space_id
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
yield {
"inputs": np.array(x).astype(np.int32),
"problem_choice": np.array(problem_id).astype(np.int32)
}
elif input_type == "image":
input_path = input_string
img = read_image(input_path)
yield {
"inputs": img.astype(np.int32),
"problem_choice": np.array(problem_id).astype(np.int32)
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
yield {
"inputs": np.array(x).astype(np.int32),
"problem_choice": np.array(problem_id).astype(np.int32)
}
else:
raise Exception("Unsupported input type.")
def read_image(path):
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(path)
def show_and_save_image(img, save_path):
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning("Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
plt.savefig(save_path)
def _get_sorted_inputs(filename, num_shards=1, delimiter="\n"):
"""Returning inputs sorted according to length.
Args:
filename: path to file with inputs, 1 per line.
num_shards: number of input shards. If > 1, will read from file filename.XX,
where XX is FLAGS.worker_id.
delimiter: str, delimits records in the file.
Returns:
    a sorted list of inputs and a dict mapping original input indices to
    their positions in the sorted list
"""
tf.logging.info("Getting sorted inputs")
  # Read the file and sort inputs according to input length.
if num_shards > 1:
decode_filename = filename + ("%.2d" % FLAGS.worker_id)
else:
decode_filename = filename
with tf.gfile.Open(decode_filename) as f:
text = f.read()
records = text.split(delimiter)
inputs = [record.strip() for record in records]
input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
# We'll need the keys to rearrange the inputs back into their original order
sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return sorted_inputs, sorted_keys
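# Sketch of how sorted_keys is used downstream: after decoding in sorted
# order, decodes[sorted_keys[i]] recovers the result for original input i
# (see decode_from_file above). Illustrative values:
#
#   inputs == ["a much longer line", "short"]
#   sorted_inputs == ["short", "a much longer line"]
#   sorted_keys == {0: 1, 1: 0}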
def _save_until_eos(hyp):
"""Strips everything after the first <EOS> token, which is normally 1."""
try:
index = list(hyp).index(text_encoder.EOS_ID)
return hyp[0:index]
except ValueError:
# No EOS_ID: return the array as-is.
return hyp
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
    feature_map: a dictionary with keys `problem_choice` and `inputs`
      containing Tensors.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
  input_is_image = len(inputs.get_shape()) >= 3
def input_fn(problem_choice, x=inputs): # pylint: disable=missing-docstring
if input_is_image:
x = tf.image.resize_images(x, [299, 299])
x = tf.reshape(x, [1, 299, 299, -1])
x = tf.to_int32(x)
else:
# Remove the batch dimension.
num_samples = x[0]
length = x[2]
x = tf.slice(x, [3], tf.to_int32([length]))
x = tf.reshape(x, [1, -1, 1, 1])
# Transform into a batch of size num_samples to get that many random
# decodes.
x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
p_hparams = hparams.problems[problem_choice]
return (tf.constant(p_hparams.input_space_id), tf.constant(
p_hparams.target_space_id), x)
input_space_id, target_space_id, x = input_fn_builder.cond_on_index(
input_fn, feature_map["problem_choice"], len(hparams.problems) - 1)
features = {}
features["problem_choice"] = tf.convert_to_tensor(
feature_map["problem_choice"])
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
features["inputs"] = x
return features
def _decode_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: a dictionary with keys `problem_choice` and `input` containing
Tensors.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False
def input_fn(problem_choice, x=inputs): # pylint: disable=missing-docstring
p_hparams = hparams.problems[problem_choice]
    # Add a third empty dimension.
x = tf.expand_dims(x, axis=[2])
x = tf.to_int32(x)
return (tf.constant(p_hparams.input_space_id), tf.constant(
p_hparams.target_space_id), x)
input_space_id, target_space_id, x = input_fn_builder.cond_on_index(
input_fn, feature_map["problem_choice"], len(hparams.problems) - 1)
features = {}
features["problem_choice"] = feature_map["problem_choice"]
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (IMAGE_DECODE_LENGTH
if input_is_image else tf.shape(x)[1] + 50)
features["inputs"] = x
return features
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2012 Samsung SDS Co., LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs and SSH key_pairs.
"""
from __future__ import absolute_import
import base64
import hashlib
import os
import string
import Crypto.Cipher.AES
from synaps import context
from synaps import db
from synaps import exception
from synaps import flags
from synaps import log as logging
from synaps.openstack.common import cfg
from synaps import utils
LOG = logging.getLogger(__name__)
crypto_opts = [
cfg.StrOpt('ca_file',
default='cacert.pem',
help=_('Filename of root CA')),
cfg.StrOpt('key_file',
default=os.path.join('private', 'cakey.pem'),
help=_('Filename of private key')),
cfg.StrOpt('crl_file',
default='crl.pem',
help=_('Filename of root Certificate Revocation List')),
cfg.StrOpt('keys_path',
default='$state_path/keys',
help=_('Where we keep our keys')),
cfg.StrOpt('ca_path',
default='$state_path/CA',
help=_('Where we keep our root CA')),
cfg.BoolOpt('use_project_ca',
default=False,
help=_('Should we use a CA for each project?')),
cfg.StrOpt('user_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=NovaDev/CN=%.16s-%.16s-%s',
help=_('Subject for certificate for users, %s for '
'project, user, timestamp')),
cfg.StrOpt('project_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=NovaDev/CN=project-ca-%.16s-%s',
help=_('Subject for certificate for projects, %s for '
'project, timestamp')),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(crypto_opts)
def ca_folder(project_id=None):
if FLAGS.use_project_ca and project_id:
return os.path.join(FLAGS.ca_path, 'projects', project_id)
return FLAGS.ca_path
def ca_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.ca_file)
def key_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.key_file)
def crl_path(project_id=None):
return os.path.join(ca_folder(project_id), FLAGS.crl_file)
def fetch_ca(project_id=None):
if not FLAGS.use_project_ca:
project_id = None
with open(ca_path(project_id), 'r') as cafile:
return cafile.read()
def ensure_ca_filesystem():
"""Ensure the CA filesystem exists."""
ca_dir = ca_folder()
if not os.path.exists(ca_path()):
genrootca_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'genrootca.sh')
start = os.getcwd()
if not os.path.exists(ca_dir):
os.makedirs(ca_dir)
os.chdir(ca_dir)
utils.execute("sh", genrootca_sh_path)
os.chdir(start)
def _generate_fingerprint(public_key_file):
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', public_key_file)
fingerprint = out.split(' ')[1]
return fingerprint
def generate_fingerprint(public_key):
with utils.tempdir() as tmpdir:
try:
pubfile = os.path.join(tmpdir, 'temp.pub')
with open(pubfile, 'w') as f:
f.write(public_key)
return _generate_fingerprint(pubfile)
except exception.ProcessExecutionError:
raise exception.InvalidKeypair()
def generate_key_pair(bits=1024):
    # NOTE: ssh-keygen uses the conventional RSA public exponent 65537 (F4).
with utils.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
'-t', 'rsa', '-f', keyfile)
fingerprint = _generate_fingerprint('%s.pub' % (keyfile))
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
return (private_key, public_key, fingerprint)
def fetch_crl(project_id):
"""Get crl file for project."""
if not FLAGS.use_project_ca:
project_id = None
with open(crl_path(project_id), 'r') as crlfile:
return crlfile.read()
def decrypt_text(project_id, text):
private_key = key_path(project_id)
if not os.path.exists(private_key):
raise exception.ProjectNotFound(project_id=project_id)
try:
dec, _err = utils.execute('openssl',
'rsautl',
'-decrypt',
'-inkey', '%s' % private_key,
process_input=text)
return dec
except exception.ProcessExecutionError:
raise exception.DecryptionFailure()
def revoke_cert(project_id, file_name):
"""Revoke a cert by file name."""
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
file_name)
utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
'-out', FLAGS.crl_file)
os.chdir(start)
def _project_cert_subject(project_id):
"""Helper to generate user cert subject."""
return FLAGS.project_cert_subject % (project_id, utils.isotime())
def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject."""
return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime())
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
geninter_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'geninter.sh')
start = os.getcwd()
os.chdir(ca_folder())
utils.execute('sh', geninter_sh_path, project_id,
_project_cert_subject(project_id))
os.chdir(start)
def sign_csr(csr_text, project_id=None):
if not FLAGS.use_project_ca:
project_id = None
if not project_id:
return _sign_csr(csr_text, ca_folder())
_ensure_project_folder(project_id)
    return _sign_csr(csr_text, ca_folder(project_id))
def _sign_csr(csr_text, ca_folder):
with utils.tempdir() as tmpdir:
inbound = os.path.join(tmpdir, 'inbound.csr')
outbound = os.path.join(tmpdir, 'outbound.csr')
with open(inbound, 'w') as csrfile:
csrfile.write(csr_text)
LOG.debug(_('Flags path: %s'), ca_folder)
start = os.getcwd()
# Change working dir to CA
if not os.path.exists(ca_folder):
os.makedirs(ca_folder)
os.chdir(ca_folder)
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
'./openssl.cnf', '-infiles', inbound)
out, _err = utils.execute('openssl', 'x509', '-in', outbound,
'-serial', '-noout')
serial = string.strip(out.rpartition('=')[2])
os.chdir(start)
with open(outbound, 'r') as crtfile:
return (serial, crtfile.read())
def _build_cipher(key, iv):
"""Make a 128bit AES CBC encode/decode Cipher object.
Padding is handled internally."""
return Crypto.Cipher.AES.new(key, IV=iv)
def encryptor(key):
"""Simple symmetric key encryption."""
key = base64.b64decode(key)
iv = '\0' * 16
def encrypt(data):
cipher = _build_cipher(key, iv)
# Must pad string to multiple of 16 chars
padding = (16 - len(data) % 16) * " "
v = cipher.encrypt(data + padding)
del cipher
v = base64.b64encode(v)
return v
return encrypt
def decryptor(key):
"""Simple symmetric key decryption."""
key = base64.b64decode(key)
iv = '\0' * 16
def decrypt(data):
data = base64.b64decode(data)
cipher = _build_cipher(key, iv)
v = cipher.decrypt(data).rstrip()
del cipher
return v
return decrypt
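# Round-trip sketch (hypothetical key material): the key must be the base64
# encoding of a 16/24/32 byte AES key, and decryptor(key) inverts
# encryptor(key) for this padding scheme.
def _example_roundtrip():  # hypothetical helper, not part of the public API
    key = base64.b64encode('0' * 16)
    encrypt = encryptor(key)
    decrypt = decryptor(key)
    return decrypt(encrypt('some plaintext'))  # 'some plaintext'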
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""Compute an md5 hash.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
    :rtype: str
:returns: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
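# Usage sketch: any seekable file-like object works, e.g. an in-memory
# buffer (Python 2 idiom to match this module):
#
#   import StringIO
#   compute_md5(StringIO.StringIO('hello'))
#   # -> '5d41402abc4b2a76b9719d911017c592'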
| |
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval
class BinomTestResult:
"""
Result of `scipy.stats.binomtest`.
Attributes
----------
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
Methods
-------
proportion_ci :
Compute the confidence interval for the estimate of the proportion.
"""
def __init__(self, k, n, alternative, pvalue, proportion_estimate):
self.k = k
self.n = n
self.alternative = alternative
self.proportion_estimate = proportion_estimate
self.pvalue = pvalue
def __repr__(self):
s = ("BinomTestResult("
f"k={self.k}, "
f"n={self.n}, "
f"alternative={self.alternative!r}, "
f"proportion_estimate={self.proportion_estimate}, "
f"pvalue={self.pvalue})")
return s
def proportion_ci(self, confidence_level=0.95, method='exact'):
"""
Compute the confidence interval for the estimated proportion.
Parameters
----------
confidence_level : float, optional
Confidence level for the computed confidence interval
of the estimated proportion. Default is 0.95.
method : {'exact', 'wilson', 'wilsoncc'}, optional
Selects the method used to compute the confidence interval
for the estimate of the proportion:
'exact' :
Use the Clopper-Pearson exact method [1]_.
'wilson' :
Wilson's method, without continuity correction ([2]_, [3]_).
'wilsoncc' :
Wilson's method, with continuity correction ([2]_, [3]_).
Default is ``'exact'``.
Returns
-------
ci : ``ConfidenceInterval`` object
The object has attributes ``low`` and ``high`` that hold the
lower and upper bounds of the confidence interval.
References
----------
.. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
fiducial limits illustrated in the case of the binomial,
Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
.. [2] E. B. Wilson, Probable inference, the law of succession, and
statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
(1927).
.. [3] Robert G. Newcombe, Two-sided confidence intervals for the
single proportion: comparison of seven methods, Statistics
in Medicine, 17, pp 857-872 (1998).
Examples
--------
>>> from scipy.stats import binomtest
>>> result = binomtest(k=7, n=50, p=0.1)
>>> result.proportion_estimate
0.14
>>> result.proportion_ci()
ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
"""
if method not in ('exact', 'wilson', 'wilsoncc'):
raise ValueError("method must be one of 'exact', 'wilson' or "
"'wilsoncc'.")
if not (0 <= confidence_level <= 1):
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
if method == 'exact':
low, high = _binom_exact_conf_int(self.k, self.n,
confidence_level,
self.alternative)
else:
# method is 'wilson' or 'wilsoncc'
low, high = _binom_wilson_conf_int(self.k, self.n,
confidence_level,
self.alternative,
correction=method == 'wilsoncc')
return ConfidenceInterval(low=low, high=high)
def _findp(func):
try:
p = brentq(func, 0, 1)
except RuntimeError:
raise RuntimeError('numerical solver failed to converge when '
'computing the confidence limits') from None
except ValueError as exc:
raise ValueError('brentq raised a ValueError; report this to the '
'SciPy developers') from exc
return p
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
    Compute the exact (Clopper-Pearson) confidence interval for the
    estimated binomial proportion.
    Returns plow, phigh.
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
# For reference, the formulas implemented here are from
# Newcombe (1998) (ref. [3] in the proportion_ci docstring).
denom = 2*(n + z**2)
center = (2*n*p + z**2)/denom
q = 1 - p
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
lo = center - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
hi = center + dhi
else:
delta = z/denom * sqrt(4*n*p*q + z**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = center - delta
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = center + delta
return lo, hi
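# Worked sketch of the uncorrected two-sided Wilson interval for k=7, n=50
# at confidence_level=0.95 (so z = ndtri(0.975) ~= 1.96):
#
#   p      = 7 / 50 = 0.14
#   denom  = 2*(n + z**2)
#   center = (2*n*p + z**2) / denom
#   delta  = z/denom * sqrt(4*n*p*(1 - p) + z**2)
#   interval = (center - delta, center + delta)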
def binomtest(k, n, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
The binomial test [1]_ is a test of the null hypothesis that the
probability of success in a Bernoulli experiment is `p`.
Details of the test can be found in many texts on statistics, such
as section 24.5 of [2]_.
Parameters
----------
k : int
The number of successes.
n : int
The number of trials.
p : float, optional
The hypothesized probability of success, i.e. the expected
proportion of successes. The value must be in the interval
``0 <= p <= 1``. The default value is ``p = 0.5``.
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
result : `~scipy.stats._result_classes.BinomTestResult` instance
The return value is an object with the following attributes:
k : int
The number of successes (copied from `binomtest` input).
n : int
The number of trials (copied from `binomtest` input).
alternative : str
Indicates the alternative hypothesis specified in the input
to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
or ``'less'``.
pvalue : float
The p-value of the hypothesis test.
proportion_estimate : float
The estimate of the proportion of successes.
The object has the following methods:
proportion_ci(confidence_level=0.95, method='exact') :
Compute the confidence interval for ``proportion_estimate``.
Notes
-----
.. versionadded:: 1.7.0
References
----------
.. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
.. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
Prentice Hall, Upper Saddle River, New Jersey USA (2010)
Examples
--------
>>> from scipy.stats import binomtest
A car manufacturer claims that no more than 10% of their cars are unsafe.
15 cars are inspected for safety, 3 were found to be unsafe. Test the
manufacturer's claim:
>>> result = binomtest(3, n=15, p=0.1, alternative='greater')
>>> result.pvalue
0.18406106910639114
The null hypothesis cannot be rejected at the 5% level of significance
because the returned p-value is greater than the critical value of 5%.
The estimated proportion is simply ``3/15``:
>>> result.proportion_estimate
0.2
We can use the `proportion_ci()` method of the result to compute the
confidence interval of the estimate:
>>> result.proportion_ci(confidence_level=0.95)
ConfidenceInterval(low=0.05684686759024681, high=1.0)
"""
k = _validate_int(k, 'k', minimum=0)
n = _validate_int(n, 'n', minimum=1)
if k > n:
raise ValueError('k must not be greater than n.')
if not (0 <= p <= 1):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized; \n"
"must be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = binom.cdf(k, n, p)
elif alternative == 'greater':
pval = binom.sf(k-1, n, p)
else:
# alternative is 'two-sided'
d = binom.pmf(k, n, p)
rerr = 1 + 1e-7
if k == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif k < p * n:
ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
-d*rerr, np.ceil(p * n), n)
# y is the number of terms between mode and n that are <= d*rerr.
# ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
# if the first equality doesn't hold, y=n-ix. Otherwise, we
# need to include ix as well as the equality holds. Note that
# the equality will hold in very very rare situations due to rerr.
y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
else:
ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
d*rerr, 0, np.floor(p * n))
# y is the number of terms between 0 and mode that are <= d*rerr.
# we need to add a 1 to account for the 0 index.
# For comparing this with old behavior, see
# tst_binary_srch_for_binom_tst method in test_morestats.
y = ix + 1
pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
pval = min(1.0, pval)
result = BinomTestResult(k=k, n=n, alternative=alternative,
proportion_estimate=k/n, pvalue=pval)
return result
def _binary_search_for_binom_tst(a, d, lo, hi):
"""
Conducts an implicit binary search on a function specified by `a`.
Meant to be used on the binomial PMF for the case of two-sided tests
to obtain the value on the other side of the mode where the tail
probability should be computed. The values on either side of
the mode are always in order, meaning binary search is applicable.
Parameters
----------
a : callable
The function over which to perform binary search. Its values
for inputs lo and hi should be in ascending order.
d : float
The value to search.
lo : int
The lower end of range to search.
hi : int
The higher end of the range to search.
Returns
    -------
    int
        The index i, between lo and hi, such that
        ``a(i) <= d < a(i+1)``.
"""
while lo < hi:
mid = lo + (hi-lo)//2
midval = a(mid)
if midval < d:
lo = mid+1
elif midval > d:
hi = mid-1
else:
return mid
if a(lo) <= d:
return lo
else:
return lo-1
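# Minimal sketch (hypothetical, monotone values): searching a = [1, 2, 4, 8]
# between indices 0 and 3 for d = 5 returns 2, since a(2) <= 5 < a(3).
def _example_binary_search():  # hypothetical helper, not part of SciPy's API
    vals = [1, 2, 4, 8]
    return _binary_search_for_binom_tst(lambda i: vals[i], 5, 0, 3)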
| |
from erlang_python import ErlangPythonServices
from helper import get_host, get_port
from os.path import basename
from thrift import Thrift
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport
class Dbg(object):
active = False
DEBUGGER = Dbg()
def main():
if DEBUGGER.active:
print("{} - main (): Start".format(basename(__file__)))
# --------------------------------------------------------------------------
# Read network parameters
# --------------------------------------------------------------------------
host = get_host()
port = get_port()
print(("{} - main (): This client will connect to a server with " +
"ip address {} and port number {}").format(basename(__file__), host, port))
# --------------------------------------------------------------------------
# Init thrift connection and protocol handlers
# --------------------------------------------------------------------------
# Make socket
socket = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(socket)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ErlangPythonServices.Client(protocol)
# Connect to server
transport.open()
# --------------------------------------------------------------------------
# XOR Training
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# > annlink:create_neural_network(Conn, [2, 10, 1]).
# {ClientId,<<"A3zfatHw5jIZVsVaNYDKAemgg0qvQ+le">>}
# --------------------------------------------------------------------------
num_inputs = 2
num_outputs = 1
learning_rate = 0.001
model_id = client.initialize_model(num_inputs,
num_outputs,
learning_rate)
size = 10
client.add_layer(model_id,
size)
activation = "sigmoid"
client.add_activation(model_id,
activation)
size = 1
client.add_layer(model_id,
size)
# --------------------------------------------------------------------------
# > Inputs = [[0,0],[0,1],[1,0],[1,1]].
# [[0,0],[0,1],[1,0],[1,1]]
# > Labels = [[0],[1],[1],[0]].
# [[0],[1],[1],[0]]
# > annlink:add_data_chunk(Conn, ClientId, Inputs, Labels).
# ok
# --------------------------------------------------------------------------
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
labels = [[0], [1], [1], [0]]
scale = []
client.add_data_chunk(model_id,
inputs,
labels,
scale)
# --------------------------------------------------------------------------
# > annlink:set_learning_rate(Conn, ClientId, 0.05).
# ok
# --------------------------------------------------------------------------
learning_rate = 0.05
client.set_learning_rate(model_id,
learning_rate)
# --------------------------------------------------------------------------
# > annlink:train(Conn).
# 0.14462602138519287
# --------------------------------------------------------------------------
epochs = 1
batch_size = 512
result = client.train(model_id,
epochs,
batch_size)
if DEBUGGER.active:
print("{} - model {} - main ({}): result from train".format(basename(__file__), model_id,
result))
# --------------------------------------------------------------------------
# >[annlink:train(Conn, ClientId, 200) || _ <- lists:seq(1,5)].
# which should produce something close to:
#
# [0.126319688744843,0.05803197836337134,
# 1.3663458995789856e-8,6.92154666914746e-17,
# 6.938893903907228e-18]
# --------------------------------------------------------------------------
    epochs = 200
    batch_size = 512
    # Train five times, mirroring the Erlang list comprehension above.
    for _ in range(5):
        result = client.train(model_id,
                              epochs,
                              batch_size)
        if DEBUGGER.active:
            print("{} - model {} - main ({}): result from train".format(basename(__file__), model_id,
                                                                        result))
# --------------------------------------------------------------------------
# >annlink:predict(Conn, ClientId, [[0,0], [0,1], [1,0], [1,1]]).
# [[0.0],[1.0],[1.0],[0.0]]
# --------------------------------------------------------------------------
data = [[0, 0], [0, 1], [1, 0], [1, 1]]
result = client.predict(model_id,
data)
if DEBUGGER.active:
print(
"{} - model {} - main ({}): result from predict".format(basename(__file__), model_id,
result))
    client.terminate_model(model_id)
# --------------------------------------------------------------------------
# Terminate client
# --------------------------------------------------------------------------
# Close the connection
transport.close()
if DEBUGGER.active:
print("{} - main (): Done".format(basename(__file__)))
if __name__ == "__main__":
if DEBUGGER.active:
print("{} - __main__ (): Start".format(basename(__file__)))
try:
main()
if DEBUGGER.active:
print("{} - __main__ (): Done".format(basename(__file__)))
except Thrift.TException as tx:
print("{} - __main__ (): Exception: {}".format(basename(__file__), tx.message))
| |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from six.moves import range
def prepare_remap(indices, n_full):
"""
    Prepare vector for remapping range `[0, n_full)` to its subset given
    by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
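# Example sketch: remapping the range [0, 5) to the subset [1, 3].
#
#   remap = prepare_remap(nm.array([1, 3]), 5)
#   # remap -> [-1, 0, -1, 1, -1]: full-range index 1 maps to 0, index 3
#   # maps to 1, and indices outside the subset map to -1.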
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
    Nodal normals are computed by simple averaging of the normals of the
    elements that each node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points very close to facet vertices.
coors = field.gel.surface_facet.coors
centre = coors.sum(axis=0) / coors.shape[0]
qp_coors = coors + 1e-8 * (centre - coors)
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.domain.surface_groups[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
        raise ValueError('region %s does not have complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
    # 2. get possible remaining cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
        raise ValueError('region %s does not have complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
la.normalize_vectors(edirs, eps=1e-12)
im = imap[nm.c_[path[:-1], path[1:]]]
for ii, edir in enumerate(edirs):
edge_dirs[im[ii]] += edir
la.normalize_vectors(edge_dirs, eps=1e-12)
if return_imap:
return edge_dirs, imap
else:
return edge_dirs
def get_min_value(dofs):
"""
Get a reasonable minimal value of DOFs suitable for extending over a
whole domain.
"""
if dofs.shape[1] > 1: # Vector.
val = 0.0
else: # Scalar.
val = dofs.min()
return val
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
average_surface=True):
"""
Extend cell data defined in a region to the whole domain.
Parameters
----------
data : array
The data defined in the region.
domain : FEDomain instance
The FE domain.
rname : str
The region name.
val : float, optional
        The value for filling cells not covered by the region. If not given,
        the smallest value in data (the smallest magnitude for vector data)
        is used.
is_surface : bool
If True, the data are defined on a surface region. In that case the
values are averaged or summed into the cells containing the region
surface faces (a cell can have several faces of the surface), see
`average_surface`.
average_surface : bool
If True, the data defined on a surface region are averaged, otherwise
the data are summed.
Returns
-------
edata : array
The data extended to all domain elements.
"""
n_el = domain.shape.n_el
if data.shape[0] == n_el: return data
if val is None:
if data.shape[2] > 1: # Vector.
val = nm.amin(nm.abs(data))
else: # Scalar.
val = nm.amin(data)
edata = nm.empty((n_el,) + data.shape[1:], dtype = nm.float64)
edata.fill(val)
region = domain.regions[rname]
if not is_surface:
edata[region.get_cells()] = data
else:
cells = region.get_cells(true_cells_only=False)
ucells = nm.unique(cells)
dii = region.facets
if len(cells) != len(dii):
raise ValueError('region %s has an inner face!'
% region.name)
if average_surface:
avg = nm.bincount(cells, minlength=n_el)[ucells]
else:
avg = 1.0
for ic in range(data.shape[2]):
evals = nm.bincount(cells, weights=data[dii, 0, ic, 0],
minlength=n_el)[ucells]
edata[ucells, 0, ic, 0] = evals / avg
return edata
def refine_mesh(filename, level):
"""
Uniformly refine `level`-times a mesh given by `filename`.
The refined mesh is saved to a file with name constructed from base
name of `filename` and `level`-times appended `'_r'` suffix.
Parameters
----------
filename : str
The mesh file name.
level : int
The refinement level.
"""
import os
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
if level > 0:
mesh = Mesh.from_file(filename)
domain = FEDomain(mesh.name, mesh)
for ii in range(level):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
suffix = os.path.splitext(filename)[1]
filename = domain.name + suffix
domain.mesh.write(filename, io='auto')
return filename
| |
"""Review Bot tool to run shellcheck."""
from __future__ import unicode_literals
import json
import re
from reviewbot.config import config
from reviewbot.tools.base import BaseTool
from reviewbot.utils.process import execute
from reviewbot.utils.text import split_comma_separated
class ShellCheckTool(BaseTool):
"""Review Bot tool to run shellcheck."""
name = 'ShellCheck'
version = '1.0'
description = ('Checks bash/sh shell scripts for style and programming '
'errors.')
timeout = 60
exe_dependencies = ['shellcheck']
file_patterns = ['*.bash', '*.bats', '*.dash', '*.ksh', '*.sh']
options = [
{
'name': 'severity',
'field_type': 'django.forms.ChoiceField',
'field_options': {
'label': 'Minimum Severity',
'help_text': ('Minimum severity of errors to consider '
'(style, info, warning, error).'),
'choices': (
('style', 'style'),
('info', 'info'),
('warning', 'warning'),
('error', 'error'),
),
'initial': 'style',
'required': True,
},
},
{
'name': 'exclude',
'field_type': 'django.forms.CharField',
'default': "",
'field_options': {
'label': 'Exclude',
                'help_text': ('A comma-separated list of codes to be '
                              'excluded from the report. This will be '
                              'passed to the --exclude command line '
                              'argument (e.g. SC1009,SC1073).'),
'required': False,
},
},
]
SHELL_RE = re.compile(
br'^#!(/bin/|/usr/bin/|/usr/local/bin/|/usr/bin/env )'
br'(bash|dash|ksh|sh)')
def get_can_handle_file(self, review_file, **kwargs):
"""Return whether this tool can handle a given file.
Args:
review_file (reviewbot.processing.review.File):
The file to check.
**kwargs (dict, unused):
Additional keyword arguments passed to :py:meth:`execute`.
This is intended for future expansion.
Returns:
bool:
``True`` if the file can be handled. ``False`` if it cannot.
"""
return (
super(ShellCheckTool, self).get_can_handle_file(review_file,
**kwargs) or
self.SHELL_RE.match(review_file.patched_file_contents)
)
def build_base_command(self, **kwargs):
"""Build the base command line used to review files.
Args:
**kwargs (dict, unused):
Additional keyword arguments.
Returns:
list of unicode:
The base command line.
"""
settings = self.settings
exclude = settings.get('exclude')
cmdline = [
config['exe_paths']['shellcheck'],
'--color=never',
'--format=json1',
'--severity=%s' % settings['severity'],
]
if exclude:
# Normalize the error list, preventing errors if there are spaces
# or redundant commas.
cmdline.append('--exclude=%s'
% ','.join(split_comma_separated(exclude)))
return cmdline
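    # As an illustration, with severity 'style' and exclude 'SC1009, SC1073',
    # build_base_command() yields a command line along these lines (the
    # shellcheck path comes from the Review Bot exe_paths configuration):
    #
    #   shellcheck --color=never --format=json1 --severity=style \
    #       --exclude=SC1009,SC1073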
def handle_file(self, f, path, base_command, **kwargs):
"""Perform a review of a single file.
Args:
f (reviewbot.processing.review.File):
The file to process.
path (unicode):
The local path to the patched file to review.
base_command (list of unicode):
The base command used to run shellcheck.
**kwargs (dict, unused):
Additional keyword arguments.
"""
output = execute(base_command + [path],
ignore_errors=True)
try:
results = json.loads(output)
except ValueError:
self.logger.error(
                'shellcheck returned an unexpected result. Check to '
'make sure that your configured settings in the integration '
'for Review Bot are correct. Shellcheck returned: %s',
output)
f.comment(
                text=('shellcheck returned an unexpected result. Check '
'to make sure that your configured settings in Review '
'Bot are correct.\n'
'\n'
'Error message:\n'
'```%s```'
% output.strip()),
first_line=None,
rich_text=True)
return
for comment in results.get('comments', []):
comment_text = comment['message']
first_line = comment['line']
num_lines = comment.get('endLine', first_line) - first_line + 1
fix = comment.get('fix') or {}
replacements = fix.get('replacements', [])
replacement_lines = []
if replacements:
replacement_lines = f.get_lines(first_line, num_lines)
# Iterate through all replacements in reverse order of
# precedence, and build new strings.
#
# Each replacement should only span one line. If we see
# more, log and scrap any replacement lines.
for replacement in sorted(replacements,
key=lambda r: (r['precedence'],
r['column']),
reverse=True):
replacement_linenum = replacement['line']
if replacement['endLine'] != replacement_linenum:
self.logger.warning(
'Saw multi-line replacement information from '
'ShellCheck, which was not possible when this '
'tool was developed. Please report this along '
'with the file that triggered it (%s) and the '
'comment payload information: %r',
comment['file'],
comment)
replacement_lines = []
break
replacement_insertion_point = replacement['insertionPoint']
if replacement_insertion_point not in ('beforeStart',
'afterEnd'):
self.logger.warning(
'Saw the replacement point "%s" from ShellCheck, '
'which was not available when this tool was '
'developed. Please report this along with the '
'file that triggered it (%s) and the comment '
'payload information: %r',
replacement_insertion_point,
comment['file'],
comment)
replacement_lines = []
break
replacement_norm_linenum = replacement_linenum - first_line
replacement_start_column = replacement['column']
replacement_end_column = replacement['endColumn']
replacement_text = replacement['replacement']
replacement_line = \
replacement_lines[replacement_norm_linenum]
replacement_lines[replacement_norm_linenum] = (
b'%s%s%s'
% (replacement_line[:replacement_start_column - 1],
replacement_text.encode('utf-8'),
replacement_line[replacement_end_column - 1:]))
if replacement_lines:
comment_text = (
'%s\n'
'\n'
'Suggested replacement:\n'
'```%s```'
% (comment_text,
b'\n'.join(replacement_lines).decode('utf-8').strip())
)
f.comment(text=comment_text,
first_line=first_line,
num_lines=num_lines,
start_column=comment.get('column'),
severity=comment.get('level'),
error_code=comment.get('code'),
rich_text=True)
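# For reference, a trimmed sketch of the json1 payload that handle_file()
# consumes above; the field names match what the code reads, but the values
# are illustrative:
#
#   {"comments": [
#       {"file": "test.sh", "line": 2, "endLine": 2, "column": 6,
#        "endColumn": 11, "level": "info", "code": 2086,
#        "message": "Double quote to prevent globbing and word splitting.",
#        "fix": {"replacements": [
#            {"line": 2, "endLine": 2, "column": 6, "endColumn": 6,
#             "precedence": 12, "insertionPoint": "afterEnd",
#             "replacement": "\""}]}}]}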
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import webob
import webob.exc
from cinder.api import common
from cinder import test
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""Unit tests for the `cinder.api.common.limited` method.
    This method takes in a list of items and, depending on the 'offset'
    and 'limit' GET params, returns a subset or complete set of the given
    items. A minimal sketch of these semantics follows this class.
    """
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
"""Test offset key works with 0."""
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
"""Test offset key works with a medium sized number."""
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
"""Test offset key works with a number over 1000 (max_limit)."""
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
"""Test offset key works with a blank offset."""
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""Test offset key works with a BAD offset."""
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
"""Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
"""Test limit of zero."""
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_bad(self):
"""Test with a bad limit."""
req = webob.Request.blank(u'/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_limit_medium(self):
"""Test limit of 10."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
"""Test limit of 3000."""
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
"""Test request with both limit and offset."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
"""Test a max_limit other than 1000."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
"""Test a negative limit."""
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""Test a negative offset."""
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
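# A minimal sketch of the `limited` semantics the tests above pin down; this
# is illustrative, not the actual cinder implementation. Negative values
# raise HTTPBadRequest (non-numeric parsing is not shown), limit=0 means
# "no explicit limit", and the slice is capped at max_limit.
def _limited_sketch(items, offset, limit, max_limit=1000):
    if offset < 0 or limit < 0:
        raise webob.exc.HTTPBadRequest()
    if limit == 0:
        limit = max_limit
    return items[offset:offset + min(limit, max_limit)]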
class PaginationParamsTest(test.TestCase):
"""Unit tests for `cinder.api.common.get_pagination_params` method.
This method takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_nonnumerical_limit(self):
"""Test nonnumerical limit param."""
req = webob.Request.blank('/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_no_params(self):
"""Test no params."""
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
"""Test valid marker param."""
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
"""Test valid limit param."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
"""Test invalid limit param."""
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
"""Test valid limit and marker parameters."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
class SortParamUtilsTest(test.TestCase):
def test_get_sort_params_defaults(self):
'''Verifies the default sort key and direction.'''
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
        '''Verifies that the defaults can be overridden.'''
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_sort_param(self):
'''Verifies a single sort key and direction.'''
params = {'sort': 'key1:dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_old_params(self):
'''Verifies a single sort key and direction.'''
params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default_sort_param(self):
'''Verifies a single sort value with a default direction.'''
params = {'sort': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_single_with_default_old_params(self):
'''Verifies a single sort value with a default direction.'''
params = {'sort_key': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_multiple_values(self):
'''Verifies multiple sort parameter values.'''
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_not_all_dirs(self):
'''Verifies multiple sort keys without all directions.'''
params = {'sort': 'key1:dir1,key2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
# Second key is missing the direction, should be defaulted
self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_override_default_dir(self):
'''Verifies multiple sort keys and overriding default direction.'''
params = {'sort': 'key1:dir1,key2,key3'}
sort_keys, sort_dirs = common.get_sort_params(params,
default_dir='foo')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs)
def test_get_sort_params_params_modified(self):
        '''Verifies that the input sort parameters are modified.'''
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
common.get_sort_params(params)
self.assertEqual({}, params)
        params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
common.get_sort_params(params)
self.assertEqual({}, params)
def test_get_sort_params_random_spaces(self):
'''Verifies that leading and trailing spaces are removed.'''
params = {'sort': ' key1 : dir1,key2: dir2 , key3 '}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs)
def test_get_params_mix_sort_and_old_params(self):
'''An exception is raised if both types of sorting params are given.'''
for params in ({'sort': 'k1', 'sort_key': 'k1'},
{'sort': 'k1', 'sort_dir': 'd1'},
{'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}):
self.assertRaises(webob.exc.HTTPBadRequest,
common.get_sort_params,
params)
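# A sketch of the 'sort' parameter parsing described by the tests above
# (illustrative only): entries are 'key[:dir]', surrounding whitespace is
# stripped, and a missing direction falls back to the default.
def _parse_sort_sketch(sort_value, default_dir='desc'):
    keys, dirs = [], []
    for entry in sort_value.split(','):
        key, _sep, direction = entry.strip().partition(':')
        keys.append(key.strip())
        dirs.append(direction.strip() or default_dir)
    return keys, dirs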
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
| |
import select
import socket
import sys
import threading
import urllib2
import copy
from time import time, sleep
UDP_SOCKET_TIMEOUT = 5
exitFlag = 0
exitValue = 0
def setExitFlag(n):
global exitFlag
exitFlag = n
class Data(object):
"""
The data aggregated from the uwsgi app
"""
def __init__(self):
self.data = {}
self.changed = {}
self.isChanged = False
self.dataLock = threading.Lock()
def parse_packet(self, packet):
tags = None
metadata = packet.split('|')
if (len(metadata) < 2):
raise Exception('Unparseable metric packet: %s' % packet)
name_value = metadata[0].split(':')
metric_type = metadata[1]
if (len(metadata) == 3):
tags = metadata[2].split(',')
if (len(tags) < 1 or not tags[0].startswith('#')):
raise Exception('Unparseable metric packet: %s' % packet)
tags[0] = tags[0][1:]
if (len(name_value) != 2):
raise Exception('Unparseable metric packet: %s' % packet)
metric = {
'name': name_value[0],
'value': name_value[1],
'metric_type': metric_type,
'tags': tags
}
self.dataLock.acquire()
if name_value[0] in self.data and self.data[name_value[0]]['value'] != name_value[1]:
self.changed[name_value[0]] = int(name_value[1])
if name_value[0] == "myapp.worker.requests":
self.isChanged = True
self.data[name_value[0]] = metric
self.dataLock.release()
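    # parse_packet() above accepts dogstatsd-style packets of the form
    # '<name>:<value>|<type>[|#<tag1>,<tag2>,...]'; an illustrative example:
    #
    #   myapp.worker.requests:42|g|#role:worker,env:test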
def new_packets(self, packets):
packets = unicode(packets, 'utf-8', errors='replace')
for packet in packets.splitlines():
if not packet.strip():
continue
self.parse_packet(packet)
def getChangedAttributes(self):
self.dataLock.acquire()
changedCopy = copy.deepcopy(self.changed)
self.dataLock.release()
return changedCopy
def get_data(self):
return self.data
def ready(self):
return self.isChanged
def reset(self):
self.isChanged = False
class Test(threading.Thread):
"""
    The class which triggers tests and checks results
"""
COUNT_METRICS = [
'myapp.worker.requests',
'myapp.worker.delta_requests',
'myapp.worker.core.requests'
]
INC_METRICS = [
'myapp.worker.total_tx',
'myapp.worker.respawns'
]
VAL_METRICS = {
'myapp.worker.avg_response_time': (50, 250)
}
def __init__(self, data):
threading.Thread.__init__(self)
self.data = data
self.tests = 4
self.success = 0
self.failure = 0
self.errors = []
def setExitValue(self, val):
global exitValue
exitValue = val
def check(self):
attributes_changed = self.data.getChangedAttributes()
if hasattr(self, 'oldData') and self.tests < 4:
for k in self.COUNT_METRICS:
if k in attributes_changed and k in self.oldData and attributes_changed[k] == self.oldData[k] + 1:
self.success += 1
else:
self.setExitValue(1)
self.failure += 1
self.errors.append(k)
for k in self.INC_METRICS:
if k in attributes_changed and k in self.oldData and attributes_changed[k] >= self.oldData[k]:
self.success += 1
else:
self.setExitValue(1)
self.failure += 1
self.errors.append(k)
for k, v in self.VAL_METRICS.iteritems():
if k in attributes_changed and k in self.oldData and v[0] <= attributes_changed[k] <= v[1]:
self.success += 1
else:
self.setExitValue(1)
self.failure += 1
self.errors.append(k)
self.oldData = attributes_changed
def printResult(self):
print "################################################################################"
print "RESULTS"
print "################################################################################"
print ""
print "SUCCESS: %d/%d" % (self.success, self.success + self.failure)
print ""
print "FAILURE: %d/%d" % (self.failure, self.success + self.failure)
print ""
print "################################################################################"
print ""
if self.failure > 0:
print "Metrics failed:"
for m in self.errors:
print "* %s" % m
print ""
print "################################################################################"
def run(self):
print "TEST IN PROGRESS"
sleep(10)
self.check()
while self.tests > 0:
ready = 0
timeout = 30
test = urllib2.urlopen("http://localhost:9090").read()
if test == "Hello World":
while not ready and timeout > 0:
if self.data.ready():
ready = 1
timeout -= 1
sleep(1)
if ready:
self.check()
self.data.reset()
else:
print "Test failed: cannot aggregate metrics change"
else:
print "Error while testing, please check if the web application is running"
self.tests -= 1
self.printResult()
setExitFlag(1)
class Server(threading.Thread):
"""
    The thread which will listen on the statsd port
"""
config = {
'host': 'localhost',
'port': 8125
}
def __init__(self, data):
threading.Thread.__init__(self)
self.data = data
self.buffer_size = 1024 * 8
self.address = (self.config['host'], self.config['port'])
def run(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(0)
try:
self.socket.bind(self.address)
except socket.gaierror:
if self.address[0] == 'localhost':
                print "Warning: localhost seems undefined in your hosts file, using 127.0.0.1 instead"
self.address = ('127.0.0.1', self.address[1])
self.socket.bind(self.address)
print "Listening on host & port: %s" % str(self.address)
sock = [self.socket]
select_select = select.select
timeout = UDP_SOCKET_TIMEOUT
while not exitFlag:
try:
ready = select_select(sock, [], [], timeout)
if ready[0]:
message = self.socket.recv(self.buffer_size)
self.data.new_packets(message)
except Exception:
print 'Error receiving datagram'
def main():
data = Data()
server = Server(data)
test = Test(data)
server.start()
test.start()
while not exitFlag:
        sleep(1)  # avoid busy-waiting while the server and test threads run
server.join()
test.join()
print 'END TEST: Exiting'
return exitValue
if __name__ == '__main__':
sys.exit(main())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Project : Tissu - Functions to handle settings with Fabric
# -----------------------------------------------------------------------------
# License : BSD License
# -----------------------------------------------------------------------------
# Authors : Thierry Stiegler <thierry.stiegler@gmail.com>
# -----------------------------------------------------------------------------
"""
Tissu API
"""
import os
from fabric.api import env
import tissu.constants as C
__all__ = [
'current_hostdef',
'env_attr_key_update',
'env_attr_update',
'get_hostdef',
'get_current_tissu',
'host_string',
'hoststring_from_hostdef',
'is_tissu_loaded',
'load_settings',
'set_settings_module',
'set_roledefs',
'set_parallel_execution',
'set_current_tissu'
]
os.environ[C.TISSU_SETTINGS_MODULE] = C.DEFAULT_TISSU_SETTINGS_MODULE
def set_settings_module(envname):
"""
    Load the settings module for a specific environment name
"""
filename = "%s.py" % envname
if os.path.exists(os.path.join(".", "settings", filename)):
os.environ[C.TISSU_SETTINGS_MODULE] = "settings.%s" % envname
return True
else:
return False
def set_roledefs():
"""
    Load the cluster node roles into the fabric env
"""
from tissu.conf import settings as tissu_settings
roledefs = getattr(tissu_settings, C.ROLES, None)
if roledefs is None or roledefs == {}:
return False
hosts = []
for role in roledefs.keys():
hosts = hosts + roledefs[role]
roledefs['all'] = list(set(hosts))
env.roledefs.update(roledefs)
return True
def get_hostdef(hoststr):
"""
    Get the host definition by its host string (user@host:port).
    If not found, an empty dict is returned.
"""
return env.hostdefs.get(hoststr, {})
def current_hostdef():
"""
Return the current host definition used by Fabric
"""
return get_hostdef(env.host_string)
def set_parallel_execution():
"""
Enable parallel execution for Fabric tasks
"""
from tissu.conf import settings as tissu_settings
env.parallel = tissu_settings.FABRIC_PARALLEL_EXECUTION
env.pool_size = tissu_settings.FABRIC_PARALLEL_POOLSIZE
def host_string(username, hostname, port=C.DEFAULT_SSH_PORT):
"""
    Format a host string from username, hostname and port as
    username@hostname:port
"""
return "%s@%s:%s" % (username, hostname, port)
def hoststring_from_hostdef(hostdef):
"""
    Return the host string for a host definition dictionary
"""
return host_string(
hostdef.get(C.USER),
hostdef.get(C.HOSTNAME),
hostdef.get(C.SSH_PORT, C.DEFAULT_SSH_PORT)
)
def set_current_tissu(envname):
"""
    Set the current tissu settings environment name
"""
setattr(env, C.CURRENT_TISSU, envname)
def get_current_tissu():
"""
    Get the current tissu settings environment name
"""
return getattr(env, C.CURRENT_TISSU, None)
def is_tissu_loaded():
"""
    Test if a tissu settings environment is loaded
"""
return get_current_tissu() is not None
def env_attr_key_update(attr, key, value):
"""
    Update one key of a dict attribute of the fabric environment
"""
item = getattr(env, attr)
item[key] = value
setattr(env, attr, item)
def env_attr_update(attr, value):
"""
    Update a dict attribute of the fabric environment
"""
item = getattr(env, attr)
item.update(value)
setattr(env, attr, item)
def sshconfig_exists(sshconfig_path):
"""
Test if the ssh config path exists
"""
return os.path.isfile(os.path.expanduser(sshconfig_path))
def load_settings(envname=None):
"""
    Load settings into the fabric environment and the tissu
    settings from a python file
"""
set_current_tissu(None)
if set_settings_module(envname) is False:
msg = "Unable to find %s settings" % envname
raise ValueError(msg)
from tissu.conf import settings as tissu_settings
env.use_ssh_config = False
if getattr(tissu_settings, C.FABRIC_USE_SSH_CONFIG, None):
if env.ssh_config_path and sshconfig_exists(env.ssh_config_path):
env.use_ssh_config = True
roledefs = getattr(tissu_settings, C.ROLES, {})
all_hosts = []
setattr(env, C.HOSTDEFS, {})
for role, hosts in roledefs.items():
role_hosts = [hoststring_from_hostdef(host) for host in hosts]
all_hosts += role_hosts
env_attr_key_update(C.ROLEDEFS, role, role_hosts)
passwords = {}
for host in hosts:
password = host.get(C.PASSWORD)
if password is not None:
key = hoststring_from_hostdef(host)
passwords[key] = password
env_attr_update(C.PASSWORDS, passwords)
hostdefs = dict((hoststring_from_hostdef(host), host) for host in hosts)
env_attr_update(C.HOSTDEFS, hostdefs)
keys = [host.get(C.HOSTKEY) for host in hosts]
for key in keys:
if key is not None:
if env.key_filename is None:
env.key_filename = [key, ]
elif key not in env.key_filename:
env.key_filename.append(key)
if len(getattr(env, C.ROLEDEFS)) == 0:
msg = "Unable to load roles is '%s' defined in your settings file ?"
raise KeyError(msg % C.ROLES)
else:
env_attr_key_update(C.ROLEDEFS, C.ALL_ROLE, list(set(all_hosts)))
set_parallel_execution()
setattr(env, C.MY_SETTINGS, tissu_settings)
set_current_tissu(envname)
return True
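# A hypothetical settings module layout that load_settings() can consume;
# the attribute and dict key names are really the values of the
# tissu.constants names used above (C.ROLES, C.USER, C.HOSTNAME,
# C.SSH_PORT, C.PASSWORD, C.HOSTKEY), so the spellings below are only
# illustrative:
#
#   ROLES = {
#       'web': [
#           {'user': 'deploy', 'hostname': 'web1.example.com',
#            'ssh_port': 22},
#       ],
#   }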
# EOF - vim: ts=4 sw=4 noet
| |
# -*- test-case-name: twisted.python.test.test_zipstream -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An incremental approach to unzipping files. This allows you to unzip a little
bit of a file at a time, which means you can report progress as a file unzips.
"""
import warnings
import zipfile
import os.path
import zlib
import struct
_fileHeaderSize = struct.calcsize(zipfile.structFileHeader)
class ChunkingZipFile(zipfile.ZipFile):
"""
A ZipFile object which, with readfile(), also gives you access to a
filelike object for each entry.
"""
def readfile(self, name):
"""
Return file-like object for name.
"""
if self.mode not in ("r", "a"):
raise RuntimeError('read() requires mode "r" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
zinfo = self.getinfo(name)
self.fp.seek(zinfo.header_offset, 0)
fheader = self.fp.read(_fileHeaderSize)
if fheader[0:4] != zipfile.stringFileHeader:
raise zipfile.BadZipfile("Bad magic number for file header")
fheader = struct.unpack(zipfile.structFileHeader, fheader)
fname = self.fp.read(fheader[zipfile._FH_FILENAME_LENGTH])
if fheader[zipfile._FH_EXTRA_FIELD_LENGTH]:
self.fp.read(fheader[zipfile._FH_EXTRA_FIELD_LENGTH])
if fname != zinfo.orig_filename:
raise zipfile.BadZipfile(
'File name in directory "%s" and header "%s" differ.' % (
zinfo.orig_filename, fname))
if zinfo.compress_type == zipfile.ZIP_STORED:
return ZipFileEntry(self, zinfo.compress_size)
elif zinfo.compress_type == zipfile.ZIP_DEFLATED:
return DeflatedZipFileEntry(self, zinfo.compress_size)
else:
raise zipfile.BadZipfile(
"Unsupported compression method %d for file %s" %
(zinfo.compress_type, name))
class _FileEntry(object):
"""
Abstract superclass of both compressed and uncompressed variants of
file-like objects within a zip archive.
@ivar chunkingZipFile: a chunking zip file.
@type chunkingZipFile: L{ChunkingZipFile}
@ivar length: The number of bytes within the zip file that represent this
file. (This is the size on disk, not the number of decompressed bytes
which will result from reading it.)
@ivar fp: the underlying file object (that contains pkzip data). Do not
touch this, please. It will quite likely move or go away.
@ivar closed: File-like 'closed' attribute; True before this file has been
closed, False after.
@type closed: L{bool}
@ivar finished: An older, broken synonym for 'closed'. Do not touch this,
please.
@type finished: L{int}
"""
def __init__(self, chunkingZipFile, length):
"""
Create a L{_FileEntry} from a L{ChunkingZipFile}.
"""
self.chunkingZipFile = chunkingZipFile
self.fp = self.chunkingZipFile.fp
self.length = length
self.finished = 0
self.closed = False
def isatty(self):
"""
Returns false because zip files should not be ttys
"""
return False
def close(self):
"""
Close self (file-like object)
"""
self.closed = True
self.finished = 1
del self.fp
def readline(self):
"""
Read a line.
"""
bytes = ""
for byte in iter(lambda : self.read(1), ""):
bytes += byte
if byte == "\n":
break
return bytes
def next(self):
"""
Implement next as file does (like readline, except raises StopIteration
at EOF)
"""
nextline = self.readline()
if nextline:
return nextline
raise StopIteration()
def readlines(self):
"""
Returns a list of all the lines
"""
return list(self)
def xreadlines(self):
"""
Returns an iterator (so self)
"""
return self
def __iter__(self):
"""
Returns an iterator (so self)
"""
return self
class ZipFileEntry(_FileEntry):
"""
File-like object used to read an uncompressed entry in a ZipFile
"""
def __init__(self, chunkingZipFile, length):
_FileEntry.__init__(self, chunkingZipFile, length)
self.readBytes = 0
def tell(self):
return self.readBytes
def read(self, n=None):
if n is None:
n = self.length - self.readBytes
if n == 0 or self.finished:
return ''
data = self.chunkingZipFile.fp.read(
min(n, self.length - self.readBytes))
self.readBytes += len(data)
if self.readBytes == self.length or len(data) < n:
self.finished = 1
return data
class DeflatedZipFileEntry(_FileEntry):
"""
File-like object used to read a deflated entry in a ZipFile
"""
def __init__(self, chunkingZipFile, length):
_FileEntry.__init__(self, chunkingZipFile, length)
self.returnedBytes = 0
self.readBytes = 0
self.decomp = zlib.decompressobj(-15)
self.buffer = ""
def tell(self):
return self.returnedBytes
def read(self, n=None):
if self.finished:
return ""
if n is None:
result = [self.buffer,]
result.append(
self.decomp.decompress(
self.chunkingZipFile.fp.read(
self.length - self.readBytes)))
result.append(self.decomp.decompress("Z"))
result.append(self.decomp.flush())
self.buffer = ""
self.finished = 1
result = "".join(result)
self.returnedBytes += len(result)
return result
else:
while len(self.buffer) < n:
data = self.chunkingZipFile.fp.read(
min(n, 1024, self.length - self.readBytes))
self.readBytes += len(data)
if not data:
result = (self.buffer
+ self.decomp.decompress("Z")
+ self.decomp.flush())
self.finished = 1
self.buffer = ""
self.returnedBytes += len(result)
return result
else:
self.buffer += self.decomp.decompress(data)
result = self.buffer[:n]
self.buffer = self.buffer[n:]
self.returnedBytes += len(result)
return result
def unzip(filename, directory=".", overwrite=0):
"""
Unzip the file
@param filename: the name of the zip file
@param directory: the directory into which the files will be
extracted
@param overwrite: if on, overwrite files when they exist. You can
still get an error if you try to create a directory over a file
with the same name or vice-versa.
"""
for i in unzipIter(filename, directory, overwrite):
pass
DIR_BIT = 16
def unzipIter(filename, directory='.', overwrite=0):
"""
Return a generator for the zipfile. This implementation will yield
after every file.
The value it yields is the number of files left to unzip.
"""
zf = zipfile.ZipFile(filename, 'r')
names = zf.namelist()
if not os.path.exists(directory):
os.makedirs(directory)
remaining = len(zf.namelist())
for entry in names:
remaining -= 1
isdir = zf.getinfo(entry).external_attr & DIR_BIT
f = os.path.join(directory, entry)
if isdir:
# overwrite flag only applies to files
if not os.path.exists(f):
os.makedirs(f)
else:
# create the directory the file will be in first,
# since we can't guarantee it exists
fdir = os.path.split(f)[0]
if not os.path.exists(fdir):
                os.makedirs(fdir)
if overwrite or not os.path.exists(f):
outfile = file(f, 'wb')
outfile.write(zf.read(entry))
outfile.close()
yield remaining
def countZipFileChunks(filename, chunksize):
"""
Predict the number of chunks that will be extracted from the entire
zipfile, given chunksize blocks.
"""
totalchunks = 0
zf = ChunkingZipFile(filename)
for info in zf.infolist():
totalchunks += countFileChunks(info, chunksize)
return totalchunks
def countFileChunks(zipinfo, chunksize):
"""
Count the number of chunks that will result from the given L{ZipInfo}.
@param zipinfo: a L{zipfile.ZipInfo} instance describing an entry in a zip
archive to be counted.
@return: the number of chunks present in the zip file. (Even an empty file
counts as one chunk.)
@rtype: L{int}
"""
count, extra = divmod(zipinfo.file_size, chunksize)
if extra > 0:
count += 1
return count or 1
def countZipFileEntries(filename):
"""
Count the number of entries in a zip archive. (Don't use this function.)
@param filename: The filename of a zip archive.
@type filename: L{str}
"""
warnings.warn("countZipFileEntries is deprecated.",
DeprecationWarning, 2)
zf = zipfile.ZipFile(filename)
return len(zf.namelist())
def unzipIterChunky(filename, directory='.', overwrite=0,
chunksize=4096):
"""
Return a generator for the zipfile. This implementation will yield after
every chunksize uncompressed bytes, or at the end of a file, whichever
comes first.
The value it yields is the number of chunks left to unzip.
"""
czf = ChunkingZipFile(filename, 'r')
if not os.path.exists(directory):
os.makedirs(directory)
remaining = countZipFileChunks(filename, chunksize)
names = czf.namelist()
infos = czf.infolist()
for entry, info in zip(names, infos):
isdir = info.external_attr & DIR_BIT
f = os.path.join(directory, entry)
if isdir:
# overwrite flag only applies to files
if not os.path.exists(f):
os.makedirs(f)
remaining -= 1
yield remaining
else:
# create the directory the file will be in first,
# since we can't guarantee it exists
fdir = os.path.split(f)[0]
if not os.path.exists(fdir):
                os.makedirs(fdir)
if overwrite or not os.path.exists(f):
outfile = file(f, 'wb')
fp = czf.readfile(entry)
if info.file_size == 0:
remaining -= 1
yield remaining
while fp.tell() < info.file_size:
hunk = fp.read(chunksize)
outfile.write(hunk)
remaining -= 1
yield remaining
outfile.close()
else:
remaining -= countFileChunks(info, chunksize)
yield remaining
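# A minimal progress-reporting sketch using the chunky iterator above
# (hypothetical file names):
#
#   total = countZipFileChunks('archive.zip', 4096)
#   for remaining in unzipIterChunky('archive.zip', 'outdir'):
#       print 'unzipped %.1f%%' % (100.0 * (total - remaining) / total)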
| |
"""deCONZ climate platform tests."""
from copy import deepcopy
import pytest
from homeassistant.components.climate import (
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_COMFORT,
)
from homeassistant.components.deconz.climate import (
DECONZ_FAN_SMART,
DECONZ_PRESET_MANUAL,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
DOMAIN as DECONZ_DOMAIN,
)
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_OFF
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
SENSORS = {
"1": {
"id": "Thermostat id",
"name": "Thermostat",
"type": "ZHAThermostat",
"state": {"on": True, "temperature": 2260, "valve": 30},
"config": {
"battery": 100,
"heatsetpoint": 2200,
"mode": "auto",
"offset": 10,
"reachable": True,
},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "CLIP thermostat id",
"name": "CLIP thermostat",
"type": "CLIPThermostat",
"state": {"on": True, "temperature": 2260, "valve": 30},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
}
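# The deCONZ websocket events injected throughout these tests share this
# shape (only the keys exercised below are shown):
#
#   {"t": "event", "e": "changed" or "added", "r": "sensors",
#    "id": "<sensor id>", "state": {...} and/or "config": {...}}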
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, CLIMATE_DOMAIN, {"climate": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_sensors(hass):
"""Test that no sensors in deconz results in no climate entities."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_simple_climate_device(hass):
"""Test successful creation of climate entities.
This is a simple water heater that only supports setting temperature and on and off.
"""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {
"battery": 59,
"displayflipped": None,
"heatsetpoint": 2100,
"locked": None,
"mountingmode": None,
"offset": 0,
"on": True,
"reachable": True,
},
"ep": 1,
"etag": "6130553ac247174809bae47144ee23f8",
"lastseen": "2020-11-29T19:31Z",
"manufacturername": "Danfoss",
"modelid": "eTRV0100",
"name": "thermostat",
"state": {
"errorcode": None,
"lastupdated": "2020-11-29T19:28:40.665",
"mountingmodeactive": False,
"on": True,
"temperature": 2102,
"valve": 24,
"windowopen": "Closed",
},
"swversion": "01.02.0008 01.02",
"type": "ZHAThermostat",
"uniqueid": "14:b4:57:ff:fe:d5:4e:77-01-0201",
}
}
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
climate_thermostat = hass.states.get("climate.thermostat")
assert climate_thermostat.state == HVAC_MODE_HEAT
assert climate_thermostat.attributes["hvac_modes"] == [
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
]
assert climate_thermostat.attributes["current_temperature"] == 21.0
assert climate_thermostat.attributes["temperature"] == 21.0
assert hass.states.get("sensor.thermostat_battery_level").state == "59"
# Event signals thermostat configured off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == STATE_OFF
# Event signals thermostat state on
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == HVAC_MODE_HEAT
# Verify service calls
thermostat_device = gateway.api.sensors["0"]
# Service turn on thermostat
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/sensors/0/config", json={"on": True})
    # Service turn off thermostat
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/sensors/0/config", json={"on": False})
# Service set HVAC mode to unsupported value
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
async def test_climate_device_without_cooling_support(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
climate_thermostat = hass.states.get("climate.thermostat")
assert climate_thermostat.state == HVAC_MODE_AUTO
assert climate_thermostat.attributes["hvac_modes"] == [
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
]
assert climate_thermostat.attributes["current_temperature"] == 22.6
assert climate_thermostat.attributes["temperature"] == 22.0
assert hass.states.get("sensor.thermostat") is None
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
assert hass.states.get("climate.presence_sensor") is None
assert hass.states.get("climate.clip_thermostat") is None
# Event signals thermostat configured off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"config": {"mode": "off"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == STATE_OFF
# Event signals thermostat state on
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"config": {"mode": "other"},
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == HVAC_MODE_HEAT
# Event signals thermostat state off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == STATE_OFF
# Verify service calls
thermostat_device = gateway.api.sensors["1"]
# Service set HVAC mode to auto
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "auto"}
)
# Service set HVAC mode to heat
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "heat"}
)
# Service set HVAC mode to off
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "off"}
)
# Service set HVAC mode to unsupported value
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_COOL},
blocking=True,
)
# Service set temperature to 20
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 20},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"heatsetpoint": 2000.0}
)
# Service set temperature without providing temperature attribute
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 30,
ATTR_TARGET_TEMP_LOW: 10,
},
blocking=True,
)
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
async def test_climate_device_with_cooling_support(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {
"battery": 25,
"coolsetpoint": None,
"fanmode": None,
"heatsetpoint": 2222,
"mode": "heat",
"offset": 0,
"on": True,
"reachable": True,
},
"ep": 1,
"etag": "074549903686a77a12ef0f06c499b1ef",
"lastseen": "2020-11-27T13:45Z",
"manufacturername": "Zen Within",
"modelid": "Zen-01",
"name": "Zen-01",
"state": {
"lastupdated": "2020-11-27T13:42:40.863",
"on": False,
"temperature": 2320,
},
"type": "ZHAThermostat",
"uniqueid": "00:24:46:00:00:11:6f:56-01-0201",
}
}
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
climate_thermostat = hass.states.get("climate.zen_01")
assert climate_thermostat.state == HVAC_MODE_HEAT
assert climate_thermostat.attributes["hvac_modes"] == [
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
]
assert climate_thermostat.attributes["current_temperature"] == 23.2
assert climate_thermostat.attributes["temperature"] == 22.2
assert hass.states.get("sensor.zen_01_battery_level").state == "25"
# Event signals thermostat state cool
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"mode": "cool"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.zen_01").state == HVAC_MODE_COOL
# Verify service calls
thermostat_device = gateway.api.sensors["0"]
# Service set temperature to 20
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_TEMPERATURE: 20},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/0/config", json={"coolsetpoint": 2000.0}
)
async def test_climate_device_with_fan_support(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {
"battery": 25,
"coolsetpoint": None,
"fanmode": "auto",
"heatsetpoint": 2222,
"mode": "heat",
"offset": 0,
"on": True,
"reachable": True,
},
"ep": 1,
"etag": "074549903686a77a12ef0f06c499b1ef",
"lastseen": "2020-11-27T13:45Z",
"manufacturername": "Zen Within",
"modelid": "Zen-01",
"name": "Zen-01",
"state": {
"lastupdated": "2020-11-27T13:42:40.863",
"on": False,
"temperature": 2320,
},
"type": "ZHAThermostat",
"uniqueid": "00:24:46:00:00:11:6f:56-01-0201",
}
}
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
climate_thermostat = hass.states.get("climate.zen_01")
assert climate_thermostat.state == HVAC_MODE_HEAT
assert climate_thermostat.attributes["fan_mode"] == FAN_AUTO
assert climate_thermostat.attributes["fan_modes"] == [
DECONZ_FAN_SMART,
FAN_AUTO,
FAN_HIGH,
FAN_MEDIUM,
FAN_LOW,
FAN_ON,
FAN_OFF,
]
    # Event signals unsupported fan mode; it defaults to off while the
    # thermostat is off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"fanmode": "unsupported"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.zen_01").attributes["fan_mode"] == FAN_OFF
# Event signals unsupported fan mode
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"fanmode": "unsupported"},
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.zen_01").attributes["fan_mode"] == FAN_ON
# Event signals unsupported fan mode
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"fanmode": "unsupported"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.zen_01").attributes["fan_mode"] == FAN_ON
# Verify service calls
thermostat_device = gateway.api.sensors["0"]
# Service set fan mode to off
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_FAN_MODE: FAN_OFF},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/0/config", json={"fanmode": "off"}
)
# Service set fan mode to custom deCONZ mode smart
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_FAN_MODE: DECONZ_FAN_SMART},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/0/config", json={"fanmode": "smart"}
)
# Service set fan mode to unsupported value
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_FAN_MODE: "unsupported"},
blocking=True,
)
async def test_climate_device_with_preset(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = {
"0": {
"config": {
"battery": 25,
"coolsetpoint": None,
"fanmode": None,
"heatsetpoint": 2222,
"mode": "heat",
"preset": "auto",
"offset": 0,
"on": True,
"reachable": True,
},
"ep": 1,
"etag": "074549903686a77a12ef0f06c499b1ef",
"lastseen": "2020-11-27T13:45Z",
"manufacturername": "Zen Within",
"modelid": "Zen-01",
"name": "Zen-01",
"state": {
"lastupdated": "2020-11-27T13:42:40.863",
"on": False,
"temperature": 2320,
},
"type": "ZHAThermostat",
"uniqueid": "00:24:46:00:00:11:6f:56-01-0201",
}
}
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
climate_zen_01 = hass.states.get("climate.zen_01")
assert climate_zen_01.state == HVAC_MODE_HEAT
assert climate_zen_01.attributes["current_temperature"] == 23.2
assert climate_zen_01.attributes["temperature"] == 22.2
assert climate_zen_01.attributes["preset_mode"] == "auto"
assert climate_zen_01.attributes["preset_modes"] == [
"auto",
"boost",
"comfort",
"complex",
"eco",
"holiday",
"manual",
]
# Event signals deCONZ preset
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"preset": "manual"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert (
hass.states.get("climate.zen_01").attributes["preset_mode"]
== DECONZ_PRESET_MANUAL
)
# Event signals unknown preset
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "0",
"config": {"preset": "unsupported"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.zen_01").attributes["preset_mode"] is None
# Verify service calls
thermostat_device = gateway.api.sensors["0"]
# Service set preset to HASS preset
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_PRESET_MODE: PRESET_COMFORT},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/0/config", json={"preset": "comfort"}
)
# Service set preset to custom deCONZ preset
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_PRESET_MODE: DECONZ_PRESET_MANUAL},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/0/config", json={"preset": "manual"}
)
# Service set preset to unsupported value
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: "climate.zen_01", ATTR_PRESET_MODE: "unsupported"},
blocking=True,
)
async def test_clip_climate_device(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass,
options={CONF_ALLOW_CLIP_SENSOR: True},
get_state_response=data,
)
assert len(hass.states.async_all()) == 3
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert hass.states.get("sensor.thermostat") is None
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
assert hass.states.get("climate.clip_thermostat").state == HVAC_MODE_HEAT
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("climate.clip_thermostat") is None
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
assert hass.states.get("climate.clip_thermostat").state == HVAC_MODE_HEAT
async def test_verify_state_update(hass):
"""Test that state update properly."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert gateway.api.sensors["1"].changed_keys == {"state", "r", "t", "on", "e", "id"}
async def test_add_new_climate_device(hass):
"""Test that adding a new climate device works."""
config_entry = await setup_deconz_integration(hass)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 0
state_added_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": deepcopy(SENSORS["1"]),
}
gateway.api.event_handler(state_added_event)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IAM Resource Policy Checker
---------------------------
When securing resources with iam policies, we want to parse and evaluate
the resource's policy for any cross account or public access grants that
are not intended.
In general, iam policies can be complex, and where possible using iam
simulate is preferable, but simulation requires passing the caller's arn,
which is not feasible when we're trying to determine what the valid set
of callers is.
References
- IAM Policy Evaluation - http://goo.gl/sH5Dt5
- IAM Policy Reference - http://goo.gl/U0a06y
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import six
from c7n.filters import Filter
from c7n.resolver import ValuesFrom
from c7n.utils import type_schema
def _account(arn):
# we could try/except here, at some minor runtime cost; instead we pass
# invalid values through unchanged, which effectively flags them
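# e.g. _account('arn:aws:iam::123456789012:root') -> '123456789012'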
if ':' not in arn:
return arn
return arn.split(':', 5)[4]
class PolicyChecker(object):
"""
checker_config:
- check_actions: only check one of the specified actions
- everyone_only: only check for wildcard permission grants
- allowed_accounts: permission grants to these accounts are okay
- whitelist_conditions: a list of conditions that are considered
sufficient to whitelist the statement.
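Example usage (a minimal sketch; the account id is hypothetical):
checker = PolicyChecker({'allowed_accounts': ('111111111111',)})
violations = checker.check(policy_text)
# statements granting access to any other principal are returned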
"""
def __init__(self, checker_config):
self.checker_config = checker_config
# Config properties
@property
def allowed_accounts(self):
return self.checker_config.get('allowed_accounts', ())
@property
def everyone_only(self):
return self.checker_config.get('everyone_only', False)
@property
def check_actions(self):
return self.checker_config.get('check_actions', ())
@property
def whitelist_conditions(self):
return self.checker_config.get('whitelist_conditions', ())
# Policy statement handling
def check(self, policy_text):
if isinstance(policy_text, six.string_types):
policy = json.loads(policy_text)
else:
policy = policy_text
violations = []
for s in policy.get('Statement', ()):
if self.handle_statement(s):
violations.append(s)
return violations
def handle_statement(self, s):
if (all((self.handle_principal(s),
self.handle_effect(s),
self.handle_action(s))) and not self.handle_condition(s)):
return s
def handle_action(self, s):
if self.check_actions:
actions = s.get('Action')
actions = isinstance(actions, six.string_types) and (actions,) or actions
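# Note: fnmatch.filter treats the statement's action as the pattern,
# so a broad grant like 's3:*' matches a configured check_action of
# 's3:GetObject' (hypothetical values).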
for a in actions:
if fnmatch.filter(self.check_actions, a):
return True
return False
return True
def handle_effect(self, s):
if s['Effect'] == 'Allow':
return True
def handle_principal(self, s):
if 'NotPrincipal' in s:
return True
if 'Principal' not in s:
return True
# Skip service principals
if 'Service' in s['Principal']:
s['Principal'].pop('Service')
if not s['Principal']:
return False
if isinstance(s['Principal'], six.string_types):
p = s['Principal']
else:
p = s['Principal']['AWS']
principal_ok = True
p = isinstance(p, six.string_types) and (p,) or p
for pid in p:
if pid == '*':
principal_ok = False
elif self.everyone_only:
continue
elif pid.startswith('arn:aws:iam::cloudfront:user'):
continue
else:
account_id = _account(pid)
if account_id not in self.allowed_accounts:
principal_ok = False
return not principal_ok
def handle_condition(self, s):
op, key, value = self.normalize_condition(s)
if not op:
return False
if key in self.whitelist_conditions:
return True
handler_name = "handle_%s" % key.replace('-', '_').replace(':', '_')
handler = getattr(self, handler_name, None)
if handler is None:
print("no handler:%s op:%s key:%s value:%s" % (
handler_name, op, key, value))
return
return not handler(s, op, key, value)
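# A sketch of what normalize_condition() below produces, with
# hypothetical values:
#   {'Condition': {'StringEquals': {'aws:SourceOwner': '111122223333'}}}
#   -> ('StringEquals', 'aws:sourceowner', ('111122223333',))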
def normalize_condition(self, s):
if 'Condition' not in s:
return None, None, None
conditions = (
'StringEquals',
'StringEqualsIgnoreCase',
'StringLike',
'ArnEquals',
'ArnLike',
'IpAddress',
'NotIpAddress')
# AWS set operators prefix a condition, e.g. "ForAllValues:StringEquals"
set_conditions = ('ForAllValues', 'ForAnyValue')
assert len(s.get('Condition').keys()) == 1, "Multiple conditions present in iam statement"
s_cond_op = list(s['Condition'].keys())[0]
if s_cond_op not in conditions:
if not any(s_cond_op.startswith(c) for c in set_conditions):
return None, None, None
assert len(s['Condition'][s_cond_op]) == 1, "Multiple keys on condition"
s_cond_key = list(s['Condition'][s_cond_op].keys())[0]
s_cond_value = s['Condition'][s_cond_op][s_cond_key]
s_cond_value = (
isinstance(s_cond_value, six.string_types) and (s_cond_value,) or s_cond_value)
return s_cond_op, s_cond_key.lower(), s_cond_value
# Condition handlers
# kms specific
def handle_kms_calleraccount(self, s, op, key, values):
return bool(set(map(_account, values)).difference(self.allowed_accounts))
# sns default policy
def handle_aws_sourceowner(self, s, op, key, values):
return bool(set(map(_account, values)).difference(self.allowed_accounts))
# s3 logging
def handle_aws_sourcearn(self, s, op, key, values):
return bool(set(map(_account, values)).difference(self.allowed_accounts))
def handle_aws_sourceip(self, s, op, key, values):
return False
class CrossAccountAccessFilter(Filter):
"""Check a resource's embedded iam policy for cross account access.
"""
schema = type_schema(
'cross-account',
# only consider policies that grant one of the given actions.
actions={'type': 'array', 'items': {'type': 'string'}},
# only consider policies which grant to *
everyone_only={'type': 'boolean'},
# disregard statements using these conditions.
whitelist_conditions={'type': 'array', 'items': {'type': 'string'}},
# white list accounts
whitelist_from=ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
policy_attribute = 'Policy'
annotation_key = 'CrossAccountViolations'
checker_factory = PolicyChecker
def process(self, resources, event=None):
self.everyone_only = self.data.get('everyone_only', False)
self.conditions = set(self.data.get(
'whitelist_conditions',
("aws:sourcevpce", "aws:sourcevpc", "aws:userid", "aws:username")))
self.actions = self.data.get('actions', ())
self.accounts = self.get_accounts()
self.checker = self.checker_factory(
{'allowed_accounts': self.accounts,
'check_actions': self.actions,
'everyone_only': self.everyone_only,
'whitelist_conditions': self.conditions})
return super(CrossAccountAccessFilter, self).process(resources, event)
def get_accounts(self):
owner_id = self.manager.config.account_id
accounts = set(self.data.get('whitelist', ()))
if 'whitelist_from' in self.data:
values = ValuesFrom(self.data['whitelist_from'], self.manager)
accounts = accounts.union(values.get_values())
accounts.add(owner_id)
return accounts
def get_resource_policy(self, r):
return r.get(self.policy_attribute, None)
def __call__(self, r):
p = self.get_resource_policy(r)
if p is None:
return False
violations = self.checker.check(p)
if violations:
r[self.annotation_key] = violations
return True
#!/usr/bin/env python
"""GUI elements allowing launching and management of flows."""
import os
import StringIO
from grr.gui import plot_lib
from grr.gui import renderers
from grr.gui.plugins import crash_view
from grr.gui.plugins import fileview
from grr.gui.plugins import forms
from grr.gui.plugins import semantic
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import utils
class LaunchFlows(renderers.Splitter):
"""Launches a new flow."""
description = "Start new flows"
behaviours = frozenset(["Host"])
order = 10
left_renderer = "FlowTree"
top_right_renderer = "SemanticProtoFlowForm"
bottom_right_renderer = "FlowManagementTabs"
class FlowTree(renderers.TreeRenderer):
"""Show all flows in a tree.
Generated Javascript Events:
- flow_select(flow_path) - The full path for the flow name (category +
name).
"""
publish_select_queue = "flow_select"
# Only show flows in the tree that specify all of these behaviours in their
# behaviours attribute.
flow_behaviors_to_render = flow.FlowBehaviour("Client Flow")
def EnumerateCategories(self, path, request, flow_behaviors_to_render):
"""Search through all flows for categories starting with path."""
categories = set()
flows = set()
# Use an object for path manipulations.
path = rdfvalue.RDFURN(path)
for name, cls in flow.GRRFlow.classes.items():
# Flows without a category do not show up in the GUI.
if not getattr(cls, "category", None):
continue
# If a flow declares AUTHORIZED_LABELS, the user must have a matching
# label to see it.
if cls.AUTHORIZED_LABELS:
try:
data_store.DB.security_manager.CheckUserLabels(
request.token.username, cls.AUTHORIZED_LABELS,
token=request.token)
except access_control.UnauthorizedAccess:
continue
# Skip if there are behaviours that are not supported by the class.
if not flow_behaviors_to_render.IsSupported(cls.behaviours):
continue
category = rdfvalue.RDFURN(cls.category)
if category == path:
flows.add((name, cls.friendly_name))
else:
relative_path = category.RelativeName(path)
# This category starts with this path
if relative_path is not None:
categories.add(relative_path.split("/")[0])
return categories, flows
def RenderBranch(self, path, request):
"""Renders tree leafs for flows."""
# Retrieve the user's GUI mode preferences.
self.user = request.user
try:
user_record = aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(self.user), "GRRUser",
token=request.token)
user_preferences = user_record.Get(user_record.Schema.GUI_SETTINGS)
except IOError:
user_preferences = aff4.GRRUser.SchemaCls.GUI_SETTINGS()
flow_behaviors_to_render = (self.flow_behaviors_to_render +
user_preferences.mode)
categories, flows = self.EnumerateCategories(path, request,
flow_behaviors_to_render)
for category in sorted(categories):
self.AddElement(category)
for name, friendly_name in sorted(flows):
self.AddElement(name, behaviour="leaf", friendly_name=friendly_name)
class FlowManagementTabs(renderers.TabLayout):
"""Show information about the flows.
Listening Javascript Events:
- flow_select(flow_path) - A selection event on the tree informing us of the
flow path. The basename of flow_path is the name of the flow.
Internal State:
- flow_path - The category and name of the flow we display.
"""
names = ["Flow Information", "Current Running Flows"]
delegated_renderers = ["FlowInformation", "ListFlowsTable"]
tab_hash = "ft"
def Layout(self, request, response):
self.state = dict(flow_path=request.REQ.get("flow_path"),
client_id=request.REQ.get("client_id"))
response = super(FlowManagementTabs, self).Layout(request, response)
return self.CallJavascript(response, "FlowManagementTabs.Layout")
class FlowInformation(renderers.TemplateRenderer):
"""Displays information about the flow.
Post Parameters:
- flow_path: The category + flow name for use to display.
"""
layout_template = renderers.Template("""
<h3>{{ this.flow_name|escape }}</h3>
<p>{{ this.flow_doc|linebreaks }}</p>
<pre>
Prototype: {{ this.prototype|escape }}
{{ this.prototype_doc|escape }}
</pre>
<table class="table table-condensed table-bordered full-width fixed-columns">
<colgroup>
<col style="width: 20%" />
<col style="width: 60%" />
<col style="width: 20%" />
</colgroup>
<thead>
<tr>
<th class="ui-state-default">State</th>
<th class="ui-state-default">Description</th>
<th class="ui-state-default">Next States</th></tr>
</thead>
<tbody>
{% for state, doc, next in this.states %}
<tr><td class='state'>{{ state|escape }}</td>
<td class='description'>{{ doc|escape }}</td>
<td class='text'>{{ next|escape }}</td></tr>
{% endfor %}
</tbody>
</table>
""")
# This is prepended to flow args to eliminate clashes with other parameters.
arg_prefix = "v_"
def Layout(self, request, response):
"""Update the progress bar based on the progress reported."""
self.flow_name = request.REQ.get("flow_path", "").split("/")[-1]
try:
flow_class = flow.GRRFlow.classes[self.flow_name]
if not aff4.issubclass(flow_class, flow.GRRFlow):
return response
except KeyError:
return response
self.states = []
# Fill in information about each state
for state_method in flow_class.__dict__.values():
try:
next_states = state_method.next_states
# Only show the first line of the doc string.
try:
func_doc = state_method.func_doc.split("\n")[0].strip()
except AttributeError:
func_doc = ""
self.states.append((state_method.func_name,
func_doc, ", ".join(next_states)))
except AttributeError:
pass
# Now fill in information about each arg to this flow.
prototypes = []
for type_descriptor in flow_class.args_type.type_infos:
if not type_descriptor.hidden:
prototypes.append("%s" % (type_descriptor.name))
self.prototype = "%s(%s)" % (flow_class.__name__, ", ".join(prototypes))
self.flow_doc = flow_class.__doc__
return super(FlowInformation, self).Layout(request, response)
class SemanticProtoFlowForm(renderers.TemplateRenderer):
"""Render a flow based on its semantic information."""
layout_template = renderers.Template("""
<div class="FormBody" id="{{unique|escape}}">
{% if this.flow_found %}
<form id='form_{{unique|escape}}' class="form-horizontal FormData"
data-flow_path='{{this.flow_name|escape}}'
data-dom_node='{{id|escape}}'
>
{{this.form|safe}}
<hr/>
{{this.runner_form|safe}}
<div class="form-group">
<div class="col-sm-offset-2 col-sm-3" style="padding-top: 1em">
<button id='submit_{{unique|escape}}' class="btn btn-success Launch" >
Launch
</button>
</div>
</div>
</form>
{% else %}
Please select a flow to launch from the tree on the left.
{% endif %}
</div>
<div id="contents_{{unique}}"></div>
""") + renderers.TemplateRenderer.help_template
ajax_template = renderers.Template("""
Launched Flow {{this.flow_name}} with the following args:<br>
<div>
{{this.args_html|safe}}
{{this.runner_args_html|safe}}
</div>
""")
context_help_url = "user_manual.html#_flows"
def _GetFlowName(self, request):
return os.path.basename(request.REQ.get("flow_path", ""))
def Layout(self, request, response):
"""Render the form for creating the flow args."""
self.flow_name = self._GetFlowName(request)
self.flow_cls = flow.GRRFlow.classes.get(self.flow_name)
if aff4.issubclass(self.flow_cls, flow.GRRFlow):
self.flow_found = True
self.form = forms.SemanticProtoFormRenderer(
self.flow_cls.GetDefaultArgs(token=request.token),
prefix="args").RawHTML(request)
self.runner_form = forms.SemanticProtoFormRenderer(
flow_runner.FlowRunnerArgs(flow_name=self.flow_name),
prefix="runner").RawHTML(request)
response = super(SemanticProtoFlowForm, self).Layout(request, response)
return self.CallJavascript(response, "SemanticProtoFlowForm.Layout",
renderer=self.__class__.__name__)
def RenderAjax(self, request, response):
"""Parse the flow args from the form and launch the flow."""
self.flow_name = self._GetFlowName(request)
self.client_id = request.REQ.get("client_id", None)
self.dom_node = request.REQ.get("dom_node")
flow_cls = flow.GRRFlow.classes.get(self.flow_name)
if flow_cls is not None:
self.args = forms.SemanticProtoFormRenderer(
flow_cls.args_type(), prefix="args").ParseArgs(request)
try:
self.args.Validate()
except ValueError as e:
return self.CallJavascript(
response, "SemanticProtoFlowForm.RenderAjaxError", error=str(e))
self.runner_args = forms.SemanticProtoFormRenderer(
flow_runner.FlowRunnerArgs(), prefix="runner_").ParseArgs(request)
self.runner_args.Validate()
self.flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
flow_name=self.flow_name,
token=request.token,
args=self.args,
runner_args=self.runner_args)
self.args_html = semantic.FindRendererForObject(self.args).RawHTML(request)
self.runner_args_html = semantic.FindRendererForObject(
self.runner_args).RawHTML(request)
response = renderers.TemplateRenderer.Layout(
self, request, response, apply_template=self.ajax_template)
return self.CallJavascript(response, "SemanticProtoFlowForm.RenderAjax",
renderer=self.__class__.__name__,
dom_node=self.dom_node)
class FlowFormCancelAction(renderers.TemplateRenderer):
"""Handle submission of a Cancel Flow button press.
Post Parameters:
- flow_id: The flow to cancel.
"""
layout_template = renderers.Template("")
def Layout(self, request, response):
# We can't terminate the flow directly through flow.GRRFlow.TerminateFlow,
# as that requires writing to the data store, which we're not allowed to do
# from the GUI. Therefore we use the dedicated TerminateFlow flow.
flow.GRRFlow.StartFlow(
flow_name="TerminateFlow",
flow_urn=rdfvalue.RDFURN(request.REQ.get("flow_id")),
reason="Cancelled in GUI", token=request.token)
return super(FlowFormCancelAction, self).Layout(request, response)
class FlowStateIcon(semantic.RDFValueRenderer):
"""Render the flow state by using an icon."""
layout_template = renderers.Template("""
<div class="centered">
<img class='grr-icon grr-flow-icon'
src='/static/images/{{this.icon|escape}}'
title='{{this.title|escape}}'
/>
</div>""")
# Maps the flow states to icons we can show
state_map = {"TERMINATED": ("stock_yes.png", "Flow finished normally."),
"RUNNING": ("clock.png", "Flow is still running."),
"ERROR": ("nuke.png", "Flow terminated with an error."),
"CLIENT_CRASHED": (
"skull-icon.png",
"The client crashed while executing this flow.")}
icon = "question-red.png"
def Layout(self, request, response):
try:
self.icon, self.title = self.state_map[str(self.proxy)]
except (KeyError, ValueError):
pass
super(FlowStateIcon, self).Layout(request, response)
class ManageFlows(renderers.Splitter2Way):
"""View launched flows in a tree."""
description = "Manage launched flows"
behaviours = frozenset(["Host"])
order = 20
top_renderer = "ListFlowsTable"
bottom_renderer = "FlowTabView"
class FlowLogView(renderers.AngularDirectiveRenderer):
post_parameters = ["flow"]
directive = "grr-flow-log"
def Layout(self, request, response):
self.directive_args = {}
self.directive_args["flow-urn"] = request.REQ.get("flow")
return super(FlowLogView, self).Layout(request, response)
class FlowTabView(renderers.TabLayout):
"""Show various flow information in a Tab view.
Listening Javascript Events:
- flow_table_select(flow_aff4_path) - A selection event on the tree
informing us of the flow aff4 path. The basename of flow_path is the name
of the flow.
Internal State:
- flow_path - The category and name of the flow we display.
"""
names = ["Flow Information", "Requests", "Results", "Log", "Export"]
delegated_renderers = ["ShowFlowInformation", "FlowRequestView",
"FlowResultsView", "FlowLogView",
"FlowResultsExportView"]
tab_hash = "ftv"
def IsOutputExportable(self, flow_urn, token=None):
flow_obj = aff4.FACTORY.Open(flow_urn, token=token)
runner = flow_obj.GetRunner()
if getattr(runner, "output_urn", None):
return fileview.CollectionExportView.IsCollectionExportable(
runner.output_urn, token=token)
return False
def Layout(self, request, response):
req_flow = request.REQ.get("flow")
if req_flow:
self.state["flow"] = req_flow
client_id = request.REQ.get("client_id")
if client_id:
self.state["client_id"] = client_id
if req_flow and not self.IsOutputExportable(req_flow, token=request.token):
self.disabled = ["FlowResultsExportView"]
response = super(FlowTabView, self).Layout(request, response)
return self.CallJavascript(response, "FlowTabView.Layout",
renderer=self.__class__.__name__)
class FlowRequestView(renderers.TableRenderer):
"""View outstanding requests for a flow.
Post Parameters:
- client_id: The client to show the flows for.
- flow: The flow to show.
"""
post_parameters = ["flow", "client_id"]
def __init__(self, **kwargs):
super(FlowRequestView, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("ID"))
self.AddColumn(semantic.RDFValueColumn("Request", width="100%"))
self.AddColumn(semantic.RDFValueColumn("Last Response", width="100%"))
def BuildTable(self, start_row, end_row, request):
session_id = request.REQ.get("flow", "")
if not session_id:
return
manager = queue_manager.QueueManager(token=request.token)
for i, (request, responses) in enumerate(
manager.FetchRequestsAndResponses(
rdfvalue.RDFURN(session_id))):
if request.id == 0:
continue
if i < start_row:
continue
if i > end_row:
break
# Tie up the request to each response to make it easier to render.
self.AddCell(i, "ID",
manager.FLOW_REQUEST_TEMPLATE % request.id)
self.AddCell(i, "Request", request)
if responses:
self.AddCell(i, "Last Response", responses[-1])
class FlowResultsView(semantic.RDFValueCollectionRenderer):
"""Displays a collection of flow's results."""
error_template = renderers.Template("""
<p>This flow hasn't stored any results yet.</p>
""")
context_help_url = "user_manual.html#_exporting_a_collection"
def Layout(self, request, response):
session_id = request.REQ.get("flow", "")
if not session_id:
return
flow_obj = aff4.FACTORY.Open(session_id, token=request.token)
runner = flow_obj.GetRunner()
# If there's a collection in the runner, use it; otherwise display the
# 'no results' message.
if getattr(runner, "output_urn", None):
return super(FlowResultsView, self).Layout(
request, response, aff4_path=runner.output_urn)
else:
return self.RenderFromTemplate(self.error_template, response,
unique=self.unique)
class FlowResultsExportView(fileview.CollectionExportView):
"""Displays export command to export flow's results."""
def Layout(self, request, response):
session_id = request.REQ.get("flow", "")
if not session_id:
return
flow_obj = aff4.FACTORY.Open(session_id, token=request.token)
runner = flow_obj.GetRunner()
if runner.output_urn is not None:
return super(FlowResultsExportView, self).Layout(
request, response, aff4_path=runner.output_urn)
class TreeColumn(semantic.RDFValueColumn, renderers.TemplateRenderer):
"""A specialized column which adds tree controls."""
template = renderers.Template("""
{% if this.branch %}
<span depth='{{this.depth|escape}}'
onclick='grr.table.toggleChildRows(this, "{{this.value|escapejs}}");'
style='margin-left: {{this.depth|escape}}em;'
class='tree_closed tree_branch'/>
{% else %}
<span depth='{{this.depth|escape}}' class='tree_leaf'
style='margin-left: {{this.depth|escape}}em;' />
{% endif %}
""")
def AddElement(self, index, element, depth, row_type):
self.rows[index] = (element, depth, row_type == "branch")
def RenderRow(self, index, request, row_options):
"""Renders the cell with additional tree controls."""
self.value, self.depth, self.branch = self.rows.get(index, ("", 0, "leaf"))
self.index = index
row_options["row_id"] = index
renderer = self.renderer
if renderer is None:
# What is the RDFValueRenderer for this attribute?
renderer = semantic.RDFValueRenderer.RendererForRDFValue(
self.value.__class__.__name__)
# Instantiate the renderer and return the HTML
if renderer:
result = renderer(self.value).RawHTML(request)
else:
result = utils.SmartStr(self.value)
return self.FormatFromTemplate(self.template, value=result,
index=index, this=self)
class FlowColumn(TreeColumn):
"""A specialized tree/column for sessions."""
template = """
<div id='cancel_{{this.index|escape}}' flow_id="{{this.value|escape}}"
style='float: left'>
</div>""" + TreeColumn.template + """
{{this.row_name|safe}}
"""
def __init__(self, *args, **kwargs):
super(FlowColumn, self).__init__(*args, **kwargs)
self.rows_names = {}
def AddElement(self, index, element, depth, row_type, row_name):
self.rows_names[index] = row_name
super(FlowColumn, self).AddElement(index, element, depth, row_type)
def RenderRow(self, index, request, row_options):
self.row_name = self.rows_names.get(index, "")
return super(FlowColumn, self).RenderRow(index, request, row_options)
class ListFlowsTable(renderers.TableRenderer):
"""List all flows for a client in a table.
Generated Javascript Events:
- flow_table_select(flow): The flow id that the user has selected.
Post Parameters:
- client_id: The client to show the flows for.
"""
selection_publish_queue = "flow_table_select"
with_toolbar = True
layout_template = """
{% if this.with_toolbar %}
<div id="toolbar_{{unique|escape}}" class="breadcrumb">
<li>
<button id="cancel_flow_{{unique|escape}}" title="Cancel Selected Flows"
class="btn btn-default" name="cancel_flow">
<img src="/static/images/editdelete.png" class="toolbar_icon">
</button>
</li>
</div>
{% endif %}
""" + renderers.TableRenderer.layout_template
def _GetCreationTime(self, obj):
try:
return obj.state.context.get("create_time")
except AttributeError:
return obj.Get(obj.Schema.LAST, 0)
def __init__(self, **kwargs):
super(ListFlowsTable, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"State", renderer=FlowStateIcon, width="40px"))
self.AddColumn(FlowColumn("Path", renderer=semantic.SubjectRenderer,
width="20%"))
self.AddColumn(semantic.RDFValueColumn("Flow Name", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Creation Time", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Last Active", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Creator", width="20%"))
def BuildTable(self, start_row, end_row, request):
"""Renders the table."""
depth = request.REQ.get("depth", 0)
flow_urn = self.state.get("value", request.REQ.get("value"))
if flow_urn is None:
client_id = request.REQ.get("client_id")
if not client_id:
return
flow_urn = rdfvalue.ClientURN(client_id).Add("flows")
flow_root = aff4.FACTORY.Open(flow_urn, mode="r", token=request.token)
root_children_paths = sorted(flow_root.ListChildren(),
key=lambda x: x.age, reverse=True)
additional_rows = (depth == 0 and len(root_children_paths) > end_row)
if not depth:
root_children_paths = root_children_paths[start_row:end_row]
root_children = aff4.FACTORY.MultiOpen(
root_children_paths, token=request.token)
root_children = sorted(root_children, key=self._GetCreationTime,
reverse=True)
level2_children = dict(aff4.FACTORY.MultiListChildren(
[f.urn for f in root_children], token=request.token))
self.size = len(root_children)
row_index = start_row
for flow_obj in root_children:
if level2_children.get(flow_obj.urn, None):
row_type = "branch"
else:
row_type = "leaf"
row = {}
last = flow_obj.Get(flow_obj.Schema.LAST)
if last:
row["Last Active"] = last
if isinstance(flow_obj, aff4.AFF4Object.GRRFlow):
row_name = flow_obj.urn.Basename()
try:
if flow_obj.Get(flow_obj.Schema.CLIENT_CRASH):
row["State"] = "CLIENT_CRASHED"
else:
row["State"] = flow_obj.state.context.state
row["Flow Name"] = flow_obj.state.context.args.flow_name
row["Creation Time"] = flow_obj.state.context.create_time
row["Creator"] = flow_obj.state.context.creator
except AttributeError:
row["Flow Name"] = "Failed to open flow."
elif isinstance(flow_obj, aff4.AFF4Object.GRRHunt):
row_name = flow_obj.urn.Dirname()
row["Flow Name"] = "Hunt"
else:
# A logs collection, skip, it will be rendered separately
continue
self.columns[1].AddElement(row_index, flow_obj.urn, depth, row_type,
row_name)
self.AddRow(row, row_index)
row_index += 1
return additional_rows
def Layout(self, request, response):
response = super(ListFlowsTable, self).Layout(request, response)
return self.CallJavascript(
response, "ListFlowsTable.Layout",
selection_publish_queue=self.selection_publish_queue)
class ShowFlowInformation(fileview.AFF4Stats):
"""Display information about the flow.
Post Parameters:
- flow: The flow id we will display.
Internal State:
- client_id, flow
"""
selection_publish_queue = "flow_table_select"
historical_renderer = "HistoricalFlowView"
# Embed the regular AFF4Stats inside a container to allow scrolling
layout_template = renderers.Template("""
<div id="container_{{unique|escapejs}}">
{% if this.path %}
""" + str(fileview.AFF4Stats.layout_template) + """
<br/>
{% else %}
Please select a flow to manage from the above table.
{% endif %}
</div>
""")
def Layout(self, request, response):
"""Introspect the Schema for flow objects."""
try:
self.state["flow"] = session_id = request.REQ["flow"]
self.fd = aff4.FACTORY.Open(session_id, token=request.token,
age=aff4.ALL_TIMES)
self.classes = self.RenderAFF4Attributes(self.fd, request)
self.path = self.fd.urn
except (KeyError, IOError):
self.path = None
# Skip our parent's Layout method and install parent's javascript code.
response = super(fileview.AFF4Stats, self).Layout(request, response)
return self.CallJavascript(response, "AFF4Stats.Layout",
historical_renderer=self.historical_renderer,
historical_renderer_state=self.state)
class HistoricalFlowView(fileview.HistoricalView):
"""View historical attributes for the flow."""
def Layout(self, request, response):
self.state = dict(flow=request.REQ.get("flow"),
attribute=request.REQ.get("attribute"))
self.AddColumn(semantic.RDFValueColumn(self.state["attribute"]))
return renderers.TableRenderer.Layout(self, request, response)
def BuildTable(self, start_row, end_row, request):
"""Populate the table with attribute values."""
flow_name = request.REQ.get("flow")
attribute_name = request.REQ.get("attribute")
if attribute_name is None:
return
self.AddColumn(semantic.RDFValueColumn(attribute_name))
fd = aff4.FACTORY.Open(flow_name, token=request.token, age=aff4.ALL_TIMES)
return self.BuildTableFromAttribute(attribute_name, fd, start_row, end_row)
class FlowPBRenderer(semantic.RDFProtoRenderer):
"""Format the FlowPB protobuf."""
classname = "Flow"
name = "Flow Protobuf"
backtrace_template = renderers.Template("""
<div id='hidden_pre_{{name|escape}}'>
<ins class='fg-button ui-icon ui-icon-minus'/>
{{error_msg|escape}}
<div class='contents'>
<pre>{{value|escape}}</pre>
</div>
</div>
""")
def RenderBacktrace(self, descriptor, value):
error_msg = value.rstrip().split("\n")[-1]
response = self.FormatFromTemplate(self.backtrace_template, value=value,
name=descriptor.name,
error_msg=error_msg)
return self.CallJavascript(response, "FlowPBRenderer.RenderBacktrace",
name=descriptor.name)
# Pretty print these special fields.
translator = dict(
backtrace=RenderBacktrace,
pickle=semantic.RDFProtoRenderer.Ignore,
children=semantic.RDFProtoRenderer.Ignore,
network_bytes_sent=semantic.RDFProtoRenderer.HumanReadableBytes)
class FlowNotificationRenderer(semantic.RDFValueRenderer):
"""Renders notifications inside the FlowRenderer."""
classname = "Notification"
# Note that following the href (e.g. right click, open in new tab) gives a
# fresh URL, but clicking maintains the state of the other tabs.
layout_template = renderers.Template("""
{% if this.proxy.type == "ViewObject" %}
<a id="{{unique}}" href="/#{{this.BuildHash|escape}}"
target_hash="{{this.BuildHash|escape}}">
{{this.proxy.subject|escape}}</a>
{% endif %}
{{this.proxy.message|escape}}
""")
def BuildHash(self):
"""Build hash string to navigate to the appropriate location."""
return renderers.ViewNotifications.BuildHashFromNotification(self.proxy)
def Layout(self, request, response):
response = super(FlowNotificationRenderer, self).Layout(request, response)
return self.CallJavascript(response, "FlowNotificationRenderer.Layout")
class ClientCrashesRenderer(crash_view.ClientCrashCollectionRenderer):
"""View launched flows in a tree."""
description = "Crashes"
behaviours = frozenset(["HostAdvanced"])
order = 50
def Layout(self, request, response):
client_id = request.REQ.get("client_id")
self.crashes_urn = rdfvalue.ClientURN(client_id).Add("crashes")
super(ClientCrashesRenderer, self).Layout(request, response)
class ProgressGraphRenderer(renderers.ImageDownloadRenderer):
def Content(self, request, _):
"""Generates the actual image to display."""
flow_id = request.REQ.get("flow_id")
flow_obj = aff4.FACTORY.Open(flow_id, age=aff4.ALL_TIMES,
token=request.token)
log = list(flow_obj.GetValuesForAttribute(flow_obj.Schema.LOG))
create_time = flow_obj.state.context.create_time / 1000000
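# Log lines are assumed to look like e.g. "Transferred 12345 bytes ..."
# (hypothetical format): we keep lines mentioning "bytes" and take the
# second whitespace-separated token as the byte count.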
plot_data = [(int(x.age) / 1000000, int(str(x).split(" ")[1]))
for x in log if "bytes" in str(x)]
plot_data.append((create_time, 0))
plot_data = sorted([(x - create_time, y) for (x, y) in plot_data])
x = [a for (a, b) in plot_data]
y = [b for (a, b) in plot_data]
params = {"backend": "png"}
plot_lib.plt.rcParams.update(params)
plot_lib.plt.figure(1)
plot_lib.plt.clf()
plot_lib.plt.plot(x, y)
plot_lib.plt.title("Progress for flow %s" % flow_id)
plot_lib.plt.xlabel("Time (s)")
plot_lib.plt.ylabel("Bytes downloaded")
plot_lib.plt.grid(True)
buf = StringIO.StringIO()
plot_lib.plt.savefig(buf)
buf.seek(0)
return buf.read()
class GlobalLaunchFlows(renderers.Splitter):
"""Launches flows that apply across clients."""
description = "Start Global Flows"
behaviours = frozenset(["General"])
order = 10
left_renderer = "GlobalFlowTree"
top_right_renderer = "SemanticProtoFlowForm"
bottom_right_renderer = "FlowManagementTabs"
class GlobalFlowTree(FlowTree):
"""Show flows that work across clients."""
publish_select_queue = "flow_select"
flow_behaviors_to_render = flow.FlowBehaviour("Global Flow")
class GlobExpressionListFormRenderer(forms.RepeatedFieldFormRenderer):
type = rdfvalue.GlobExpression
context_help_url = "user_manual.html#_specifying_file_paths"
class GlobExpressionFormRenderer(forms.ProtoRDFValueFormRenderer):
"""A renderer for glob expressions with autocomplete."""
type = rdfvalue.GlobExpression
layout_template = ("""<div class="form-group">
""" + forms.TypeDescriptorFormRenderer.default_description_view + """
<div class="controls">
<input id='{{this.prefix}}'
type=text
{% if this.default %}
value='{{ this.default|escape }}'
{% endif %}
onchange="grr.forms.inputOnChange(this)"
class="form-control unset input-xxlarge"/>
</div>
</div>
""")
def Layout(self, request, response):
self.completions = rdfvalue.KnowledgeBase().GetKbFieldNames()
response = super(GlobExpressionFormRenderer, self).Layout(request, response)
return self.CallJavascript(
response, "GlobExpressionFormRenderer.Layout", prefix=self.prefix,
completions=self.completions)
class FileFinderConditionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a single option in a list of conditions."""
type = rdfvalue.FileFinderCondition
union_by_field = "condition_type"
class FileFinderConditionListFormRenderer(forms.RepeatedFieldFormRenderer):
"""Renders multiple conditions. Doesn't display a "default" condition."""
type = rdfvalue.FileFinderCondition
# We want list of conditions to be empty by default.
add_element_on_first_show = False
class FileFinderActionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a file finder action selector."""
type = rdfvalue.FileFinderAction
union_by_field = "action_type"
class MemoryCollectorConditionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a single option in a list of conditions."""
type = rdfvalue.MemoryCollectorCondition
union_by_field = "condition_type"
class MemoryCollectorConditionListFormRenderer(forms.RepeatedFieldFormRenderer):
"""Renders multiple conditions. Doesn't display a "default" condition."""
type = rdfvalue.MemoryCollectorCondition
# We want list of conditions to be empty by default.
add_element_on_first_show = False
class MemoryCollectorDumpOptionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a memory collector dump option selector."""
type = rdfvalue.MemoryCollectorDumpOption
union_by_field = "option_type"
class MemoryCollectorActionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a memory collector action selector."""
type = rdfvalue.MemoryCollectorAction
union_by_field = "action_type"
class RegistryFinderConditionFormRenderer(forms.UnionMultiFormRenderer):
"""Renders a single option in a list of conditions."""
type = rdfvalue.RegistryFinderCondition
union_by_field = "condition_type"
class RegistryFinderConditionListFormRenderer(forms.RepeatedFieldFormRenderer):
"""Renders multiple conditions. Doesn't display a "default" condition."""
type = rdfvalue.RegistryFinderCondition
# We want list of conditions to be empty by default.
add_element_on_first_show = False
class RegularExpressionFormRenderer(forms.ProtoRDFValueFormRenderer):
type = rdfvalue.RegularExpression
context_help_url = "user_manual.html#_regex_matches"
class LiteralExpressionFormRenderer(forms.BinaryStringTypeFormRenderer):
type = rdfvalue.LiteralExpression
context_help_url = "user_manual.html#_literal_matches"
"""
DO NOT WRITE ANY NEW FUNCTIONALITY BASED ON THIS FILE
This is being kept around only to support legacy reports
"""
from django.template.context import Context
from django.template.loader import render_to_string
import pytz
import warnings
from corehq.apps.programs.models import Program
from corehq.apps.reports import util
from corehq.apps.groups.models import Group
from corehq.apps.reports.filters.users import get_user_toggle
from corehq.apps.reports.models import HQUserType
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
import uuid
from corehq.apps.users.models import WebUser
class ReportField(object):
slug = ""
template = ""
is_cacheable = False
def __init__(self, request, domain=None, timezone=pytz.utc, parent_report=None):
warnings.warn(
"ReportField (%s) is deprecated. Use ReportFilter instead." % (
self.__class__.__name__
),
DeprecationWarning,
)
self.context = Context()
self.request = request
self.domain = domain
self.timezone = timezone
self.parent_report = parent_report
def render(self):
if not self.template: return ""
self.context["slug"] = self.slug
self.update_context()
return render_to_string(self.template, self.context)
def update_context(self):
"""
If your select field needs some context (for example, to set the default), you can set that up here.
"""
pass
class ReportSelectField(ReportField):
slug = "generic_select"
name = ugettext_noop("Generic Select")
template = "reports/dont_use_fields/select_generic.html"
default_option = ugettext_noop("Select Something...")
options = [dict(val="val", text="text")]
cssId = "generic_select_box"
cssClasses = "span4"
selected = None
hide_field = False
as_combo = False
placeholder = ''
help_text = ''
def __init__(self, *args, **kwargs):
super(ReportSelectField, self).__init__(*args, **kwargs)
# need to randomize cssId so knockout bindings won't clobber each other
# when multiple select controls are on screen at once
nonce = uuid.uuid4().hex[-12:]
self.cssId = '%s-%s' % (self.cssId, nonce)
def update_params(self):
self.selected = self.request.GET.get(self.slug)
def update_context(self):
self.update_params()
self.context['hide_field'] = self.hide_field
self.context['help_text'] = self.help_text
self.context['select'] = dict(
options=self.options,
default=self.default_option,
cssId=self.cssId,
cssClasses=self.cssClasses,
label=self.name,
selected=self.selected,
use_combo_box=self.as_combo,
placeholder=self.placeholder,
)
class FilterUsersField(ReportField):
# TODO: move all this to UserTypeFilter
slug = "ufilter"
template = "reports/dont_use_fields/filter_users.html"
always_show_filter = False
can_be_empty = False
def update_context(self):
toggle, show_filter = self.get_user_filter(self.request)
self.context['show_user_filter'] = show_filter
self.context['toggle_users'] = toggle
self.context['can_be_empty'] = self.can_be_empty
@classmethod
def get_user_filter(cls, request):
return get_user_toggle(request)
class SelectMobileWorkerMixin(object):
slug = "select_mw"
name = ugettext_noop("Select Mobile Worker")
@classmethod
def get_default_text(cls, user_filter, default_option=None):
default = default_option or cls.default_option
if user_filter[HQUserType.ADMIN].show or \
user_filter[HQUserType.DEMO_USER].show or user_filter[HQUserType.UNKNOWN].show:
default = _('%s & Others') % _(default)
return default
class SelectMobileWorkerField(SelectMobileWorkerMixin, ReportField):
template = "reports/dont_use_fields/select_mobile_worker.html"
default_option = ugettext_noop("All Mobile Workers")
filter_users_field_class = FilterUsersField
def __init__(self, request, domain=None, timezone=pytz.utc, parent_report=None, filter_users_field_class=None):
super(SelectMobileWorkerField, self).__init__(request, domain, timezone, parent_report)
if filter_users_field_class:
self.filter_users_field_class = filter_users_field_class
def update_params(self):
pass
def update_context(self):
self.user_filter, _ = self.filter_users_field_class.get_user_filter(self.request)
self.individual = self.request.GET.get('individual', '')
self.default_option = self.get_default_text(self.user_filter)
self.users = util.user_list(self.domain)
self.update_params()
self.context['field_name'] = self.name
self.context['default_option'] = self.default_option
self.context['users'] = self.users
self.context['individual'] = self.individual
class SelectFilteredMobileWorkerField(SelectMobileWorkerField):
"""
This is a little field for use when a client really wants to filter by
individuals from a specific group. Since by default we still want to
show all the data, no filtering is done unless the special group filter
is selected.
"""
slug = "select_filtered_mw"
name = ugettext_noop("Select Mobile Worker")
template = "reports/dont_use_fields/select_filtered_mobile_worker.html"
default_option = ugettext_noop("All Mobile Workers...")
# Whether to display both the default option and "Only <group> Mobile
# Workers" or just the default option (useful when using a single
# group_name and changing default_option to All <group> Workers)
show_only_group_option = True
group_names = []
def update_params(self):
if not self.individual:
self.individual = self.request.GET.get('filtered_individual', '')
self.users = []
self.group_options = []
for group in self.group_names:
filtered_group = Group.by_name(self.domain, group)
if filtered_group:
if self.show_only_group_option:
self.group_options.append(dict(group_id=filtered_group._id,
name=_("Only %s Mobile Workers") % group))
self.users.extend(filtered_group.get_users(is_active=True, only_commcare=True))
def update_context(self):
super(SelectFilteredMobileWorkerField, self).update_context()
self.context['users'] = self.users_to_options(self.users)
self.context['group_options'] = self.group_options
@staticmethod
def users_to_options(user_list):
return [dict(val=user.user_id,
text=user.raw_username,
is_active=user.is_active) for user in user_list]
class BooleanField(ReportField):
slug = "checkbox"
label = "hello"
template = "reports/partials/checkbox.html"
def update_context(self):
self.context['label'] = self.label
self.context[self.slug] = self.request.GET.get(self.slug, False)
self.context['checked'] = self.request.GET.get(self.slug, False)
class StrongFilterUsersField(FilterUsersField):
"""
Version of the FilterUsersField that always uses and shows this filter.
When using this field:
- use SelectMobileWorkerFieldHack instead of SelectMobileWorkerField
- if using ProjectReportParametersMixin, make sure
filter_users_field_class is set to this class
"""
always_show_filter = True
can_be_empty = True
class UserOrGroupField(ReportSelectField):
"""
To Use: Subclass and specify what the field options should be
"""
slug = "view_by"
name = ugettext_noop("View by Users or Groups")
cssId = "view_by_select"
cssClasses = "span2"
default_option = "Users"
def update_params(self):
self.selected = self.request.GET.get(self.slug, '')
self.options = [{'val': 'groups', 'text': 'Groups'}]
class SelectProgramField(ReportSelectField):
slug = "program"
name = ugettext_noop("Program")
cssId = "program_select"
default_option = 'All'
def update_params(self):
self.selected = self.request.GET.get('program')
user = WebUser.get_by_username(str(self.request.user))
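# request.GET.get() returns None when the parameter is absent and ''
# when "All" was chosen explicitly, so we only fall back to the user's
# own program when no program parameter was sent at all.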
if not self.selected and \
self.selected != '' and \
user.get_domain_membership(self.domain):
self.selected = user.get_domain_membership(self.domain).program_id
self.programs = Program.by_domain(self.domain)
opts = [dict(val=program.get_id, text=program.name) for program in self.programs]
self.options = opts
class GroupFieldMixin():
slug = "group"
name = ugettext_noop("Group")
cssId = "group_select"
class ReportMultiSelectField(ReportSelectField):
template = "reports/dont_use_fields/multiselect_generic.html"
selected = []
# auto_select
default_option = []
# enforce as_combo = False ?
def update_params(self):
self.selected = self.request.GET.getlist(self.slug) or self.default_option
class MultiSelectGroupField(GroupFieldMixin, ReportMultiSelectField):
default_option = ['_all']
placeholder = 'Click to select groups'
help_text = "Start typing to select one or more groups"
@property
def options(self):
self.groups = Group.get_reporting_groups(self.domain)
opts = [dict(val=group.get_id, text=group.name) for group in self.groups]
opts.insert(0, {'text': 'All', 'val': '_all'})
return opts
import pygame
import math
import sys
import background
import tower
import process_ev
import player1
import other
import globalz
import button
import inputbox
import Grid
import anim
import database
import time
class Game:
ina = 0  # dice state: 0 = roll needed, 1 = run first animation step, 2 = running normally
won = 0  # set to 1 once win/loss stats have been written to the database
def __init__(self):
# Resolution
self.width = 1366
self.height = 768
self.resolution = (self.width,self.height)
#self.fullscreen = pygame.FULLSCREEN
pygame.init() # Initialise all imported pygame modules
# Set the resolution
self.screen = pygame.display.set_mode(self.resolution)
# Set Title
self.caption = pygame.display.set_caption('Opseilen!')
# Set default font
self.font = pygame.font.Font(None, 20)
# Player draw and update key
self.Is1Down = False
self.Is2Down = False
self.Is3Down = False
self.Is4Down = False
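# Grid layout: the bottom row of grids sits just above the bottom edge
# (height - height/40), the top row sits 450 px above it, and the four
# colour columns are spaced 225 px apart around the centre column.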
self.RedGridTop = Grid.RedGridTop(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 57.5)), ((float((self.height) - (self.height / 40)))-450))
self.YellowGridTop = Grid.YellowGridTop(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 57.5)+ 450), ((float((self.height) - (self.height / 40)))- 450))
self.BlueGridTop = Grid.BlueGridTop(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 57.5)- 225), ((float((self.height) - (self.height / 40)))- 450))
self.GreenGridTop = Grid.GreenGridTop(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 57.5)+ 225), ((float((self.height) - (self.height / 40)))- 450))
self.RedGrid = Grid.RedGrid(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 115)), (float((self.height) - (self.height / 40))))
self.YellowGrid = Grid.YellowGrid(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 115)+ 450), (float((self.height) - (self.height / 40))))
self.BlueGrid = Grid.BlueGrid(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 115)- 225), (float((self.height) - (self.height / 40))))
self.GreenGrid = Grid.GreenGrid(self, (float(((self.width / 3) + ((self.width / 3) / 8))- 115)+ 225), (float(self.height) - (self.height / 40)))
# Player update key
self.Is1Update = False
self.Is2Update = False
self.Is3Update = False
self.Is4Update = False
# Create front layers
self.frontlayer1 = background.frontlayer1(0, (0 + float(self.height / 20)), float(((self.width / 3) / 2) - self.width * 0.002), self.height)
self.frontlayer2 = background.frontlayer2(float(((self.width / 3) * 2) + ((self.width / 3) / 2) + self.width * 0.002), (0 + float(self.height / 20)), float(self.width / 3), self.height)
self.frontlayer3 = background.frontlayer3(0, (float(self.height - (self.height / 20))) , self.width, (float(self.height / 20)))
#Create the tower
self.Tower_red = tower.Tower_red(float(self.width / 3), (0 + float(self.height / 20)), (float(self.width / 3) / 2), (float(self.height - (self.height / 10))))
self.Tower_green = tower.Tower_green(float(self.width / 3) + (float(self.width / 3) / 2), (0 + float(self.height / 20)), (float(self.width / 3) / 2), (float(self.height - (self.height / 10))))
self.Tower_blue = tower.Tower_blue(float((self.width / 3) / 2), (0 + float(self.height / 20)), (float(self.width / 3) / 2), (float(self.height - (self.height / 10))))
self.Tower_yellow = tower.Tower_yellow(float((self.width / 3) * 2), (0 + float(self.height / 20)), (float(self.width / 3) / 2), (float(self.height - (self.height / 10))))
self.player_1 = player1.player_1((float((self.width / 3) + ((self.width / 3) / 8))), (float(self.height) - (self.height / 40)),
(float(self.width * 0.015)), (float(self.width * 0.005)),float((self.width / 3) / 2),
float(self.width / 3),
float(self.width / 3) + (float(self.width / 3) / 2),
float((self.width / 3) * 2),
float((self.width / 3) / 4),
float((self.height / 17)),
float((self.width / 3) + ((self.width / 3) / 4)),
float((self.width / 3) / 2),
(float((self.width / 3) + ((self.width / 3) / 8)) - (float((self.width / 3) / 4))),
(float((self.width / 3) + ((self.width / 3) / 8)) + (float((self.width / 3) / 4)) * 4),
float((self.width / 3) / 8, ),(float(self.width * 0.0125) * 2), (float(self.width * 0.0125) * 2),1)
self.player_2 = player1.player_1((float((self.width / 3) + ((self.width / 3) / 8)) - (float(self.width * 0.0125))),
(float(self.height) - (self.height / 40)) - (float(self.width * 0.0125)), (float(self.width * 0.015)),
(float(self.width * 0.005)), float((self.width / 3) / 2),
float(self.width / 3),
float(self.width / 3) + (float(self.width / 3) / 2),
float((self.width / 3) * 2),
float((self.width / 3) / 4),
float((self.height / 17)),
float((self.width / 3) + ((self.width / 3) / 4) - (float(self.width * 0.0125))),
float((self.width / 3) / 2),
(float((self.width / 3) + ((self.width / 3) / 8)) - (float((self.width / 3) / 4)) - (float(self.width * 0.0125) * 2)),
(float((self.width / 3) + ((self.width / 3) / 8)) + (float((self.width / 3) / 4)) * 4),
float((self.width / 3) / 8, ),(float(self.width * 0.0125) * 2), (float(self.width * 0.0125) * 2),2)
self.player_3 = player1.player_1((float((self.width / 3) + ((self.width / 3) / 8))), (float(self.height) - (self.height / 40)),
(float(self.width * 0.015)), 0,float((self.width / 3) / 2),
float(self.width / 3),
float(self.width / 3) + (float(self.width / 3) / 2),
float((self.width / 3) * 2),
float((self.width / 3) / 4),
float((self.height / 17)),
float((self.width / 3) + ((self.width / 3) / 4)),
float((self.width / 3) / 2),
(float((self.width / 3) + ((self.width / 3) / 8)) - (float((self.width / 3) / 4))),
(float((self.width / 3) + ((self.width / 3) / 8)) + (float((self.width / 3) / 4)) * 4),
float((self.width / 3) / 8, ),(float(self.width * 0.0125) * 2), (float(self.width * 0.0125) * 2),3)
self.player_4 = player1.player_1((float((self.width / 3) + ((self.width / 3) / 8)) - (float(self.width * 0.0125))),
(float(self.height) - (self.height / 40)) - (float(self.width * 0.0125)), (float(self.width * 0.015)),
0, float((self.width / 3) / 2),
float(self.width / 3),
float(self.width / 3) + (float(self.width / 3) / 2),
float((self.width / 3) * 2),
float((self.width / 3) / 4),
float((self.height / 17)),
float((self.width / 3) + ((self.width / 3) / 4) - (float(self.width * 0.0125))),
float((self.width / 3) / 2),
(float((self.width / 3) + ((self.width / 3) / 8)) - (float((self.width / 3) / 4)) - (float(self.width * 0.0125) * 2)),
(float((self.width / 3) + ((self.width / 3) / 8)) + (float((self.width / 3) / 4)) * 4),
float((self.width / 3) / 8, ),(float(self.width * 0.0125) * 2), (float(self.width * 0.0125) * 2),4)
#Create Dice
self.anima = anim.Animation(1,2,self)
#smenu variable
self.smenu_active = False
# Update logic of game
def update(self):
self.frontlayer1.update()
self.frontlayer2.update()
self.frontlayer3.update()
print(other.dice.dice_result)
tower.Tower_red.update(self.Tower_red)
tower.Tower_green.update(self.Tower_green)
tower.Tower_blue.update(self.Tower_blue)
tower.Tower_yellow.update(self.Tower_yellow)
Grid.RedGrid.update(self.RedGrid)
Grid.YellowGrid.update(self.YellowGrid)
Grid.BlueGrid.update(self.BlueGrid)
Grid.GreenGrid.update(self.GreenGrid)
Grid.RedGridTop.update(self.RedGridTop)
Grid.YellowGridTop.update(self.YellowGridTop)
Grid.BlueGridTop.update(self.BlueGridTop)
Grid.GreenGridTop.update(self.GreenGridTop)
# Roll the dice once before the first animation step.
if Game.ina == 0:
other.dice.dice_roll()
time.sleep(0.3)
Game.ina = 1
if Game.ina == 1:
self.anima.update()
time.sleep(0.1)
Game.ina = 2
self.anima.update()
keys = pygame.key.get_pressed()
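# Only the current player's piece responds to the arrow keys; each
# Is*Update flag acts as a per-player latch that is cleared again when
# that player's number key is released.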
if other.turns.current_player == 1:
if not self.Is1Update:
self.Is2Update = True
self.Is3Update = True
self.Is4Update = True
if keys[pygame.K_UP]:
self.player_1.up()
elif keys[pygame.K_DOWN]:
self.player_1.down()
elif keys[pygame.K_LEFT]:
self.player_1.left()
elif keys[pygame.K_RIGHT]:
self.player_1.right()
else:
if not keys[pygame.K_1]:
self.Is1Update = False
if other.turns.current_player == 2:
if not self.Is2Update:
self.Is1Update = True
self.Is3Update = True
self.Is4Update = True
if keys[pygame.K_UP]:
self.player_2.up()
elif keys[pygame.K_DOWN]:
self.player_2.down()
elif keys[pygame.K_LEFT]:
self.player_2.left()
elif keys[pygame.K_RIGHT]:
self.player_2.right()
else:
if not keys[pygame.K_2]:
self.Is2Update = False
if other.turns.current_player == 3:
if not self.Is3Update:
self.Is1Update = True
self.Is2Update = True
self.Is4Update = True
if keys[pygame.K_UP]:
self.player_3.up()
elif keys[pygame.K_DOWN]:
self.player_3.down()
elif keys[pygame.K_LEFT]:
self.player_3.left()
elif keys[pygame.K_RIGHT]:
self.player_3.right()
else:
if not keys[pygame.K_3]:
self.Is3Update = False
if other.turns.current_player == 4:
if not self.Is4Update:
self.Is1Update = True
self.Is2Update = True
self.Is3Update = True
if keys[pygame.K_UP]:
self.player_4.up()
elif keys[pygame.K_DOWN]:
self.player_4.down()
elif keys[pygame.K_LEFT]:
self.player_4.left()
elif keys[pygame.K_RIGHT]:
self.player_4.right()
else:
if not keys[pygame.K_4]:
self.Is4Update = False
if keys[pygame.K_n]:
other.turns.turn()
def draw(self):
# Clearing the screen
self.screen.fill((0, 0, 0))
button.update(self)
# Draw elements
self.frontlayer1.draw(self.screen)
self.frontlayer2.draw(self.screen)
self.frontlayer3.draw(self.screen)
#Draw tower
tower.Tower_red.draw(self.Tower_red)
tower.Tower_green.draw(self.Tower_green)
tower.Tower_blue.draw(self.Tower_blue)
tower.Tower_yellow.draw(self.Tower_yellow)
Grid.RedGrid.draw()
Grid.YellowGrid.draw()
Grid.BlueGrid.draw()
Grid.GreenGrid.draw()
Grid.RedGridTop.draw()
Grid.YellowGridTop.draw()
Grid.BlueGridTop.draw()
Grid.GreenGridTop.draw()
## Draw Players
keys = pygame.key.get_pressed()
if keys[pygame.K_1] and not self.Is1Down:
if other.turns.player1_name == "":
other.turns.naming(1)
database.update(other.turns.player1_name)
self.Is1Down = True
self.player_1.drawcircle(self.screen)
else:
if self.Is1Down:
self.player_1.drawcircle(self.screen)
if keys[pygame.K_2] and not self.Is2Down:
if other.turns.player2_name == "":
other.turns.naming(2)
database.update(other.turns.player2_name)
self.Is2Down = True
self.player_2.drawcube(self.screen)
else:
if self.Is2Down:
self.player_2.drawcube(self.screen)
if keys[pygame.K_3] and not self.Is3Down:
if other.turns.player3_name == "":
other.turns.naming(3)
database.update(other.turns.player3_name)
self.Is3Down = True
self.player_3.drawcircle(self.screen)
else:
if self.Is3Down:
self.player_3.drawcircle(self.screen)
if keys[pygame.K_4] and not self.Is4Down:
if other.turns.player4_name == "":
other.turns.naming(4)
database.update(other.turns.player4_name)
self.Is4Down = True
self.player_4.drawcube(self.screen)
else:
if self.Is4Down:
self.player_4.drawcube(self.screen)
# (game, x, y, width, height, text, size, backcolor, frontcolor, callback):
# button.draw(game, 45, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 1))
#button.draw(self,25,100,150,25,"open sport",20,(0,0,0),(255,255,255), lambda game: other.questions.question_open(globalz.o_sport,globalz.ori_o_sport,globalz.key_o_sport))
button.draw(self,25,100,150,25,"open settings menu",20,(0,0,0),(255,255,255), lambda game: self.smenu())
if not other.turns.match_started:
button.draw(self,25,150,150,25,"Player 1 goes first",20,(0,0,0),(255,255,255), lambda game: other.turns.firstturn(1))
button.draw(self,25,200,150,25,"Player 2 goes first",20,(0,0,0),(255,255,255), lambda game: other.turns.firstturn(2))
button.draw(self,25,250,150,25,"Player 3 goes first",20,(0,0,0),(255,255,255), lambda game: other.turns.firstturn(3))
button.draw(self,25,300,150,25,"Player 4 goes first",20,(0,0,0),(255,255,255), lambda game: other.turns.firstturn(4))
#button.draw(self,25,350,150,25,"mc entertainment",20,(0,0,0),(255,255,255), lambda game: other.questions.question_mc(globalz.mc_entert,globalz.ori_mc_entert,globalz.ans_mc_entert))
#button.draw(self,25,400,150,25,"mc history",20,(0,0,0),(255,255,255), lambda game: other.questions.question_mc(globalz.mc_history,globalz.ori_mc_history,globalz.ans_mc_history))
#button.draw(self,25,450,150,25,"mc geograhpy",20,(0,0,0),(255,255,255), lambda game: other.questions.question_mc(globalz.mc_geo,globalz.ori_mc_geo,globalz.ans_mc_geo))
# changed background colour of the dice button; one button both rolls
# the dice and advances the dice animation
button.draw(self,25,550,165,25,"ROLL THE DICE",20,(95,158,160),(255,255,255), lambda game: (other.dice.dice_roll(), game.anima.update()))
# rect to clarify where the answer of questions is shown
button.draw(self,0.85*self.width,275,200,25,"RIGHT OR WRONG ANSWER?",20,(0,0,0),(255,255,255), lambda game: None)
if other.questions.correct == 1:
button.draw(self,0.85*self.width,300,200,25,"YOUR ANSWER IS CORRECT!",20,(124,252,0),(0,0,0), lambda game: None)
elif other.questions.correct == 0:
button.draw(self,0.85*self.width,300,200,25,"YOUR ANSWER IS WRONG",20,(255,0,0),(0,0,0), lambda game: None)
# import MainMenuNew
# #save button
# button.draw(self, 0.85*self.width,50,200,25,"SAVE THE GAME",20,(0,0,0),(255,255,255), lambda game: other.SaveGame.save())
# if other.SaveGame.saved:
# button.draw(self, 0.85*self.width,80,200,25,"GAME SAVED",20,(255,255,255),(124,252,0), lambda game: None)
# time.sleep(0.5)
# other.SaveGame.saved = False
#displays the result of the dice roll
#button.draw(self,25,600,150,25,str(other.dice.dice_result),20,(95,158,160),(255,255,255), lambda game: None)
#displays the name of the current player
button.draw(self,0.85*self.width,70,200,30,"CURRENT PLAYER IS",25,(0,0,0),(255,255,255), lambda game: None)
button.draw(self,0.85*self.width,100,200,30,str(other.turns.current_player_name),25,(0,0,0),(255,255,255), lambda game: None)
#shows the name each player has entered for themselves
button.draw(self,0.85*self.width,525,200,25,'PLAYER 1 = '+str(other.turns.player1_name),20,(255,255,255),(0,0,0), lambda game: None)
button.draw(self,0.85*self.width,550,200,25,'PLAYER 2 = '+str(other.turns.player2_name),20,(255,255,255),(0,0,0), lambda game: None)
button.draw(self,0.85*self.width,575,200,25,'PLAYER 3 = '+str(other.turns.player3_name),20,(255,255,255),(0,0,0), lambda game: None)
button.draw(self,0.85*self.width,600,200,25,'PLAYER 4 = '+str(other.turns.player4_name),20,(255,255,255),(0,0,0), lambda game: None)
# Dice animation
self.anima.Draw(self.screen)
#checks whether a player has reached the finish, and if so, draws the termination screen and displays the name of the winner
if self.player_1.cnt >= 15 or self.player_2.cnt >= 15 or self.player_3.cnt >= 15 or self.player_4.cnt >= 15:
self.screen.fill((255,255,255))
button.update(self)
            # The first player past square 15 wins; player 1's banner sits
            # slightly further right than the others.
            finishers = ((self.player_1, other.turns.player1_name, 0.3),
                         (self.player_2, other.turns.player2_name, 0.2),
                         (self.player_3, other.turns.player3_name, 0.2),
                         (self.player_4, other.turns.player4_name, 0.2))
            for player, name, x_factor in finishers:
                if player.cnt >= 15:
                    button.draw(self,x_factor*self.width,0.25*self.height,500,100,'The winner is: '+str(name),50,(255,255,255),(0,0,0), lambda game: None)
                    if Game.won == 0:
                        database.increment_wins(name)
                        for loser, loser_name, _ in finishers:
                            if loser is not player:
                                database.increment_loses(loser_name)
                        Game.won = 1
                    break
#These are the buttons on the termination screen
import MainMenuNew
button.draw(self,0.1*self.width,0.75*self.height,500,100,'RETURN TO MAIN MENU',50,(0,0,0),(255,255,255), lambda game: MainMenuNew.reloop())
button.draw(self,0.5*self.width,0.75*self.height,500,100,'QUIT GAME',50,(0,0,0),(255,255,255), lambda game: sys.exit())
#checks whether the settings menu has been requested by the user, and if so, opens it.
if self.smenu_active:
self.screen.fill((255,255,255))
import MainMenuNew
button.update(self)
button.draw(self,0.1*self.width,0.75*self.height,500,100,'Start background music',50,(0,0,0),(255,255,255), lambda game: other.music())
button.draw(self,0.5*self.width,0.75*self.height,500,100,'Stop background music',50,(0,0,0),(255,255,255), lambda game: other.stop_music())
button.draw(self,0.3*self.width,0.25*self.height,500,100,'back',50,(0,0,0),(255,255,255), lambda game: self.smenu())
button.draw(self,0.3*self.width,0.5*self.height,500,100,"return to main menu",50,(0,0,0),(255,255,255), lambda game: MainMenuNew.reloop())
# Flipping the screen
pygame.display.flip()
    def smenu(self):
        # Toggle the settings menu on or off.
        self.smenu_active = not self.smenu_active
    # Main loop
def program_loop(self):
while not process_ev.process_events():
self.update()
self.draw()
    # Handling pygame events
#def process_events():
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# return True
# #elif event.type == pygame.KEYDOWN:
# # game.Player1.key_event(event)
# return False
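    # program_loop above delegates to process_ev.process_events(). A minimal
    # sketch of that helper, assuming it mirrors the commented-out version
    # and only needs to detect window-close events (the module layout is an
    # assumption, inferred from the call site):
    #
    #     # process_ev.py
    #     import pygame
    #
    #     def process_events():
    #         # Returning True ends program_loop; pygame.QUIT fires on window close.
    #         for event in pygame.event.get():
    #             if event.type == pygame.QUIT:
    #                 return True
    #         return False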
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception): pass
class MultipleGTestFiltersSpecified(Exception): pass
class BuildDirNotFound(Exception): pass
class BuildDirAmbiguous(Exception): pass
class ChromeTests:
SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 1000
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source_dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build_dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build_dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
'''Generates the default command array that most tests will use.'''
if exe and common.IsWindows():
exe += '.exe'
cmd = list(self._command_preamble)
# Find all suppressions matching the following pattern:
# tools/valgrind/TOOL/suppressions[_PLATFORM].txt
# and list them with --suppressions= prefix.
script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
if os.path.exists(suppression_file):
cmd.append("--suppressions=%s" % suppression_file)
# Platform-specific suppression
for platform in common.PlatformNames():
platform_suppression_file = \
os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
if os.path.exists(platform_suppression_file):
cmd.append("--suppressions=%s" % platform_suppression_file)
if self._options.valgrind_tool_flags:
cmd += self._options.valgrind_tool_flags.split(" ")
if self._options.keep_logs:
cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
for arg in valgrind_test_args:
cmd.append(arg)
if exe:
self._EnsureBuildDirFound()
cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
    If the user passed their own filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
'''
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
      with open(filename) as f:
        for line in f:
          if line.startswith("#") or line.startswith("//") or line.isspace():
            continue
          line = line.rstrip()
          test_prefixes = ["FLAKY", "FAILS"]
          for p in test_prefixes:
            # Strip prefixes from the test names.
            line = line.replace(".%s_" % p, ".")
          # Exclude the original test name.
          filters.append(line)
          if line[-2:] != ".*":
            # List all possible prefixes if line doesn't end with ".*".
            for p in test_prefixes:
              filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
test_to_names = {}
for name, test_function in ChromeTests._test_list.iteritems():
test_to_names.setdefault(test_function, []).append(name)
name_to_aliases = {}
for names in test_to_names.itervalues():
names.sort(key=lambda name: len(name))
name_to_aliases[names[0]] = names[1:]
print
print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
if aliases:
print " {} (aka {})".format(name, ', '.join(aliases))
else:
print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
def TestAsh(self):
return self.SimpleTest("ash", "ash_unittests")
def TestAura(self):
return self.SimpleTest("aura", "aura_unittests")
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestChromeOS(self):
return self.SimpleTest("chromeos", "chromeos_unittests")
def TestComponents(self):
return self.SimpleTest("components", "components_unittests")
def TestCompositor(self):
return self.SimpleTest("compositor", "compositor_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestContentBrowser(self):
return self.SimpleTest("content", "content_browsertests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestDevice(self):
return self.SimpleTest("device", "device_unittests")
def TestFFmpeg(self):
return self.SimpleTest("chrome", "ffmpeg_unittests")
def TestFFmpegRegressions(self):
return self.SimpleTest("chrome", "ffmpeg_regression_tests")
def TestGPU(self):
return self.SimpleTest("gpu", "gpu_unittests")
def TestGURL(self):
return self.SimpleTest("chrome", "googleurl_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests",
valgrind_test_args=["--trace_children"])
def TestJingle(self):
return self.SimpleTest("chrome", "jingle_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestPPAPI(self):
return self.SimpleTest("chrome", "ppapi_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests",
cmd_args=[
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"])
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestLinuxSandbox(self):
return self.SimpleTest("sandbox", "sandbox_linux_unittests")
def TestTestShell(self):
return self.SimpleTest("webkit", "test_shell_tests")
def TestUnit(self):
# http://crbug.com/51716
# Disabling all unit tests
# Problems reappeared after r119922
if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
return self.SimpleTest("chrome", "unit_tests")
def TestUIUnit(self):
return self.SimpleTest("chrome", "ui_unittests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000",
"--no-sandbox"]
# TODO(thestig) fine-tune these values.
# Valgrind timeouts are in seconds.
BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
# Browser test timeouts are in milliseconds.
BROWSER_TEST_ARGS = ["--ui-test-action-timeout=200000",
"--ui-test-action-max-timeout=400000",
"--no-sandbox"]
def TestAutomatedUI(self):
return self.SimpleTest("chrome", "automated_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests",
valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
cmd_args=self.BROWSER_TEST_ARGS)
def TestInteractiveUI(self):
return self.SimpleTest("chrome", "interactive_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestReliability(self):
script_dir = path_utils.ScriptDir()
url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
return self.SimpleTest("chrome", "reliability_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(self.UI_TEST_ARGS +
["--list=%s" % url_list_file]))
def TestSafeBrowsing(self):
return self.SimpleTest("chrome", "safe_browsing_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestSyncIntegration(self):
return self.SimpleTest("chrome", "sync_integration_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestLayoutChunk(self, chunk_num, chunk_size):
# Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
# list of tests. Wrap around to beginning of list at end.
# If chunk_size is zero, run all tests in the list once.
# If a text file is given as argument, it is used as the list of tests.
#
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python valgrind_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to valgrind_test.py
# to avoid valgrinding python.
# Start by building the valgrind_test.py commandline.
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool)
cmd.append("--trace_children")
cmd.append("--indirect_webkit_layout")
cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
test_shell = os.path.join(self._options.build_dir, "test_shell")
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
script_cmd = ["python", script, "-v",
"--run-singly", # run a separate DumpRenderTree for each test
"--fully-parallel",
"--time-out-ms=200000",
"--no-retry-failures", # retrying takes too much time
# http://crbug.com/176908: Don't launch a browser when done.
"--no-show-results",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build_dir / --debug)
if self._options.build_dir.endswith("Debug"):
script_cmd.append("--debug");
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._AppendGtestFilter(tool, "layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
    # Layout tests often fail quickly, but the buildbot remains green.
# Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
logging.info("Reading state from " + chunk_file)
    try:
      with open(chunk_file) as f:
        contents = f.read()
      if len(contents):
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (note that when we wrap around
      # we are almost guaranteed not to be at the end of the test list).
      if chunk_num > 10000:
        chunk_num = 0
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
# Save the new chunk size before running the tests. Otherwise if a
# particular chunk hangs the bot, the chunk number will never get
# incremented and the bot will be wedged.
logging.info("Saving state to " + chunk_file)
    try:
      with open(chunk_file, "w") as f:
        chunk_num += 1
        f.write("%d" % chunk_num)
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return self.TestLayoutChunk(chunk_num, chunk_size)
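  # Worked example: with the default chunk_size of 1000, successive runs read
  # 1, 2, 3, ... back from valgrind_layout_chunk.txt and therefore cover
  # layout tests [1000..2000), [2000..3000), and so on (run_webkit_tests
  # wraps around the end of the list); the counter resets once it passes
  # 10000.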
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"ash": TestAsh, "ash_unittests": TestAsh,
"aura": TestAura, "aura_unittests": TestAura,
"automated_ui" : TestAutomatedUI,
"base": TestBase, "base_unittests": TestBase,
"browser": TestBrowser, "browser_tests": TestBrowser,
"chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
"components": TestComponents,"components_unittests": TestComponents,
"compositor": TestCompositor,"compositor_unittests": TestCompositor,
"content": TestContent, "content_unittests": TestContent,
"content_browsertests": TestContentBrowser,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"device": TestDevice, "device_unittests": TestDevice,
"ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"googleurl": TestGURL, "googleurl_unittests": TestGURL,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"ipc": TestIpc, "ipc_tests": TestIpc,
"interactive_ui": TestInteractiveUI,
"layout": TestLayout, "layout_tests": TestLayout,
"webkit": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"net": TestNet, "net_unittests": TestNet,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"reliability": TestReliability, "reliability_tests": TestReliability,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
"sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
"sql": TestSql, "sql_unittests": TestSql,
"sync": TestSync, "sync_unit_tests": TestSync,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"test_shell": TestTestShell, "test_shell_tests": TestTestShell,
"ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
"unit": TestUnit, "unit_tests": TestUnit,
"views": TestViews, "views_unittests": TestViews,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build_dir",
help="the location of the compiler output")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("", "--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("", "--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
# TODO(thestig) Remove this if we can.
parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
help="dummy compatibility flag for sharding_supervisor.")
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
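# Example invocations (paths, test names, and filters are illustrative):
#   python chrome_tests.py --build_dir=out/Debug --test base
#   python chrome_tests.py -b out/Debug -t net:NetUtilTest.* --tool drmemory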
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
        of the mixture `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: `Boolean`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
    if not isinstance(components, (list, tuple)):
      raise TypeError("components must be a list or tuple, but saw: %s" %
                      components)
    if not components:
      raise ValueError("components must be a non-empty list or tuple")
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
is_continuous = components[0].is_continuous
if not all(d.is_continuous == is_continuous for d in components):
raise TypeError(
"All components must either be continuous or not, but continuity "
"values are: %s" % [(d.name, d.is_continuous) for d in components])
static_event_shape = components[0].get_event_shape()
static_batch_shape = cat.get_batch_shape()
for d in components:
static_event_shape = static_event_shape.merge_with(d.get_event_shape())
static_batch_shape = static_batch_shape.merge_with(d.get_batch_shape())
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]):
num_components = cat.num_classes
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
super(Mixture, self).__init__(
dtype=dtype,
parameters={"cat": self._cat, "components": self._components,
"num_components": self._num_components},
is_reparameterized=False,
is_continuous=is_continuous,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape(self):
return self._cat.batch_shape()
def _get_batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._components[0].event_shape()
def _get_event_shape(self):
return self._static_event_shape
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
# This was checked to not be None at construction time.
static_event_rank = self.get_event_shape().ndims
# Expand the rank of x up to static_event_rank times so that
# broadcasting works correctly.
def expand(x):
expanded_x = x
for _ in range(static_event_rank):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
cat_probs = [expand(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
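  # Shape note for the expand() trick in _mean (illustrative): with
  # batch_shape [B] and event_shape [E], each cat probability c_p starts at
  # shape [B]; one expand_dims gives [B, 1], which then broadcasts against
  # component means of shape [B, E] in c_p * m.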
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.pack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample_n(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.get_batch_shape()
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape()
        batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.get_event_shape()
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
samples_class_c = self.components[c].sample_n(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat(0, ([n_class * batch_size], event_shape)))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat(0, (samples_shape,
self.event_shape())))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.get_event_shape()))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unpack(
cat_probs, num=self.num_components, axis=-1)
return cat_probs
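# A minimal usage sketch (the Normal components and all parameter values are
# illustrative, not part of this module):
#
#   from tensorflow.contrib.distributions.python.ops import normal
#
#   cat = categorical.Categorical(logits=[[-0.5, 0.5]])  # one batch member, 2 classes
#   comps = [normal.Normal(mu=[-1.0], sigma=[0.7]),
#            normal.Normal(mu=[1.0], sigma=[0.3])]
#   mix = Mixture(cat=cat, components=comps)
#
#   samples = mix.sample_n(4)    # shape [4, 1]
#   log_p = mix.log_prob([0.0])  # mixture log-density at x = 0, shape [1]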
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from collections import defaultdict
from heapq import nlargest
from luigi import six
import luigi
import luigi.hadoop
import luigi.hdfs
import luigi.postgres
class ExternalStreams(luigi.ExternalTask):
"""
Example of a possible external data dump
To depend on external targets (typically at the top of your dependency graph), you can define
an ExternalTask like this.
"""
date = luigi.DateParameter()
def output(self):
"""
Returns the target output for this task.
In this case, it expects a file to be present in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y-%m-%d.tsv'))
class Streams(luigi.Task):
"""
Faked version right now, just generates bogus data.
"""
date = luigi.DateParameter()
def run(self):
"""
Generates bogus data and writes it into the :py:meth:`~.Streams.output` target.
"""
with self.output().open('w') as output:
for _ in range(1000):
output.write('{} {} {}\n'.format(
random.randint(0, 999),
random.randint(0, 999),
random.randint(0, 999)))
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in the local file system.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class StreamsHdfs(Streams):
"""
This task performs the same work as :py:class:`~.Streams` but its output is written to HDFS.
This class uses :py:meth:`~.Streams.run` and
    overrides :py:meth:`~.Streams.output` to redefine HDFS as its target.
"""
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(self.date.strftime('data/streams_%Y_%m_%d_faked.tsv'))
class AggregateArtists(luigi.Task):
"""
    This task runs over the target data returned by :py:meth:`~.Streams.output` and
writes the result into its :py:meth:`~.AggregateArtists.output` target (local file).
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/artist_streams_{}.tsv".format(self.date_interval))
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Streams`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [Streams(date) for date in self.date_interval]
def run(self):
artist_count = defaultdict(int)
for t in self.input():
with t.open('r') as in_file:
for line in in_file:
                    _, artist, _ = line.strip().split()
artist_count[artist] += 1
with self.output().open('w') as out_file:
for artist, count in six.iteritems(artist_count):
out_file.write('{}\t{}\n'.format(artist, count))
class AggregateArtistsHadoop(luigi.hadoop.JobTask):
"""
This task runs a :py:class:`luigi.hadoop.JobTask` task
    over each target returned by :py:meth:`~.StreamsHdfs.output` and
writes the result into its :py:meth:`~.AggregateArtistsHadoop.output` target (a file in HDFS).
    This class uses :py:meth:`luigi.hadoop.JobTask.run`.
"""
date_interval = luigi.DateIntervalParameter()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.hdfs.HdfsTarget(
"data/artist_streams_%s.tsv" % self.date_interval,
format=luigi.hdfs.PlainDir
)
def requires(self):
"""
This task's dependencies:
* :py:class:`~.StreamsHdfs`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return [StreamsHdfs(date) for date in self.date_interval]
def mapper(self, line):
"""
The implementation of the map phase of the Hadoop job.
:param line: the input.
        :return: tuple (key, value), in this case (artist, 1) for each stream.
"""
_, artist, _ = line.strip().split()
yield artist, 1
def reducer(self, key, values):
"""
The implementation of the reducer phase of the Hadoop job.
:param key: the artist.
:param values: the stream count.
:return: tuple (artist, count of streams)
"""
yield key, sum(values)
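    # Worked example (values are illustrative): the stream line "13 562 998"
    # maps to ("562", 1); Hadoop groups the ones per artist, so
    # reducer("562", [1, 1, 1]) yields ("562", 3).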
class Top10Artists(luigi.Task):
"""
    This task runs over the target data returned by :py:meth:`~.AggregateArtists.output` or
    :py:meth:`~.AggregateArtistsHadoop.output` in case :py:attr:`~.Top10Artists.use_hadoop` is set, and
    writes the result into its :py:meth:`~.Top10Artists.output` target (a file on the local filesystem).
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
def requires(self):
"""
This task's dependencies:
* :py:class:`~.AggregateArtists` or
        * :py:class:`~.AggregateArtistsHadoop` if :py:attr:`~.Top10Artists.use_hadoop` is set.
:return: object (:py:class:`luigi.task.Task`)
"""
if self.use_hadoop:
return AggregateArtistsHadoop(self.date_interval)
else:
return AggregateArtists(self.date_interval)
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget("data/top_artists_%s.tsv" % self.date_interval)
def run(self):
top_10 = nlargest(10, self._input_iterator())
with self.output().open('w') as out_file:
for streams, artist in top_10:
out_line = '\t'.join([
str(self.date_interval.date_a),
str(self.date_interval.date_b),
artist,
str(streams)
])
out_file.write((out_line + '\n'))
def _input_iterator(self):
with self.input().open('r') as in_file:
for line in in_file:
artist, streams = line.strip().split()
yield int(streams), artist
class ArtistToplistToDatabase(luigi.postgres.CopyToTable):
"""
This task runs a :py:class:`luigi.postgres.CopyToTable` task
    over the target data returned by :py:meth:`~.Top10Artists.output` and
writes the result into its :py:meth:`~.ArtistToplistToDatabase.output` target which,
by default, is :py:class:`luigi.postgres.PostgresTarget` (a table in PostgreSQL).
This class uses :py:meth:`luigi.postgres.CopyToTable.run` and :py:meth:`luigi.postgres.CopyToTable.output`.
"""
date_interval = luigi.DateIntervalParameter()
use_hadoop = luigi.BoolParameter()
host = "localhost"
database = "toplists"
user = "luigi"
password = "abc123" # ;)
table = "top10"
columns = [("date_from", "DATE"),
("date_to", "DATE"),
("artist", "TEXT"),
("streams", "INT")]
def requires(self):
"""
This task's dependencies:
* :py:class:`~.Top10Artists`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return Top10Artists(self.date_interval, self.use_hadoop)
if __name__ == "__main__":
luigi.run()
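# Example invocation (illustrative; assumes this file is saved as
# top_artists.py and no central scheduler is running):
#   python top_artists.py Top10Artists --date-interval 2012-06 --local-scheduler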
# encoding: utf-8
# pylint: disable=invalid-name,missing-docstring
from mock import Mock
import pytest
from werkzeug.exceptions import HTTPException
from app.modules.users import permissions
def test_DenyAbortMixin():
with pytest.raises(HTTPException):
permissions.rules.DenyAbortMixin().deny()
def test_WriteAccessRule_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_regular_user = True
assert permissions.rules.WriteAccessRule().check() is True
authenticated_user_instance.is_regular_user = False
assert permissions.rules.WriteAccessRule().check() is False
def test_ActiveUserRoleRule_anonymous(anonymous_user_instance):
# pylint: disable=unused-argument
assert permissions.rules.ActiveUserRoleRule().check() is False
def test_ActiveUserRoleRule_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_active = True
assert permissions.rules.ActiveUserRoleRule().check() is True
authenticated_user_instance.is_active = False
assert permissions.rules.ActiveUserRoleRule().check() is False
def test_PasswordRequiredRule(authenticated_user_instance):
authenticated_user_instance.password = "correct_password"
assert permissions.rules.PasswordRequiredRule(password="correct_password").check() is True
assert permissions.rules.PasswordRequiredRule(password="wrong_password").check() is False
def test_AdminRoleRule_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_admin = True
assert permissions.rules.AdminRoleRule().check() is True
authenticated_user_instance.is_admin = False
assert permissions.rules.AdminRoleRule().check() is False
def test_SupervisorRoleRule_authenticated_user(authenticated_user_instance):
obj = Mock()
del obj.check_supervisor
assert permissions.rules.SupervisorRoleRule(obj).check() is False
obj.check_supervisor = lambda user: user == authenticated_user_instance
assert permissions.rules.SupervisorRoleRule(obj).check() is True
obj.check_supervisor = lambda user: False
assert permissions.rules.SupervisorRoleRule(obj).check() is False
def test_OwnerRoleRule_authenticated_user(authenticated_user_instance):
obj = Mock()
del obj.check_owner
assert permissions.rules.OwnerRoleRule(obj).check() is False
obj.check_owner = lambda user: user == authenticated_user_instance
assert permissions.rules.OwnerRoleRule(obj).check() is True
obj.check_owner = lambda user: False
assert permissions.rules.OwnerRoleRule(obj).check() is False
def test_PartialPermissionDeniedRule():
with pytest.raises(RuntimeError):
permissions.rules.PartialPermissionDeniedRule().check()
def test_PasswordRequiredPermissionMixin():
mixin = permissions.PasswordRequiredPermissionMixin(
password_required=False
)
with pytest.raises(AttributeError):
mixin.rule()
def test_WriteAccessPermission_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_regular_user = True
with permissions.WriteAccessPermission():
pass
authenticated_user_instance.is_regular_user = False
with pytest.raises(HTTPException):
with permissions.WriteAccessPermission():
pass
def test_RolePermission():
with permissions.RolePermission():
pass
with pytest.raises(RuntimeError):
with permissions.RolePermission(partial=True):
pass
def test_ActiveUserRolePermission_anonymous_user(anonymous_user_instance):
# pylint: disable=unused-argument
with pytest.raises(HTTPException):
with permissions.ActiveUserRolePermission():
pass
def test_ActiveUserRolePermission_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_active = True
with permissions.ActiveUserRolePermission():
pass
authenticated_user_instance.is_active = False
with pytest.raises(HTTPException):
with permissions.ActiveUserRolePermission():
pass
def test_AdminRolePermission_anonymous_user(anonymous_user_instance):
# pylint: disable=unused-argument
with pytest.raises(HTTPException):
with permissions.AdminRolePermission():
pass
def test_AdminRolePermission_authenticated_user(authenticated_user_instance):
authenticated_user_instance.is_admin = True
with permissions.AdminRolePermission():
pass
authenticated_user_instance.is_admin = False
with pytest.raises(HTTPException):
with permissions.AdminRolePermission():
pass
def test_AdminRolePermission_anonymous_user_with_password(anonymous_user_instance):
# pylint: disable=unused-argument
with pytest.raises(HTTPException):
with permissions.AdminRolePermission(password_required=True, password="any_password"):
pass
def test_AdminRolePermission_authenticated_user_with_password_is_admin(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
authenticated_user_instance.is_admin = True
with permissions.AdminRolePermission(password_required=True, password="correct_password"):
pass
with pytest.raises(HTTPException):
with permissions.AdminRolePermission(password_required=True, password="wrong_password"):
pass
def test_AdminRolePermission_authenticated_user_with_password_not_admin(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
authenticated_user_instance.is_admin = False
with pytest.raises(HTTPException):
with permissions.AdminRolePermission(password_required=True, password="correct_password"):
pass
with pytest.raises(HTTPException):
with permissions.AdminRolePermission(password_required=True, password="wrong_password"):
pass
def test_SupervisorRolePermission_anonymous_user(anonymous_user_instance):
# pylint: disable=unused-argument
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission():
pass
def test_SupervisorRolePermission_authenticated_user(authenticated_user_instance):
obj = Mock()
obj.check_supervisor = lambda user: user == authenticated_user_instance
with permissions.SupervisorRolePermission(obj=obj):
pass
del obj.check_supervisor
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission():
pass
def test_SupervisorRolePermission_anonymous_user_with_password(anonymous_user_instance):
# pylint: disable=unused-argument
obj = Mock()
obj.check_supervisor = lambda user: False
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission(
obj=obj,
password_required=True,
password="any_password"
):
pass
def test_SupervisorRolePermission_authenticated_user_with_password_with_check_supervisor(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
obj = Mock()
obj.check_supervisor = lambda user: user == authenticated_user_instance
with permissions.SupervisorRolePermission(
obj=obj,
password_required=True,
password="correct_password"
):
pass
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission(
obj=obj,
password_required=True,
password="wrong_password"
):
pass
def test_SupervisorRolePermission_authenticated_user_with_password_without_check_supervisor(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
obj = Mock()
del obj.check_supervisor
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission(
obj=obj,
password_required=True,
password="correct_password"
):
pass
with pytest.raises(HTTPException):
with permissions.SupervisorRolePermission(
obj=obj,
password_required=True,
password="wrong_password"
):
pass
def test_OwnerRolePermission_anonymous_user(anonymous_user_instance):
# pylint: disable=unused-argument
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission():
pass
def test_OwnerRolePermission_authenticated_user(authenticated_user_instance):
obj = Mock()
obj.check_owner = lambda user: user == authenticated_user_instance
with permissions.OwnerRolePermission(obj=obj):
pass
    del obj.check_owner
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission():
pass
def test_OwnerRolePermission_anonymous_user_with_password(anonymous_user_instance):
# pylint: disable=unused-argument
obj = Mock()
obj.check_owner = lambda user: False
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission(
obj=obj,
password_required=True,
password="any_password"
):
pass
def test_OwnerRolePermission_authenticated_user_with_password_with_check_owner(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
obj = Mock()
obj.check_owner = lambda user: user == authenticated_user_instance
with permissions.OwnerRolePermission(
obj=obj,
password_required=True,
password="correct_password"
):
pass
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission(
obj=obj,
password_required=True,
password="wrong_password"
):
pass
def test_OwnerRolePermission_authenticated_user_with_password_without_check_owner(
authenticated_user_instance
):
authenticated_user_instance.password = "correct_password"
obj = Mock()
del obj.check_owner
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission(
obj=obj,
password_required=True,
password="correct_password"
):
pass
with pytest.raises(HTTPException):
with permissions.OwnerRolePermission(
obj=obj,
password_required=True,
password="wrong_password"
):
pass
| |
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, Context
from django.template import loader, TemplateDoesNotExist
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group
from lingcod.features.models import Feature
from lingcod.features import user_sharing_groups
from lingcod.common.utils import get_logger
from lingcod.common import default_mimetypes as mimetypes
from lingcod.features import workspace_json, get_feature_by_uid
from django.template.defaultfilters import slugify
from lingcod.features.models import SpatialFeature, Feature, FeatureCollection
from django.core.urlresolvers import reverse
from django.views.decorators.cache import cache_page
from django.utils import simplejson
logger = get_logger()
def get_object_for_editing(request, uid, target_klass=None):
"""
Return the specified instance by uid for editing.
If a target_klass is provided, uid will be checked for consistency.
If the request has no logged-in user, a 401 Response will be returned. If
the item is not found, a 404 Response will be returned. If the user is
not authorized to edit the item (not the owner or a staff user), a 403 Not
Authorized Response will be returned.
usage:
instance = get_object_for_editing(request, 'mlpa_mpa_12', target_klass=Mpa)
if isinstance(instance, HttpResponse):
return instance
"""
if target_klass and not target_klass.model_uid() in uid:
return HttpResponse("Target class %s doesn't match the provided uid %s" %
(target_klass, uid),
status=401)
try:
instance = get_feature_by_uid(uid)
except ValueError:
return HttpResponse("Uid not valid: %s" % uid, status=401)
except:
return HttpResponse("Feature not found - %s" % uid, status=404)
if not request.user.is_authenticated():
return HttpResponse('You must be logged in.', status=401)
# Check that user owns the object or is staff
if not request.user.is_staff and request.user != instance.user:
return HttpResponseForbidden(
'You do not have permission to modify this object.')
return instance
def get_object_for_viewing(request, uid, target_klass=None):
"""
Return the specified instance by uid for viewing.
If a target_klass is provided, uid will be checked for consistency.
If the request has no authenticated user, a 401 Response will be returned.
If the item is not found, a 404 Response will be returned. If the user is
not authorized to view the item (not the owner or part of a group the item
is shared with), a 403 Not Authorized Response will be returned.
usage:
instance = get_object_for_viewing(request, 'mlpa_mpa_12', target_klass=Mpa)
if isinstance(instance, HttpResponse):
return instance
"""
if target_klass and not target_klass.model_uid() in uid:
return HttpResponse("Target class %s doesn't match the provided uid %s" %
(target_klass, uid),
status=401)
try:
instance = get_feature_by_uid(uid)
except ValueError:
return HttpResponse("Uid not valid: %s" % uid, status=401)
except:
return HttpResponse("Feature not found - %s" % uid, status=404)
viewable, response = instance.is_viewable(request.user)
if viewable:
return instance
else:
return response
# RESTful Generic Views
def handle_link(request, uids, link=None):
"""
Handles all requests to views setup via features.register using Link
objects.
Assuming a valid request, this generic view will call the view specified
by the link, passing an instance or instances argument containing the
relevant Feature(s).
If the incoming request is invalid, any one of the following errors may be
returned:
401: login required
403: user does not have permission (not admin user or doesn't own object
to be edited)
404: feature(s) could not be found
400: requested for feature classes not supported by this view
5xx: server error
"""
if link is None:
raise Exception('handle_link configured without link kwarg!')
uids = uids.split(',')
# check that the number of instances matches the link.select property
if len(uids) > 1 and link.select == 'single':
# too many
return HttpResponse(
'Not Supported Error: Requested %s for multiple instances' % (
link.title, ), status=400)
singles = ('single', 'multiple single', 'single multiple')
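# 'multiple single' / 'single multiple' mean the link accepts either a
# single instance or many; a plain 'single' link rejects multi-uid requests.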
if len(uids) == 1 and link.select not in singles:
# not enough
return HttpResponse(
'Not Supported Error: Requested %s for single instance' % (
link.title, ), status=400)
instances = []
for uid in uids:
if link.rel == 'edit':
if link.method.lower() == 'post' and request.method == 'GET':
resp = HttpResponse('Invalid Method', status=405)
resp['Allow'] = 'POST'
return resp
if link.edits_original is False:
# users who can view the object can then make copies
inst = get_object_for_viewing(request, uid)
else:
inst = get_object_for_editing(request, uid)
else:
inst = get_object_for_viewing(request, uid)
if isinstance(inst, HttpResponse):
return inst
else:
instances.append(inst)
for instance in instances:
if link.generic and instance.__class__ not in link.models:
return HttpResponse(
'Not Supported Error: Requested for "%s" feature class. This \
generic link only supports requests for feature classes %s' % (
instance.__class__.__name__,
', '.join([m.__name__ for m in link.models])), status=400)
if link.select == 'single':
return link.view(request, instances[0], **link.extra_kwargs)
else:
return link.view(request, instances, **link.extra_kwargs)
def delete(request, model=None, uid=None):
"""
When calling, provide the request object, reference to the resource
class, and the primary key of the object to delete.
Possible response codes:
200: delete operation successful
401: login required
403: user does not have permission (not admin user or doesn't own object)
404: resource for deletion could not be found
5xx: server error
"""
if model is None:
return HttpResponse('Model not specified in feature urls', status=500)
if request.method == 'DELETE':
if model is None or uid is None:
raise Exception('delete view not configured properly.')
instance = get_object_for_editing(request, uid, target_klass=model)
if isinstance(instance, HttpResponse):
# get_object_for_editing is trying to return a 404, 401, or 403
return instance
instance.delete()
return HttpResponse('{"status": 200}')
else:
return HttpResponse('DELETE http method must be used to delete',
status=405)
def multi_delete(request, instances):
"""
Generic view to delete multiple instances
"""
deleted = []
if request.method == 'DELETE':
for instance in instances:
uid = instance.uid
instance.delete()
deleted.append(uid)
return HttpResponse('{"status": 200}')
else:
return HttpResponse('DELETE http method must be used to delete',
status=405)
def create(request, model, action):
"""
When calling, provide the request object and a ModelForm class
POST: Create a new instance from filled out ModelForm
201: Created. Response body includes representation of resource
400: Validation error. Response body includes form. Form should
be displayed back to the user for correction.
401: Not logged in.
5xx: Server error.
"""
config = model.get_options()
form_class = config.get_form_class()
if not request.user.is_authenticated():
return HttpResponse('You must be logged in.', status=401)
title = 'New %s' % (config.slug, )
if request.method == 'POST':
values = request.POST.copy()
values.__setitem__('user', request.user.pk)
if request.FILES:
form = form_class(values, request.FILES, label_suffix='')
else:
form = form_class(values, label_suffix='')
if form.is_valid():
m = form.save(commit=False)
# Note on the following lines: form.save_m2m() must be called after save()
# but before run; this is accomplished in the Feature model's save() method.
kwargs = {}
kwargs['form']=form
m.save(**kwargs)
return to_response(
status=201,
location=m.get_absolute_url(),
select=m.uid,
show=m.uid
)
else:
context = config.form_context
user = request.user
context.update({
'form': form,
'title': title,
'action': action,
'is_ajax': request.is_ajax(),
'MEDIA_URL': settings.MEDIA_URL,
'is_spatial': issubclass(model, SpatialFeature),
'is_collection': issubclass(model, FeatureCollection),
'user': user,
})
context = decorate_with_manipulators(context, form_class)
c = RequestContext(request, context)
t = loader.get_template(config.form_template)
return HttpResponse(t.render(c), status=400)
else:
return HttpResponse('Invalid http method', status=405)
def create_form(request, model, action=None):
"""
Serves a form for creating new objects
GET only
"""
config = model.get_options()
form_class = config.get_form_class()
if action is None:
raise Exception('create_form view is not configured properly.')
if not request.user.is_authenticated():
return HttpResponse('You must be logged in.', status=401)
title = 'New %s' % (config.verbose_name)
user = request.user
context = config.form_context
if request.method == 'GET':
context.update({
'form': form_class(label_suffix=''),
'title': title,
'action': action,
'is_ajax': request.is_ajax(),
'MEDIA_URL': settings.MEDIA_URL,
'is_spatial': issubclass(model, SpatialFeature),
'is_collection': issubclass(model, FeatureCollection),
'user': user,
})
context = decorate_with_manipulators(context, form_class)
return render_to_response(config.form_template, context)
else:
return HttpResponse('Invalid http method', status=405)
def update_form(request, model, uid):
"""
Returns a form for editing features
"""
instance = get_object_for_editing(request, uid, target_klass=model)
if isinstance(instance, HttpResponse):
# get_object_for_editing is trying to return a 404, 401, or 403
return instance
try:
instance.get_absolute_url()
except:
raise Exception(
'Model to be edited must have get_absolute_url defined.')
try:
instance.name
except:
raise Exception('Model to be edited must have a name attribute.')
user = request.user
config = model.get_options()
if request.method == 'GET':
form_class = config.get_form_class()
form = form_class(instance=instance, label_suffix='')
context = config.form_context
context.update({
'form': form,
'title': "Edit '%s'" % (instance.name, ),
'action': instance.get_absolute_url(),
'is_ajax': request.is_ajax(),
'MEDIA_URL': settings.MEDIA_URL,
'is_spatial': issubclass(model, SpatialFeature),
'is_collection': issubclass(model, FeatureCollection),
'user': user,
})
context = decorate_with_manipulators(context, form_class)
return render_to_response(config.form_template, context)
else:
return HttpResponse('Invalid http method', status=405)
def update(request, model, uid):
"""
When calling, provide the request object, a model class, and the
primary key of the instance to be updated.
POST: Update instance.
possible response codes:
200: OK. Object updated and in response body.
400: Form validation error. Present form back to user.
401: Not logged in.
403: Forbidden. User is not staff or does not own object.
404: Instance for uid not found.
5xx: Server error.
"""
config = model.get_options()
instance = get_object_for_editing(request, uid, target_klass=model)
if isinstance(instance, HttpResponse):
# get_object_for_editing is trying to return a 404, 401, or 403
return instance
try:
instance.get_absolute_url()
except:
raise Exception('Model must have get_absolute_url defined.')
try:
instance.name
except:
raise Exception('Model to be edited must have a name attribute.')
if request.method == 'POST':
values = request.POST.copy()
# Even if request.user is different (ie request.user is staff)
# user is still set to the original owner to prevent staff from
# 'stealing'
values.__setitem__('user', instance.user.pk)
form_class = config.get_form_class()
if request.FILES:
form = form_class(
values, request.FILES, instance=instance, label_suffix='')
else:
form = form_class(values, instance=instance, label_suffix='')
if form.is_valid():
m = form.save(commit=False)
kwargs = {}
kwargs['form']=form
m.save(**kwargs)
return to_response(
status=200,
select=m.uid,
show=m.uid,
parent=m.collection,
)
else:
context = config.form_context
context.update({
'form': form,
'title': "Edit '%s'" % (instance.name, ),
'action': instance.get_absolute_url(),
'is_ajax': request.is_ajax(),
'MEDIA_URL': settings.MEDIA_URL,
'is_spatial': issubclass(model, SpatialFeature),
'is_collection': issubclass(model, FeatureCollection),
})
context = decorate_with_manipulators(context, form_class)
c = RequestContext(request, context)
t = loader.get_template(config.form_template)
return HttpResponse(t.render(c), status=400)
else:
return HttpResponse("""Invalid http method.
Yes we know, PUT is supposed to be used rather than POST,
but it was much easier to implement as POST :)""", status=405)
def resource(request, model=None, uid=None):
"""
Provides a resource for a django model that can be utilized by the
lingcod.features client module.
Implements actions for the following http actions:
POST: Update an object
DELETE: Delete it
GET: Provide a page representing the model. For MPAs, this is the
MPA attributes screen. The marinemap client will display this
page in the sidebar whenever the object is brought into focus.
To implement GET, this view needs to be passed a view function
that returns an HttpResponse or a template can be specified
that will be passed the instance and an optional extra_context
Uses lingcod.features.views.update and lingcod.feature.views.delete
"""
if model is None:
return HttpResponse('Model not specified in feature urls', status=500)
config = model.get_options()
if request.method == 'DELETE':
return delete(request, model, uid)
elif request.method == 'GET':
instance = get_object_for_viewing(request, uid, target_klass=model)
if isinstance(instance, HttpResponse):
# Object is not viewable so we return httpresponse
# should contain the appropriate error code
return instance
t = config.get_show_template()
context = config.show_context
context.update({
'instance': instance,
'MEDIA_URL': settings.MEDIA_URL,
'is_ajax': request.is_ajax(),
'template': t.name,
})
return HttpResponse(t.render(RequestContext(request, context)))
elif request.method == 'POST':
return update(request, model, uid)
def form_resources(request, model=None, uid=None):
if model is None:
return HttpResponse('Model not specified in feature urls', status=500)
if request.method == 'POST':
if uid is None:
return create(request, model, request.build_absolute_uri())
else:
return HttpResponse('Invalid http method', status=405)
elif request.method == 'GET':
if uid is None:
# Get the create form
return create_form(
request,
model,
action=request.build_absolute_uri())
else:
# get the update form
return update_form(request, model, uid)
else:
return HttpResponse('Invalid http method', status=405)
from lingcod.manipulators.manipulators import get_manipulators_for_model
# TODO: Refactor this so that it is part of Feature.Options.edit_context
def decorate_with_manipulators(extra_context, form_class):
try:
extra_context['json'] = simplejson.dumps(get_manipulators_for_model(form_class.Meta.model))
except:
extra_context['json'] = False
return extra_context
def copy(request, instances):
"""
Generic view that can be used to copy any feature classes. Supports
requests referencing multiple instances.
To copy, this view will call the copy() method with the request's user as
its sole argument. The Feature base class has a generic copy method, but
developers can override it. A poorly implemented copy method that does not
return the copied instance will raise an exception here.
This view returns a space-delimited list of the Feature uid's for
selection in the user-interface after this operation via the
X-MarineMap-Select response header.
"""
copies = []
# Capture the original instances' uids before the loop below, where the
# local name `copy` is rebound and shadows this view function.
untoggle = ' '.join([i.uid for i in instances])
for instance in instances:
copy = instance.copy(request.user)
if not copy or not isinstance(copy, Feature):
raise Exception('copy method on feature class %s did not return \
Feature instance.' % (instance.__class__.__name__, ))
copies.append(copy)
return to_response(
status=200,
select=copies,
untoggle=untoggle,
)
def kml(request, instances):
return kml_core(request, instances, kmz=False)
def kmz(request, instances):
return kml_core(request, instances, kmz=True)
def kml_core(request, instances, kmz):
"""
Generic view for KML representation of feature classes.
Can be overridden in options, but this provides a default.
"""
from lingcod.kmlapp.views import get_styles, create_kmz
from django.template.loader import get_template
from lingcod.common import default_mimetypes as mimetypes
from lingcod.features.models import FeatureCollection
user = request.user
try:
session_key = request.COOKIES['sessionid']
except:
session_key = 0
# Get features, collection from instances
features = []
collections = []
# If there is only a single instance with a kml_full property,
# just return the contents verbatim
if len(instances) == 1:
from lingcod.common.utils import is_text
filename = slugify(instances[0].name)
try:
kml = instances[0].kml_full
response = HttpResponse()
if is_text(kml) and kmz:
# kml_full is text, but they want as KMZ
kmz = create_kmz(kml, 'mm/doc.kml')
response['Content-Type'] = mimetypes.KMZ
response['Content-Disposition'] = 'attachment; filename=%s.kmz' % filename
response.write(kmz)
return response
elif is_text(kml) and not kmz:
# kml_full is text, they just want kml
response['Content-Type'] = mimetypes.KML
response['Content-Disposition'] = 'attachment; filename=%s.kml' % filename
response.write(kml)
return response
else:
# If the kml_full returns binary, always return kmz
# even if they asked for kml
response['Content-Type'] = mimetypes.KMZ
response['Content-Disposition'] = 'attachment; filename=%s.kmz' % filename
response.write(kml)  # the content is actually KMZ binary at this point
return response
except AttributeError:
pass
for instance in instances:
viewable, response = instance.is_viewable(user)
if not viewable:
return response
if isinstance(instance, FeatureCollection):
collections.append(instance)
else:
features.append(instance)
styles = get_styles(features,collections,links=False)
t = get_template('kmlapp/myshapes.kml')
context = Context({
'user': user,
'features': features,
'collections': collections,
'use_network_links': False,
'request_path': request.path,
'styles': styles,
'session_key': session_key,
'shareuser': None,
'sharegroup': None,
'feature_id': None,
})
kml = t.render(context)
response = HttpResponse()
filename = '_'.join([slugify(i.name) for i in instances])
if kmz:
kmz = create_kmz(kml, 'mm/doc.kml')
response['Content-Type'] = mimetypes.KMZ
response['Content-Disposition'] = 'attachment; filename=%s.kmz' % filename
response.write(kmz)
else:
response['Content-Type'] = mimetypes.KML
response['Content-Disposition'] = 'attachment; filename=%s.kml' % filename
response.write(kml)
response.write('\n')
return response
def share_form(request,model=None, uid=None):
"""
Generic view for showing the sharing form for an object
POST: Update the sharing status of an object
GET: Provide an html form for selecting groups
to which the feature will be shared.
"""
if model is None:
return HttpResponse('Model not specified in feature urls', status=500)
if uid is None:
return HttpResponse('Instance UID not specified', status=500)
obj = get_object_for_editing(request, uid, target_klass=model)
if isinstance(obj, HttpResponse):
return obj
if not isinstance(obj, Feature):
return HttpResponse('Instance is not a Feature', status=500)
obj_type_verbose = obj._meta.verbose_name
if request.method == 'GET':
# Display the form
# Which groups is this object already shared to?
already_shared_groups = obj.sharing_groups.all()
# Get a list of user's groups that have sharing permissions
groups = user_sharing_groups(request.user)
return render_to_response('sharing/share_form.html', {'groups': groups,
'already_shared_groups': already_shared_groups, 'obj': obj,
'obj_type_verbose': obj_type_verbose, 'user':request.user,
'MEDIA_URL': settings.MEDIA_URL,
'action': request.build_absolute_uri()})
elif request.method == 'POST':
group_ids = [int(x) for x in request.POST.getlist('sharing_groups')]
groups = Group.objects.filter(pk__in=group_ids)
try:
obj.share_with(groups)
return to_response(
status=200,
select=obj,
parent=obj.collection,
)
except Exception as e:
return HttpResponse(
'Unable to share objects with those specified groups: %r.' % e,
status=500)
else:
return HttpResponse( "Received unexpected " + request.method +
" request.", status=400 )
def manage_collection(request, action, uids, collection_model, collection_uid):
config = collection_model.get_options()
collection_instance = get_object_for_editing(request, collection_uid,
target_klass=collection_model)
if isinstance(collection_instance, HttpResponse):
return collection_instance
if request.method == 'POST':
uids = uids.split(',')
instances = []
for uid in uids:
inst = get_object_for_editing(request, uid)
if isinstance(inst, HttpResponse):
return inst
else:
instances.append(inst)
if action == 'remove':
for instance in instances:
instance.remove_from_collection()
elif action == 'add':
for instance in instances:
instance.add_to_collection(collection_instance)
else:
return HttpResponse("Invalid action %s." % action, status=500)
return to_response(
status=200,
select=instances,
parent=collection_instance,
)
else:
return HttpResponse("Invalid http method.", status=405)
@cache_page(60 * 60)
def workspace(request, username, is_owner):
user = request.user
if request.method == 'GET':
if user.is_anonymous() and is_owner:
return HttpResponse("Anonymous user can't access workspace as owner", status=403)
res = HttpResponse(workspace_json(user, is_owner), status=200)
res['Content-Type'] = mimetypes.JSON
return res
else:
return HttpResponse("Invalid http method.", status=405)
@cache_page(60 * 60)
def feature_tree_css(request):
from lingcod.features import registered_models
if request.method == 'GET':
styles = []
for model in registered_models:
try:
css = model.css()
if css:
styles.append(css)
except:
logger.error("Something is wrong with %s.css() class method" % model)
res = HttpResponse('\n'.join(styles), status=200)
res['Content-Type'] = 'text/css'
return res
else:
return HttpResponse("Invalid http method.", status=405)
def to_response(status=200, select=None, show=None, parent=None,
untoggle=None, location=None):
"""Will return an appropriately structured response that the client can
interpret to carry out the following actions:
select
Accepts a list of features. Tells the client to select these
features in the user interface after an editing operation
show
Accepts a single feature. Client will show that feature's
attribute window in the sidebar
untoggle
Accepts a list of features. Useful for toggling the visibility of
original features that are being copied so there are not multiple
overlapping copies on the map
parent
Gives a hint to the client that the edited feature falls within a
particular FeatureCollection. Without this hint the client may not
in all cases be able to perform select and show behaviors.
These behaviors are intended to be specified to the client using
X-MarineMap- style headers in the response. Unfortunately, we have to post
some forms via an iframe in order to upload files. This makes it
impossible to get the response headers on the client end. This function
therefore currently also returns all headers in a json structure in the
response body.
"""
headers = {
"status": status,
"Location": location,
"X-MarineMap-Select": to_csv(select),
"X-MarineMap-Parent-Hint": to_csv(parent),
"X-MarineMap-Show": to_csv(show),
"X-MarineMap-UnToggle": to_csv(untoggle),
}
headers = dict((k,v) for k,v in headers.items() if v != '' and v is not None)
response = HttpResponse(simplejson.dumps(headers), status=status)
for k,v in headers.items():
if k != 'status' and k != 'Location':
response[k] = v
return response
def to_csv(features):
if not features or isinstance(features, unicode):
return features
elif isinstance(features, Feature):
return features.uid
elif len(features) != 0:
return ' '.join([f.uid for f in features])
else:
return features
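# Illustrative sketch (feature uids are hypothetical): for features f1 and f2
# with uids 'mlpa_mpa_1' and 'mlpa_mpa_2', to_csv([f1, f2]) yields
# 'mlpa_mpa_1 mlpa_mpa_2', and to_response(status=200, select=[f1, f2])
# returns an HttpResponse carrying that value both as the X-MarineMap-Select
# header and inside the JSON body, so iframe-posted forms can still read it.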
def has_features(user):
"""
Util function to determine if a user owns any features
"""
from lingcod.features import registered_models
for model in registered_models:
try:
if len(model.objects.filter(user=user)) > 0:
return True
except:
pass
return False
| |
"""Tests related to SVG groups.
To run these tests, you can use (from root svgpathtools directory):
$ python -m unittest test.test_groups.TestGroups.test_group_flatten
"""
from __future__ import division, absolute_import, print_function
import unittest
from svgpathtools import *
from os.path import join, dirname
import numpy as np
def get_desired_path(name, paths):
return next(p for p in paths
if p.element.get('{some://testuri}name') == name)
class TestGroups(unittest.TestCase):
def check_values(self, v, z):
# Check that the components of 2D vector v match the components
# of complex number z
self.assertAlmostEqual(v[0], z.real)
self.assertAlmostEqual(v[1], z.imag)
def check_line(self, tf, v_s_vals, v_e_relative_vals, name, paths):
# Check that the endpoints of the line have been correctly transformed.
# * tf is the transform that should have been applied.
# * v_s_vals is a 2D list of the values of the line's start point
# * v_e_relative_vals is a 2D list of the values of the line's
# end point relative to the start point
# * name is the path name (value of the test:name attribute in
# the SVG document)
# * paths is the output of doc.paths()
v_s_vals.append(1.0)
v_e_relative_vals.append(0.0)
v_s = np.array(v_s_vals)
v_e = v_s + v_e_relative_vals
actual = get_desired_path(name, paths)
self.check_values(tf.dot(v_s), actual.start)
self.check_values(tf.dot(v_e), actual.end)
def test_group_flatten(self):
# Test the Document.paths() function against the
# groups.svg test file.
# There are 12 paths in that file, with various levels of being
# nested inside of group transforms.
# The check_line function is used to reduce the boilerplate,
# since all the tests are very similar.
# This test covers each of the different types of transforms
# that are specified by the SVG standard.
doc = Document(join(dirname(__file__), 'groups.svg'))
result = doc.paths()
self.assertEqual(12, len(result))
tf_matrix_group = np.array([[1.5, 0.0, -40.0],
[0.0, 0.5, 20.0],
[0.0, 0.0, 1.0]])
self.check_line(tf_matrix_group,
[183, 183], [0.0, -50],
'path00', result)
tf_scale_group = np.array([[1.25, 0.0, 0.0],
[0.0, 1.25, 0.0],
[0.0, 0.0, 1.0]])
self.check_line(tf_matrix_group.dot(tf_scale_group),
[122, 320], [-50.0, 0.0],
'path01', result)
self.check_line(tf_matrix_group.dot(tf_scale_group),
[150, 200], [-50, 25],
'path02', result)
self.check_line(tf_matrix_group.dot(tf_scale_group),
[150, 200], [-50, 25],
'path03', result)
tf_nested_translate_group = np.array([[1, 0, 20],
[0, 1, 0],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_scale_group
).dot(tf_nested_translate_group),
[150, 200], [-50, 25],
'path04', result)
tf_nested_translate_xy_group = np.array([[1, 0, 20],
[0, 1, 30],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_scale_group
).dot(tf_nested_translate_xy_group),
[150, 200], [-50, 25],
'path05', result)
tf_scale_xy_group = np.array([[0.5, 0, 0],
[0, 1.5, 0.0],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_scale_xy_group),
[122, 320], [-50, 0],
'path06', result)
a_07 = 20.0*np.pi/180.0
tf_rotate_group = np.array([[np.cos(a_07), -np.sin(a_07), 0],
[np.sin(a_07), np.cos(a_07), 0],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_rotate_group),
[183, 183], [0, 30],
'path07', result)
a_08 = 45.0*np.pi/180.0
tf_rotate_xy_group_R = np.array([[np.cos(a_08), -np.sin(a_08), 0],
[np.sin(a_08), np.cos(a_08), 0],
[0, 0, 1]])
tf_rotate_xy_group_T = np.array([[1, 0, 183],
[0, 1, 183],
[0, 0, 1]])
tf_rotate_xy_group = tf_rotate_xy_group_T.dot(
tf_rotate_xy_group_R).dot(
np.linalg.inv(tf_rotate_xy_group_T))
self.check_line(tf_matrix_group.dot(tf_rotate_xy_group),
[183, 183], [0, 30],
'path08', result)
a_09 = 5.0*np.pi/180.0
tf_skew_x_group = np.array([[1, np.tan(a_09), 0],
[0, 1, 0],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_skew_x_group),
[183, 183], [40, 40],
'path09', result)
a_10 = 5.0*np.pi/180.0
tf_skew_y_group = np.array([[1, 0, 0],
[np.tan(a_10), 1, 0],
[0, 0, 1]])
self.check_line(tf_matrix_group.dot(tf_skew_y_group),
[183, 183], [40, 40],
'path10', result)
# This last test is for handling transforms that are defined as
# attributes of a <path> element.
a_11 = -40*np.pi/180.0
tf_path11_R = np.array([[np.cos(a_11), -np.sin(a_11), 0],
[np.sin(a_11), np.cos(a_11), 0],
[0, 0, 1]])
tf_path11_T = np.array([[1, 0, 100],
[0, 1, 100],
[0, 0, 1]])
tf_path11 = tf_path11_T.dot(tf_path11_R).dot(np.linalg.inv(tf_path11_T))
self.check_line(tf_matrix_group.dot(tf_skew_y_group).dot(tf_path11),
[180, 20], [-70, 80],
'path11', result)
def check_group_count(self, doc, expected_count):
count = 0
for _ in doc.tree.getroot().iter('{{{0}}}g'.format(SVG_NAMESPACE['svg'])):
count += 1
self.assertEqual(expected_count, count)
def test_nested_group(self):
# A bug in the flattened_paths_from_group() implementation made it so that only top-level
# groups could have their paths flattened. This is a regression test to make
# sure that when a nested group is requested, its paths can also be flattened.
doc = Document(join(dirname(__file__), 'groups.svg'))
result = doc.paths_from_group(['matrix group', 'scale group'])
self.assertEqual(len(result), 5)
def test_add_group(self):
# Test `Document.add_group()` function and related Document functions.
doc = Document(None)
self.check_group_count(doc, 0)
base_group = doc.add_group()
base_group.set('id', 'base_group')
self.assertTrue(doc.contains_group(base_group))
self.check_group_count(doc, 1)
child_group = doc.add_group(parent=base_group)
child_group.set('id', 'child_group')
self.assertTrue(doc.contains_group(child_group))
self.check_group_count(doc, 2)
grandchild_group = doc.add_group(parent=child_group)
grandchild_group.set('id', 'grandchild_group')
self.assertTrue(doc.contains_group(grandchild_group))
self.check_group_count(doc, 3)
sibling_group = doc.add_group(parent=base_group)
sibling_group.set('id', 'sibling_group')
self.assertTrue(doc.contains_group(sibling_group))
self.check_group_count(doc, 4)
# Test that we can retrieve each new group from the document
self.assertEqual(base_group, doc.get_or_add_group(['base_group']))
self.assertEqual(child_group, doc.get_or_add_group(
['base_group', 'child_group']))
self.assertEqual(grandchild_group, doc.get_or_add_group(
['base_group', 'child_group', 'grandchild_group']))
self.assertEqual(sibling_group, doc.get_or_add_group(
['base_group', 'sibling_group']))
# Create a new nested group
new_child = doc.get_or_add_group(
['base_group', 'new_parent', 'new_child'])
self.check_group_count(doc, 6)
self.assertEqual(new_child, doc.get_or_add_group(
['base_group', 'new_parent', 'new_child']))
new_leaf = doc.get_or_add_group(
['base_group', 'new_parent', 'new_child', 'new_leaf'])
self.assertEqual(new_leaf, doc.get_or_add_group([
'base_group', 'new_parent', 'new_child', 'new_leaf']))
self.check_group_count(doc, 7)
path_d = ('M 206.07112,858.41289 L 206.07112,-2.02031 '
'C -50.738,-81.14814 -20.36402,-105.87055 52.52793,-101.01525 '
'L 103.03556,0.0 '
'L 0.0,111.11678')
svg_path = doc.add_path(path_d, group=new_leaf)
self.assertEqual(path_d, svg_path.get('d'))
path = parse_path(path_d)
svg_path = doc.add_path(path, group=new_leaf)
self.assertEqual(path_d, svg_path.get('d'))
# Test that paths are added to the correct group
new_sibling = doc.get_or_add_group(
['base_group', 'new_parent', 'new_sibling'])
doc.add_path(path, group=new_sibling)
self.assertEqual(len(new_sibling), 1)
self.assertEqual(path_d, new_sibling[0].get('d'))
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Oozie API classes.
This is mostly just codifying the data structures of the Oozie REST API.
http://incubator.apache.org/oozie/docs/3.2.0-incubating/docs/WebServicesAPI.html
"""
import logging
import re
from cStringIO import StringIO
from time import mktime
from desktop.lib import i18n
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_warn
import hadoop.confparse
from liboozie.utils import parse_timestamp, format_time
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
LOG = logging.getLogger(__name__)
class Action(object):
def __init__(self, json_dict):
self.json_dict = json_dict
for attr in self._ATTRS:
setattr(self, attr, json_dict.get(attr))
self._fixup()
def _fixup(self): pass
def is_finished(self):
return self.status in ('OK', 'SUCCEEDED', 'DONE')
@classmethod
def create(cls, action_class, action_dict):
if ControlFlowAction.is_control_flow(action_dict.get('type')):
return ControlFlowAction(action_dict)
else:
return action_class(action_dict)
def __str__(self):
return '%s - %s' % (self.type, self.name)
def to_json(self):
return self.json_dict.copy()
class ControlFlowAction(Action):
_ATTRS = [
'errorMessage',
'status',
'stats',
'data',
'transition',
'externalStatus',
'cred',
'conf',
'type',
'endTime',
'externalId',
'id',
'startTime',
'externalChildIDs',
'name',
'errorCode',
'trackerUri',
'retries',
'toString',
'consoleUrl'
]
@classmethod
def is_control_flow(cls, action_type):
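# Oozie reports control nodes with namespaced types such as ':START:'
# or ':KILL:', while concrete actions use plain types like 'map-reduce';
# the colon is the discriminator used here.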
return action_type is not None and (':' in action_type)
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
- protect externalId
"""
super(ControlFlowAction, self)._fixup()
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
if self.retries:
self.retries = int(self.retries)
if self.externalId and not re.match('job_.*', self.externalId):
self.externalId = None
self.conf_dict = {}
class WorkflowAction(Action):
_ATTRS = [
'conf',
'consoleUrl',
'data',
'endTime',
'errorCode',
'errorMessage',
'externalId',
'externalStatus',
'id',
'name',
'retries',
'startTime',
'status',
'trackerUri',
'transition',
'type',
'externalChildIDs',
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(WorkflowAction, self)._fixup()
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
if self.retries:
self.retries = int(self.retries)
if self.conf:
xml = StringIO(i18n.smart_str(self.conf))
try:
self.conf_dict = hadoop.confparse.ConfParse(xml)
except Exception, e:
LOG.error('Failed to parse XML configuration for Workflow action %s: %s' % (self.name, e))
self.conf_dict = {}
else:
self.conf_dict = {}
def get_absolute_url(self):
related_job_ids = []
if hasattr(self, 'oozie_coordinator') and self.oozie_coordinator:
related_job_ids.append('coordinator_job_id=%s' % self.oozie_coordinator.id)
if hasattr(self, 'oozie_bundle') and self.oozie_bundle:
related_job_ids.append('bundle_job_id=%s' % self.oozie_bundle.id)
if related_job_ids:
extra_params = '?' + '&'.join(related_job_ids)
else:
extra_params = ''
return reverse('oozie:list_oozie_workflow_action', kwargs={'action': self.id}) + extra_params
def get_absolute_log_url(self):
url = None
if self.externalId and re.match('job_.*', self.externalId):
url = reverse('jobbrowser.views.job_single_logs', kwargs={'job': self.externalId})
return url
def get_external_id_url(self):
url = None
if self.externalId and self.externalId.endswith('W'):
url = reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.externalId}) or ''
elif self.externalId and re.match('job_.*', self.externalId):
url = reverse('jobbrowser.views.single_job', kwargs={'job': self.externalId}) or ''
return url
class CoordinatorAction(Action):
_ATTRS = [
'status',
'runConf',
'errorMessage',
'missingDependencies',
'coordJobId',
'errorCode',
'actionNumber',
'consoleUrl',
'nominalTime',
'externalStatus',
'createdConf',
'createdTime',
'externalId',
'lastModifiedTime',
'type',
'id',
'trackerUri'
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(CoordinatorAction, self)._fixup()
if self.createdTime:
self.createdTime = parse_timestamp(self.createdTime)
if self.nominalTime:
self.nominalTime = parse_timestamp(self.nominalTime)
if self.lastModifiedTime:
self.lastModifiedTime = parse_timestamp(self.lastModifiedTime)
if self.runConf:
xml = StringIO(i18n.smart_str(self.runConf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
self.title = ' %s-%s' % (self.actionNumber, format_time(self.nominalTime))
class BundleAction(Action):
_ATTRS = [
'startTime',
'actions',
'frequency',
'concurrency',
'pauseTime',
'group',
'toString',
'consoleUrl',
'mat_throttling',
'status',
'conf',
'user',
'timeOut',
'coordJobPath',
'timeUnit',
'coordJobId',
'coordJobName',
'nextMaterializedTime',
'coordExternalId',
'acl',
'lastAction',
'executionPolicy',
'timeZone',
'endTime'
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(BundleAction, self)._fixup()
self.type = 'coord-action'
self.name = self.coordJobName
if self.conf:
xml = StringIO(i18n.smart_str(self.conf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
def get_progress(self):
"""How much more time before the next action."""
if self.lastAction is None:
return 0
next = mktime(parse_timestamp(self.lastAction))
start = mktime(parse_timestamp(self.startTime))
end = mktime(parse_timestamp(self.endTime))
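# Linear interpolation of elapsed time over the [start, end] window:
# e.g. with start=0, end=100 and the last action at 40, progress is 40%.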
if end != start:
progress = min(int((1 - (end - next) / (end - start)) * 100), 100)
else:
progress = 100
return progress
class Job(object):
"""
Accessing log and definition will trigger Oozie API calls.
"""
MAX_LOG_SIZE = 3500 * 20 # 20 pages
def __init__(self, api, json_dict):
for attr in self._ATTRS:
setattr(self, attr, json_dict.get(attr))
self._fixup()
self._api = api
self._log = None
self._definition = None
def _fixup(self):
"""
Fixup fields:
- expand actions
- time fields are struct_time
- run is integer
- configuration dict
- log
- definition
"""
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
self.actions = [Action.create(self.ACTION, act_dict) for act_dict in self.actions]
if self.conf is not None:
xml = StringIO(i18n.smart_str(self.conf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
def _get_log(self):
"""Get the log lazily, trigger Oozie API call at the first access."""
if self._log is None:
self._log = self._api.get_job_log(self.id)
return self._log[-Job.MAX_LOG_SIZE:]
log = property(_get_log)
def _get_definition(self):
"""Get the definition lazily, trigger Oozie API call at the first access."""
if self._definition is None:
self._definition = self._api.get_job_definition(self.id)
return self._definition
definition = property(_get_definition)
def start(self):
self._api.job_control(self.id, 'start')
def suspend(self):
self._api.job_control(self.id, 'suspend')
def resume(self):
self._api.job_control(self.id, 'resume')
def kill(self):
self._api.job_control(self.id, 'kill')
def available_actions(self):
"""
available_actions() -> Zero or more of [ 'start', 'suspend', 'resume', 'kill' ]
"""
if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
return []
res = []
if self.status == 'PREP':
res.append('start')
if self.status == 'RUNNING':
res.append('suspend')
if self.status == 'SUSPENDED':
res.append('resume')
res.append('kill')
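# e.g. a RUNNING job yields ['suspend', 'kill'] and a SUSPENDED one
# yields ['resume', 'kill'].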
return res
def check_request_permission(self, request):
"""Raise PopupException if request user doesn't have permission to modify workflow"""
if not request.user.is_superuser and request.user.username != self.user:
access_warn(request, _('Insufficient permission.'))
raise PopupException(_("Permission denied. User %(username)s cannot modify user %(user)s's job.") %
dict(username=request.user.username, user=self.user))
def get_control_flow_actions(self):
return [action for action in self.actions if ControlFlowAction.is_control_flow(action.type)]
def get_working_actions(self):
return [action for action in self.actions if not ControlFlowAction.is_control_flow(action.type)]
def is_running(self):
return self.status in Workflow.RUNNING_STATUSES | Coordinator.RUNNING_STATUSES | Bundle.RUNNING_STATUSES
def __str__(self):
return '%s - %s' % (self.id, self.status)
@property
def has_sla(self):
return '<sla:info>' in self.definition
class Workflow(Job):
_ATTRS = [
'actions',
'appName',
'appPath',
'conf',
'consoleUrl',
'createdTime',
'endTime',
'externalId',
'group',
'id',
'lastModTime',
'run',
'startTime',
'status',
'user',
'acl',
'parentId'
]
ACTION = WorkflowAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'SUSPENDED'])
FINISHED_STATUSES = set(['SUCCEEDED', 'KILLED', 'FAILED'])
def _fixup(self):
super(Workflow, self)._fixup()
if self.createdTime:
self.createdTime = parse_timestamp(self.createdTime)
if self.lastModTime:
self.lastModTime = parse_timestamp(self.lastModTime)
if self.run:
self.run = int(self.run)
@property
def type(self):
return 'Workflow'
def get_parent_job_id(self):
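# A workflow launched by a coordinator gets a parentId of the form
# '<coordinator-job-id>@<action-number>'; strip the action suffix.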
if self.parentId and '@' in self.parentId:
return self.parentId.split('@')[0]
return self.parentId
def get_absolute_url(self, format='html'):
extra_params = []
if format == 'json':
extra_params.append('format=json')
if hasattr(self, 'oozie_coordinator') and self.oozie_coordinator:
extra_params.append('coordinator_job_id=%s' % self.oozie_coordinator.id)
if hasattr(self, 'oozie_bundle') and self.oozie_bundle:
extra_params.append('bundle_job_id=%s' % self.oozie_bundle.id)
if extra_params:
extra_params = '?' + '&'.join(extra_params)
else:
extra_params = ''
return reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.id}) + extra_params
def get_progress(self, full_node_list=None):
if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
return 100 # Case of decision nodes
else:
if full_node_list is not None: # Should remove the un-reached branches if decision node
total_actions = len(full_node_list) - 1 # -1 because of Kill node
else:
total_actions = len(self.actions)
return int(sum([action.is_finished() for action in self.actions]) / float(max(total_actions, 1)) * 100)
class Coordinator(Job):
_ATTRS = [
'acl',
'actions',
'conf',
'concurrency',
'consoleUrl',
'coordExternalId',
'coordJobId',
'coordJobName',
'coordJobPath',
'endTime',
'executionPolicy',
'frequency',
'group',
'lastAction',
'mat_throttling',
'nextMaterializedTime',
'pauseTime',
'startTime',
'status',
'timeOut',
'timeUnit',
'timeZone',
'user',
'bundleId',
'total'
]
ACTION = CoordinatorAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'PREPSUSPENDED', 'SUSPENDED', 'SUSPENDEDWITHERROR', 'PREPPAUSED', 'PAUSED', 'PAUSEDWITHERROR'])
FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])
def _fixup(self):
super(Coordinator, self)._fixup()
if self.nextMaterializedTime is not None:
self.nextMaterializedTime = parse_timestamp(self.nextMaterializedTime)
else:
self.nextMaterializedTime = self.startTime
if self.pauseTime:
self.pauseTime = parse_timestamp(self.pauseTime)
# For when listing/mixing all the jobs together
self.id = self.coordJobId
self.appName = self.coordJobName
@property
def type(self):
return 'Coordinator'
def get_absolute_url(self, oozie_bundle=None, format='html'):
extra_params = []
if format == 'json':
extra_params.append('format=json')
if oozie_bundle:
extra_params.append('bundle_job_id=%s' % oozie_bundle.id)
if hasattr(self, 'bundleId') and self.bundleId:
extra_params.append('bundle_job_id=%s' % self.bundleId)
if extra_params:
extra_params = '?' + '&'.join(extra_params)
else:
extra_params = ''
return reverse('oozie:list_oozie_coordinator', kwargs={'job_id': self.id}) + extra_params
def get_progress(self):
"""How much more time before the final materialization."""
next = mktime(self.nextMaterializedTime)
start = mktime(self.startTime)
end = mktime(self.endTime)
if end != start:
progress = min(int((1 - (end - next) / (end - start)) * 100), 100)
else:
progress = 100
# Manage case of a rerun
action_count = float(len(self.actions))
if action_count != 0 and progress == 100:
progress = int(sum([action.is_finished() for action in self.actions]) / action_count * 100)
return progress
@classmethod
def aggreate(cls, actions):
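# Collapse a list of numeric action-number strings into contiguous
# 'first-last' ranges, e.g. ['1', '2', '3', '5'] -> ['1-3', '5-5'].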
if not actions:
return []
result = []
first = prev = actions[0]
for a in actions[1:]:
if int(a) != int(prev) + 1:
result.append('-'.join((first, prev)))
first = a
prev = a
result.append('-'.join((first, prev)))
return result
@property
def human_frequency(self):
from oozie.models import Coordinator
return Coordinator.CRON_MAPPING.get(self.frequency, self.frequency)
class Bundle(Job):
_ATTRS = [
'status',
'toString',
'group',
'conf',
'bundleJobName',
'startTime',
'bundleCoordJobs',
'kickoffTime',
'acl',
'bundleJobPath',
'createdTime',
'timeOut',
'consoleUrl',
'bundleExternalId',
'timeUnit',
'pauseTime',
'bundleJobId',
'endTime',
'user',
]
ACTION = BundleAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'SUSPENDED', 'PREPSUSPENDED', 'SUSPENDEDWITHERROR', 'PAUSED', 'PAUSEDWITHERROR', 'PREPPAUSED'])
FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])
def _fixup(self):
self.actions = self.bundleCoordJobs
super(Bundle, self)._fixup()
# For when listing/mixing all the jobs together
self.id = self.bundleJobId
self.appName = self.bundleJobName
@property
def type(self):
return 'Bundle'
def get_absolute_url(self, format='html'):
extra_params = ''
if format == 'json':
extra_params = '?format=json'
return reverse('oozie:list_oozie_bundle', kwargs={'job_id': self.id}) + extra_params
def get_progress(self):
progresses = [action.get_progress() for action in self.actions]
count = len(progresses)
if count != 0:
return sum(progresses) / float(count)
else:
return 0
class JobList(object):
"""
Represents a list of Oozie jobs (Workflows or Coordinators or Bundles).
"""
_ATTRS = [
'offset',
'len',
'total',
'jobs',
]
def __init__(self, klass, jobs_key, api, json_dict, filters=None):
"""
json_dict is the oozie json.
filters is (optionally) the list of filters used to select this list
"""
self._api = api
self.offset = int(json_dict['offset'])
self.total = int(json_dict['total'])
self.jobs = [klass(self._api, wf_dict) for wf_dict in json_dict[jobs_key]]
self.filters = filters
class WorkflowList(JobList):
def __init__(self, api, json_dict, filters=None):
super(WorkflowList, self).__init__(Workflow, 'workflows', api, json_dict, filters)
class CoordinatorList(JobList):
def __init__(self, api, json_dict, filters=None):
super(CoordinatorList, self).__init__(Coordinator, 'coordinatorjobs', api, json_dict, filters)
class BundleList(JobList):
def __init__(self, api, json_dict, filters=None):
super(BundleList, self).__init__(Bundle, 'bundlejobs', api, json_dict, filters)
| |
# This module contains utility functions for interacting with the
# Elasticsearch API.
import httplib
import json
import logging
import math
import re
import sys
logger = logging.getLogger(__name__)
class HttpError(Exception):
pass
class Connection(object):
def __init__(self, host, port):
self.conn = httplib.HTTPConnection(host, port)
self.es_version = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def connect(self):
self.conn.connect()
def close(self):
self.conn.close()
def request(self, method, path, data=None):
if data is None:
self.conn.request(method, path)
else:
self.conn.request(method, path, data)
resp = self.conn.getresponse()
if resp.status != 200:
raise HttpError("Expected HTTP 200 - got HTTP %d: %s %s "
"(data: %r)" %
(resp.status, method, path, data))
resp_body = resp.read()
try:
resp_payload = json.loads(resp_body)
except ValueError: # Broken JSON
if len(resp_body) == 0:
resp_payload = json.loads("{}")
else:
logger.error("Failed to parse JSON in response: %r" %
resp_body)
raise
return resp_payload
def get(self, path):
return self.request('GET', path)
def put(self, path, data):
return self.request('PUT', path, data)
def post(self, path, data=""):
return self.request('POST', path, data)
def delete(self, path, data=""):
return self.request('DELETE', path, data)
def master(self):
id = self.master_node_id()
state = self.get('/_cluster/state')
return state['nodes'][id]['name']
def master_node_id(self):
state = self.get('/_cluster/state')
return state['master_node']
def my_node_id(self):
# Elasticsearch 1.0.0 replaced the /_cluster/nodes endpoint
# with /_nodes. Check which version we're dealing with and
# choose the correct API.
if self.es_version is None:
# Remove everything except digits and . and - separators,
# turning e.g. 1.0.0-rc1 into 1.0.0-1. The subsequent split
# on the same separators leaves us with a list of numbers.
sanitized_version = re.sub(r'[^0-9.-]', '',
self.get('/')['version']['number'])
self.es_version = tuple(
int(s) for s in re.split(r'[.-]', sanitized_version))
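# e.g. '1.0.0-rc1' is sanitized to '1.0.0-1' and parsed as (1, 0, 0, 1)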
if self.es_version >= (1, 0, 0):
info = self.get('/_nodes/_local')
else:
info = self.get('/_cluster/nodes/_local')
return info['nodes'].keys()[0]
def indices(self):
"""Return a list of index names in no particular order."""
state = self.get('/_cluster/state')
return state['metadata']['indices'].keys()
def get_index_translog_disable_flush(self):
"""Return a dictionary showing the position of the
'translog.disable_flush' knob for each index in the cluster.
The dictionary will look like this:
{
"index1": True, # Autoflushing DISABLED
"index2": False, # Autoflushing ENABLED
"index3": "unknown", # Using default setting (probably enabled)
...
}
"""
disabled = {}
settings = self.get('/_settings')
setting_getters = [
lambda s: s['index.translog.disable_flush'],
lambda s: s['index']['translog']['disable_flush']]
for idx in settings:
idx_settings = settings[idx]['settings']
for getter in setting_getters:
try:
disabled[idx] = booleanise(getter(idx_settings))
except KeyError:
pass
if idx not in disabled:
disabled[idx] = 'unknown'
return disabled
def allocator_disabled(self):
"""Return a simplified one-word answer to the question, 'Has the
automatic shard allocator been disabled for this cluster?'
The answer will be one of "disabled" (yes), "enabled" (no), or
"unknown".
"""
state = "unknown"
setting_getters = [
lambda s: s['cluster.routing.allocation.disable_allocation'],
lambda s: s['cluster']['routing']['allocation']['disable_allocation']]
settings = self.get('/_cluster/settings')
for i in ['persistent', 'transient']:
for getter in setting_getters:
try:
v = booleanise(getter(settings[i]))
if v is True:
state = "disabled"
elif v is False:
state = "enabled"
except KeyError:
pass
return state
def flushing_disabled(self):
"""Return a simplified one-word answer to the question, 'Has
automatic transaction log flushing been disabled on all indexes in
the cluster?'
The answer will be one of "disabled" (yes, on all), "enabled" (no,
on all), "some" (yes, only on some), or "unknown".
"""
states = self.get_index_translog_disable_flush().values()
if not states:
return "unknown"
if all(s is True for s in states):
return "disabled"
if all(s is False for s in states):
return "enabled"
if any(s is False for s in states):
return "some"
return "unknown"
class TabularPrinter(object):
"""It prints stuff... in a tabular format.
Call row() once for each row you want to print. Each one of your
columns must be supplied to row() as a separate argument. With all
rows queued, call output() to dump out a formatted table.
tb = TabularPrinter()
tb.row("foo", "bar")
tb.row("baz")
tb.output()
TabularPrinter will automatically size its columns to suit the
supplied content.
"""
DEFAULT_MARGIN = " " * 4
DEFAULT_SEPARATOR = " "
def __init__(self, margin=DEFAULT_MARGIN,
separator=DEFAULT_SEPARATOR):
self.margin = str(margin)
self.separator = str(separator)
self._column_widths = []
self._max_columns = 0
self._row_count = 0
self._rows = []
@property
def row_count(self):
return self._row_count
def row(self, *columns):
columns = map(lambda c: str(c), columns)
self._rows.append(columns)
self._row_count += 1
column_widths = map(lambda c: len(c), columns)
self._column_widths = (
map(lambda (x, y): max(x, y),
self.nontruncating_zip(self._column_widths,
column_widths)))
self._max_columns = max(self._max_columns, len(columns))
def output(self, stream=sys.stdout):
for row in self._rows:
stream.write(self.margin)
column_idx = 0
for column in row:
fmt_string = "%%-%ds%s" % (
self._column_widths[column_idx],
self.separator)
print >>stream, fmt_string % column,
column_idx += 1
print >>stream
@staticmethod
def nontruncating_zip(*seqs):
"""Return a list of tuples, where each tuple contains the i-th
element from each of the argument sequences.
The returned list is as long as the longest argument sequence.
Shorter argument sequences will be represented in the output as
None padding elements:
nontruncating_zip([1, 2, 3], ['a', 'b'])
-> [(1, 'a'), (2, 'b'), (3, None)]
"""
n_seqs = len(seqs)
tups = []
idx = 0
while True:
empties = 0
tup = []
for seq in seqs:
try:
tup.append(seq[idx])
except IndexError:
empties += 1
tup.append(None)
if empties == n_seqs:
break
tup = tuple(tup)
tups.append(tup)
idx += 1
return tups
def booleanise(b):
"""Normalise a 'stringified' Boolean to a proper Python Boolean.
ElasticSearch has a habit of returning "true" and "false" in its
JSON responses when it should be returning `true` and `false`. If
`b` looks like a stringified Boolean true, return True. If `b`
looks like a stringified Boolean false, return False.
If we don't know what `b` is supposed to represent, return it back
to the caller.
"""
s = str(b)
if s.lower() == "true":
return True
if s.lower() == "false":
return False
return b
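# e.g. booleanise('TRUE') -> True, booleanise('false') -> False,
# booleanise(1) -> 1 (unrecognised values pass through unchanged)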
def fmt_bytes(bytes, precision=2):
"""Reduce a large number of `bytes` down to a humanised SI
equivalent and return the result as a string with trailing unit
abbreviation.
"""
UNITS = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']
if bytes == 0:
return '0 bytes'
log = math.floor(math.log(bytes, 1000))
return "%.*f %s" % (precision,
bytes / math.pow(1000, log),
UNITS[int(log)])
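# e.g. fmt_bytes(1234567) -> '1.23 MB'; fmt_bytes(999) -> '999.00 bytes'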
| |
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20161231.01"
USER_AGENT = 'Archive Team'
TRACKER_ID = 'googlecode'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
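# Hedged sketch, not part of the original script: a chunked variant of
# get_hash that yields the same SHA1 digest while bounding memory use on
# large files.
def get_hash_chunked(filename, blocksize=1 << 20):
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as in_file:
        # Read 1 MiB blocks until read() returns the empty-bytes sentinel.
        for block in iter(lambda: in_file.read(blocksize), b''):
            sha1.update(block)
    return sha1.hexdigest()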
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'googlecode.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--lua-script", "googlecode.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"--no-cookies",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "google.com",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "googlecode-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("googlecode-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
if item_type == 'archive':
wget_args.append('https://code.google.com/archive/p/{0}/'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fproject.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fsource-page-1.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fcommits-page-1.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fissues-page-1.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fwikis.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fdownloads-page-1.json?alt=media&stripTrailingSlashes=false'.format(item_value))
wget_args.append('https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/{0}/source-archive.zip'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/source'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/source/default/source'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/source/default/source?page=1'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/source/default/commits'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/source/default/commits?page=1'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/issues'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/issues?page=1'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/wikis'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/downloads'.format(item_value))
wget_args.append('https://code.google.com/archive/p/{0}/downloads?page=1'.format(item_value))
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
            print('*** Wget will bind to address {0} ***'.format(
                globals()['bind_address']))
print('')
return realize(wget_args, item)
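# Hedged illustration, not part of the original script: ItemInterpolation
# defers %-style substitution until the item is known, so for a
# hypothetical item with item_dir='/tmp/it',
#   realize(ItemInterpolation("%(item_dir)s/wget.log"), item)
# yields "/tmp/it/wget.log" at download time.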
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="googlecode",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/c/ca/Google_Code_logo.gif" height="50px" title=""/>
<h2>code.google.com <span class="links"><a href="https://code.google.com/">Website</a> · <a href="http://tracker.archiveteam.org/googlecode/">Leaderboard</a></span></h2>
<p>Saving the full Google Code!</p>
""",
utc_deadline=datetime.datetime(2016, 1, 24, 23, 59, 0)
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="googlecode"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| |
# -*- coding: utf-8 -*-
# vim:et:sts=4:ts=4
"""
This is a copy of the python2.6 stdlib urlparse with special cases factored out.
We've been doing painful special-case code to undo the special cases herein, but
it's overall easier and more reliable to just fix this code...
We preserve the 4-space indents to ease merging from upstream.
Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P. Hoffman, L. Masinter, J. Zwinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
the urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to de facto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provide a good indicator of parsing behavior.
"""
from collections import namedtuple
import six
# This is a stdlib file. To ease merging, we won't fix these style issues.
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
_parse_cache.clear()
class ResultMixin(object):
"""Shared methods for the parsed result objects."""
@property
def username(self):
netloc = self.netloc
if netloc is None:
return None
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
userinfo = userinfo.split(":", 1)[0]
return userinfo
return None
@property
def password(self):
netloc = self.netloc
if netloc is None:
return None
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)[1]
return None
@property
def hostname(self):
netloc = self.netloc
if netloc is None:
return None
if "@" in netloc:
netloc = netloc.rsplit("@", 1)[1]
if ":" in netloc:
netloc = netloc.split(":", 1)[0]
return netloc
@property
def port(self):
netloc = self.netloc
if netloc is None:
return None
if "@" in netloc:
netloc = netloc.rsplit("@", 1)[1]
if ":" in netloc:
port = netloc.split(":", 1)[1]
return int(port, 10)
return None
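# Hedged sketch, not from the upstream module: for a netloc of
# 'user:pw@example.com:8080' the properties above give username 'user',
# password 'pw', hostname 'example.com', and port 8080.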
class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
t = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = t
if ';' in url:
url, params = _splitparams(url)
else:
params = ''
return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i + 1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = None
query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
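# Hedged usage sketch, not from the upstream module: urlsplit keeps the
# netloc as a single string and does not unquote % escapes, e.g.
#   urlsplit('http://user:pw@example.com:8080/p?q=1#f')
#   -> SplitResult(scheme='http', netloc='user:pw@example.com:8080',
#                  path='/p', query='q=1', fragment='f')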
def urlunparse(data):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment = data
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit(data):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment = data
if netloc is not None:
if url and url[:1] != '/':
url = '/' + url
url = '//' + netloc + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return url
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return urlunparse((scheme, netloc, path,
params, query, fragment))
netloc = bnetloc
if path[:1] == '/':
return urlunparse((scheme, netloc, path,
params, query, fragment))
if not path:
path = bpath
if not params:
params = bparams
else:
path = path[:-1]
return urlunparse((scheme, netloc, path,
params, query, fragment))
if not query:
query = bquery
return urlunparse((scheme, netloc, path,
params, query, fragment))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == '..' and segments[i - 1] not in ('', '..'):
del segments[i - 1:i + 1]
break
i = i + 1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment))
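# Hedged sketch, not from the upstream module: relative segments resolve
# via the loop above, e.g.
#   urljoin('http://a/b/c/d', '../e')  ->  'http://a/b/e'
#   urljoin('http://a/b/c/d', 'g')     ->  'http://a/b/c/g'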
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
return defrag, frag
else:
return url, ''
# unquote method for parse_qs and parse_qsl
# Cannot use the one from urllib directly as it would create a circular
# reference: urllib uses urlparse methods (e.g. urljoin).
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16))) for a in _hexdig for b in _hexdig)
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
res = s.split('%')
for i in range(1, len(res)):
item = res[i]
try:
res[i] = _hextochr[item[:2]] + item[2:]
except KeyError:
res[i] = '%' + item
except UnicodeDecodeError:
res[i] = six.unichr(int(item[:2], 16)) + item[2:]
return "".join(res)
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
d = {}
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
if name in d:
d[name].append(value)
else:
d[name] = [value]
return d
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a list, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote(nv[0].replace('+', ' '))
value = unquote(nv[1].replace('+', ' '))
r.append((name, value))
return r
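# Hedged sketch, not from the upstream module: parse_qs groups repeated
# names and, by default, drops blank values, e.g.
#   parse_qs('a=1&a=2&b=')                   -> {'a': ['1', '2']}
#   parse_qs('a=1&b=', keep_blank_values=1)  -> {'a': ['1'], 'b': ['']}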
| |
import warnings
from distutils.version import LooseVersion
from typing import Iterable
import numpy as np
from .pycompat import dask_array_type
try:
import dask.array as da
from dask import __version__ as dask_version
except ImportError:
dask_version = "0.0.0"
da = None
if LooseVersion(dask_version) >= LooseVersion("2.0.0"):
meta_from_array = da.utils.meta_from_array
else:
# Copied from dask v2.4.0
# Used under the terms of Dask's license, see licenses/DASK_LICENSE.
import numbers
def meta_from_array(x, ndim=None, dtype=None):
"""Normalize an array to appropriate meta object
Parameters
----------
x: array-like, callable
Either an object that looks sufficiently like a Numpy array,
or a callable that accepts shape and dtype keywords
ndim: int
Number of dimensions of the array
dtype: Numpy dtype
A valid input for ``np.dtype``
Returns
-------
array-like with zero elements of the correct dtype
"""
        # If using x._meta, x must be a Dask Array; some libraries (e.g. zarr)
        # implement a _meta attribute that is incompatible with Dask Array._meta
if hasattr(x, "_meta") and isinstance(x, dask_array_type):
x = x._meta
if dtype is None and x is None:
raise ValueError("You must specify the meta or dtype of the array")
if np.isscalar(x):
x = np.array(x)
if x is None:
x = np.ndarray
if isinstance(x, type):
x = x(shape=(0,) * (ndim or 0), dtype=dtype)
if (
not hasattr(x, "shape")
or not hasattr(x, "dtype")
or not isinstance(x.shape, tuple)
):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [
0
if isinstance(a, numbers.Number)
else a.ndim
if hasattr(a, "ndim")
else len(a)
for a in x
]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
            return a if isinstance(x, list) else tuple(a)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[
(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))
]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if np.isscalar(meta):
meta = np.array(meta)
if dtype and meta.dtype != dtype:
meta = meta.astype(dtype)
return meta
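    # Hedged usage sketch, not part of the original module (assumes the
    # numpy import above): meta_from_array returns a zero-element array
    # carrying dtype/ndim information.
    #   meta_from_array(np.ones((5, 5), dtype='f4')).shape  ->  (0, 0)
    #   meta_from_array(np.ones(5), ndim=0).shape           ->  ()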
def _validate_pad_output_shape(input_shape, pad_width, output_shape):
"""Validates the output shape of dask.array.pad, raising a RuntimeError if they do not match.
In the current versions of dask (2.2/2.4), dask.array.pad with mode='reflect' sometimes returns
an invalid shape.
"""
isint = lambda i: isinstance(i, int)
if isint(pad_width):
pass
elif len(pad_width) == 2 and all(map(isint, pad_width)):
pad_width = sum(pad_width)
elif (
len(pad_width) == len(input_shape)
and all(map(lambda x: len(x) == 2, pad_width))
and all(isint(i) for p in pad_width for i in p)
):
pad_width = np.sum(pad_width, axis=1)
else:
# unreachable: dask.array.pad should already have thrown an error
raise ValueError("Invalid value for `pad_width`")
if not np.array_equal(np.array(input_shape) + pad_width, output_shape):
raise RuntimeError(
"There seems to be something wrong with the shape of the output of dask.array.pad, "
"try upgrading Dask, use a different pad mode e.g. mode='constant' or first convert "
"your DataArray/Dataset to one backed by a numpy array by calling the `compute()` method."
"See: https://github.com/dask/dask/issues/5303"
)
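# Hedged arithmetic sketch, not part of the original module: for
# input_shape=(3, 4) and pad_width=((1, 2), (0, 1)), the per-axis pad sums
# are [3, 1], so the only valid output_shape is (6, 5).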
def pad(array, pad_width, mode="constant", **kwargs):
padded = da.pad(array, pad_width, mode=mode, **kwargs)
# workaround for inconsistency between numpy and dask: https://github.com/dask/dask/issues/5303
if mode == "mean" and issubclass(array.dtype.type, np.integer):
warnings.warn(
'dask.array.pad(mode="mean") converts integers to floats. xarray converts '
"these floats back to integers to keep the interface consistent. There is a chance that "
"this introduces rounding errors. If you wish to keep the values as floats, first change "
"the dtype to a float before calling pad.",
UserWarning,
)
return da.round(padded).astype(array.dtype)
_validate_pad_output_shape(array.shape, pad_width, padded.shape)
return padded
if LooseVersion(dask_version) >= LooseVersion("2.8.1"):
median = da.median
else:
# Copied from dask v2.8.1
# Used under the terms of Dask's license, see licenses/DASK_LICENSE.
def median(a, axis=None, keepdims=False):
"""
        This works by automatically chunking the reduced axes to a single chunk
        and then calling the ``numpy.median`` function across the remaining
        dimensions.
"""
if axis is None:
raise NotImplementedError(
"The da.median function only works along an axis. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.median,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
return result
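    # Hedged usage sketch, hypothetical array: for a dask array chunked as
    # ((5, 5), (4,)), median(x, axis=0) rechunks axis 0 into one 10-row
    # chunk so each block sees the whole reduced axis before np.median runs.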
if LooseVersion(dask_version) > LooseVersion("2.9.0"):
nanmedian = da.nanmedian
else:
def nanmedian(a, axis=None, keepdims=False):
"""
        This works by automatically chunking the reduced axes to a single chunk
        and then calling the ``numpy.nanmedian`` function across the remaining
        dimensions.
"""
if axis is None:
raise NotImplementedError(
"The da.nanmedian function only works along an axis. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.nanmedian,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
return result
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
'''
desimodel.inputs.fiberpos
=========================
Utilities for updating positioner to fiber mapping.
'''
import os
import shutil
import numpy as np
from astropy.table import Table, vstack
from . import docdb
from ..io import datadir
def update(testdir=None, seed=2):
'''
Update positioner to fiber number mapping from DocDB
Options:
        testdir: if not None, write files here instead of
            $DESIMODEL/data/focalplane/fiberpos*
        seed:
            integer random number seed for randomization within a cassette
Writes testdir/fiberpos* or $DESIMODEL/data/focalplane/fiberpos*
'''
from desiutil.log import get_logger
log = get_logger()
#- Download input files from DocDB
cassette_file = docdb.download(2721, 2, 'cassette_order.txt')
xls_fp_layout = docdb.download(530, 14, 'DESI-0530-v14 (Focal Plane Layout).xlsx')
platescale_file = docdb.download(329, 15, 'Echo22Platescale.txt')
#- Pick filenames in output directory
if testdir is None:
outdir = os.path.join(datadir(), 'focalplane')
else:
outdir = testdir
if not os.path.isdir(outdir):
raise ValueError("Missing directory {}".format(testdir))
#- copy platescale file
outpsfile = os.path.join(outdir, 'platescale.txt')
shutil.copy(platescale_file, outpsfile)
log.info('Wrote {}'.format(outpsfile))
#- Random but reproducible
np.random.seed(seed)
#- DESI-0530 file name (fn) and sheet name (sn) shortcuts
fn = xls_fp_layout
sn = 'PositionerAndFiducialLocations'
#- Sanity check that columns are still in the same place
rowmin, rowmax = 49, 591
headers = docdb.xls_read_row(fn, sn, rowmin-1, 'B', 'S')
assert headers[0] == 'device_location_id'
assert headers[1] == 'device_type'
assert headers[2] == 'X'
assert headers[3] == 'Y'
assert headers[4] == 'Z'
assert headers[8] == 'cassetteID'
assert headers[15] == 'Q'
assert headers[17] == 'S'
#- Read Excel table with device locations
posloc = Table()
posloc['DEVICE'] = docdb.xls_read_col(fn, sn, 'B', rowmin, rowmax, dtype=int)
posloc['DEVICE_TYPE'] = docdb.xls_read_col(fn, sn, 'C', rowmin, rowmax, dtype=str)
posloc['X'] = docdb.xls_read_col(fn, sn, 'D', rowmin, rowmax, dtype=float)
posloc['Y'] = docdb.xls_read_col(fn, sn, 'E', rowmin, rowmax, dtype=float)
posloc['Z'] = docdb.xls_read_col(fn, sn, 'F', rowmin, rowmax, dtype=float)
posloc['Q'] = docdb.xls_read_col(fn, sn, 'Q', rowmin, rowmax, dtype=float)
posloc['S'] = docdb.xls_read_col(fn, sn, 'S', rowmin, rowmax, dtype=float)
#- Cassette N/A -> -1, and parse string -> float -> int
c = docdb.xls_read_col(fn, sn, 'J', rowmin, rowmax)
not_spectro_fiber = (c == 'N/A')
c[not_spectro_fiber] = '-1'
posloc['CASSETTE'] = np.array(c, dtype=float).astype(int)
#- Sanity check on values
ndevice = len(posloc)
assert ndevice == 543 #- 543 holes have been drilled
assert len(np.unique(posloc['DEVICE'])) == len(posloc['DEVICE'])
assert set(posloc['DEVICE_TYPE']) == set(['POS', 'FIF', 'GIF', 'NON', 'OPT', 'ETC'])
assert 0 < np.min(posloc['X']) and np.max(posloc['X']) < 410
assert 0 <= np.min(posloc['Q']) and np.max(posloc['Q']) < 36.0
assert 0 <= np.min(posloc['S']) and np.max(posloc['S']) < 412.3
assert np.all(posloc['S']**2 > posloc['X']**2 + posloc['Y']**2 + posloc['Z']**2)
assert np.min(posloc['CASSETTE']) == -1
assert np.max(posloc['CASSETTE']) == 11
assert set(posloc['DEVICE_TYPE'][posloc['CASSETTE']==11]) == set(['ETC', 'OPT'])
assert set(posloc['DEVICE_TYPE'][posloc['CASSETTE']==-1]) == set(['FIF', 'GIF', 'NON'])
assert 0 not in posloc['CASSETTE']
#- Read mapping of cassettes on focal plane to fibers on slithead
colnames = ['fibermin', 'fibermax', 'sp0', 'sp1', 'sp2', 'sp3', 'sp4', 'sp5', 'sp6', 'sp7', 'sp8', 'sp9']
cassettes = Table.read(cassette_file, format='ascii', names=colnames)
#- Randomize fibers within a cassette
petals = list()
for p in range(10):
fiberpos = posloc.copy(copy_data=True)
fiberpos['FIBER'] = -1
fiberpos['PETAL'] = p
fiberpos['SLIT'] = p
fiberpos['SPECTRO'] = p
iipos = (fiberpos['DEVICE_TYPE'] == 'POS')
### fiberpos['device'] += p*len(fiberpos)
for c in range(1,11):
ii = (cassettes['sp'+str(p)] == c)
assert np.count_nonzero(ii) == 1
fibermin = p*500 + cassettes['fibermin'][ii][0]
fibermax = p*500 + cassettes['fibermax'][ii][0]
jj = iipos & (fiberpos['CASSETTE'] == c)
assert np.count_nonzero(jj) == 50
fiber = list(range(fibermin, fibermax+1))
np.random.shuffle(fiber)
fiberpos['FIBER'][jj] = fiber
#- Additional columns
fiberpos['SLITBLOCK'] = (fiberpos['FIBER'] % 500) // 25
fiberpos['BLOCKFIBER'] = (fiberpos['FIBER'] % 500) % 25
fiberpos['LOCATION'] = p*1000 + fiberpos['DEVICE']
#- Petal 0 is at the "bottom"; See DESI-0530
phi = np.radians((7*36 + 36*p)%360)
x = np.cos(phi)*fiberpos['X'] - np.sin(phi)*fiberpos['Y']
y = np.sin(phi)*fiberpos['X'] + np.cos(phi)*fiberpos['Y']
fiberpos['X'] = x
fiberpos['Y'] = y
petals.append(fiberpos)
fiberpos = vstack(petals)
fiberpos.sort('FIBER')
POS = (fiberpos['DEVICE_TYPE'] == 'POS')
#- devices that don't go to spectrographs don't have slitblock, blockfiber
fiberpos['SLITBLOCK'][~POS] = -1
fiberpos['BLOCKFIBER'][~POS] = -1
#- More sanity checks before writing output
fp = fiberpos[POS]
assert len(fp) == 5000
assert len(np.unique(fp['FIBER'])) == 5000
assert min(fp['FIBER']) == 0
assert max(fp['FIBER']) == 4999
assert len(set(fp['SPECTRO'])) == 10
assert min(fp['SPECTRO']) == 0
assert max(fp['SPECTRO']) == 9
assert len(np.unique(fiberpos['DEVICE'])) == ndevice
assert len(np.unique(fiberpos['LOCATION'])) == len(fiberpos)
#- Drop some columns we don't need
fiberpos.remove_column('CASSETTE')
#- Update i8 -> i4 for integer columns
for colname in ['FIBER', 'DEVICE', 'SPECTRO', 'PETAL', 'SLIT']:
fiberpos.replace_column(colname, fiberpos[colname].astype('i4'))
#- Reorder columns
assert set(fiberpos.colnames) == set('DEVICE DEVICE_TYPE X Y Z Q S FIBER PETAL SLIT SPECTRO SLITBLOCK BLOCKFIBER LOCATION'.split())
colnames = 'PETAL DEVICE DEVICE_TYPE LOCATION FIBER X Y Z Q S SPECTRO SLIT SLITBLOCK BLOCKFIBER'.split()
fiberpos = fiberpos[colnames]
assert fiberpos.colnames == colnames
#- Set units and descriptions; see DESI-2724
fiberpos['X'].unit = 'mm'
fiberpos['Y'].unit = 'mm'
fiberpos['Z'].unit = 'mm'
fiberpos['Q'].unit = 'deg'
fiberpos['S'].unit = 'mm'
fiberpos['X'].description = 'focal surface location [mm]'
fiberpos['Y'].description = 'focal surface location [mm]'
fiberpos['Z'].description = 'focal surface location [mm]'
fiberpos['Q'].description = 'azimuthal angle on focal surface [deg]'
fiberpos['S'].description = 'radial distance along focal surface [mm]'
fiberpos['FIBER'].description = 'fiber number [0-4999]'
fiberpos['DEVICE'].description = 'focal plane device_loc number [0-542]'
fiberpos['SPECTRO'].description = 'spectrograph number [0-9]'
fiberpos['PETAL'].description = 'focal plane petal_loc number [0-9]'
fiberpos['SLIT'].description = 'spectrograph slit number [0-9]'
fiberpos['SLITBLOCK'].description = 'id of the slitblock on the slit [0-19]'
fiberpos['BLOCKFIBER'].description = 'id of the fiber on the slitblock [0-24]'
fiberpos['LOCATION'].description = 'global location id across entire focal plane [0-9543]; has gaps in sequence'
fiberpos.meta['comments'] = [
"Coordinates at zenith: +x = East = +RA; +y = South = -dec",
"PETAL and DEVICE refer to locations, not hardware serial numbers",
"Differences from DESI-2724 naming:",
' - Drops "_ID" from column names',
' - Drops "_LOC" from "DEVICE_LOC" and "PETAL_LOC"',
" - SLITBLOCK as int [0-19] instead of string [B0-B19]",
" - BLOCKFIBER as int [0-24] instead of string [F0-F24]",
"Convenience columns:",
" - FIBER = PETAL*500 + SLITBLOCK*25 + BLOCKFIBER",
" - LOCATION = PETAL*1000 + DEVICE",
]
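    #- Hedged arithmetic check, not part of the module: the convenience
    #- formulas above give, e.g. for PETAL=3, SLITBLOCK=7, BLOCKFIBER=12,
    #- DEVICE=42: FIBER = 3*500 + 7*25 + 12 = 1687 and
    #- LOCATION = 3*1000 + 42 = 3042.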
ecsvout = os.path.join(outdir, 'fiberpos.ecsv')
textout = os.path.join(outdir, 'fiberpos.txt')
fitsout = os.path.join(outdir, 'fiberpos.fits')
pngout = os.path.join(outdir, 'fiberpos.png')
#- Write old text format with just fiber, device, spectro, x, y, z
write_text_fiberpos(textout, fiberpos[POS])
log.info('Wrote {}'.format(textout))
#- Write all columns but only for positioners with fibers
fiberpos[POS].write(ecsvout, format='ascii.ecsv')
log.info('Wrote {}'.format(ecsvout))
fiberpos[POS].write(fitsout, format='fits', overwrite=True)
log.info('Wrote {}'.format(fitsout))
#- Write all columns and all rows, including
#- fiducials (device_type='FIF') and sky monitor (device_type='ETC')
fiberpos.sort('LOCATION')
fitsallout = fitsout.replace('.fits', '-all.fits')
ecsvallout = textout.replace('.txt', '-all.ecsv')
fiberpos.write(fitsallout, format='fits', overwrite=True)
fiberpos.write(ecsvallout, format='ascii.ecsv')
log.info('Wrote {}'.format(fitsallout))
log.info('Wrote {}'.format(ecsvallout))
#- Visualize mapping
POS = (fiberpos['DEVICE_TYPE'] == 'POS')
FIF = (fiberpos['DEVICE_TYPE'] == 'FIF')
ETC = (fiberpos['DEVICE_TYPE'] == 'ETC')
import pylab as P
P.jet() #- With apologies to viridis
P.figure(figsize=(7,7))
P.scatter(fiberpos['X'][POS], fiberpos['Y'][POS], c=fiberpos['FIBER'][POS]%500, edgecolor='none', s=20)
# P.scatter(fiberpos['x'][FIF], fiberpos['y'][FIF], s=5, color='k')
# P.plot(fiberpos['x'][ETC], fiberpos['y'][ETC], 'kx', ms=3)
P.grid(alpha=0.2, color='k')
P.xlim(-420,420)
P.ylim(-420,420)
P.xlabel('x [mm]')
P.ylabel('y [mm]')
P.title('Focal plane color coded by fiber location on slithead')
P.savefig(pngout, dpi=80)
log.info('Wrote {}'.format(pngout))
def write_text_fiberpos(filename, fiberpos):
'''
Writes a fiberpos table to filename, maintaining backwards compatibility
with the original fiberpos.txt format
Args:
filename: output file name string
fiberpos: astropy Table of fiber positions
'''
#- Write the old text file format for backwards compatibility
fxlines = [
"#- THIS FILE IS PROVIDED FOR BACKWARDS COMPATIBILITY",
"#- Please use fiberpos-all.[ecsv,fits] for additional columns",
'#- and non-spectrofiber device hole locations.',
        '#-',
"#- Fiber to focal plane device hole mapping; x,y,z in mm on focal plane",
"#- See doc/fiberpos.rst and DESI-0530 for more details.",
"#- Coordinates at zenith: +x = East = +RA; +y = South = -dec",
"",
"#- fiber=at spectrograph; fpdevice=numbering on focal plane",
"",
'#- fiber location spectro x y z']
for row in fiberpos:
fxlines.append("{:4d} {:4d} {:2d} {:12.6f} {:12.6f} {:12.6f}".format(
row['FIBER'], row['LOCATION'], row['SPECTRO'],
row['X'], row['Y'], row['Z'],
))
with open(filename, 'w') as fx:
fx.write('\n'.join(fxlines)+'\n')
| |
# -*- coding: utf-8 -*-
'''
Control virtual machines via Salt
'''
# Import Salt libs
import salt.client
import salt.output
import salt.utils.virt
def _determine_hyper(data, omit=''):
'''
Determine what the most resource free hypervisor is based on the given
data
'''
    # This is just checking for the hyper with the most free RAM; this
    # selection needs to become much more sophisticated.
hyper = ''
bestmem = 0
for hv_, comps in data.items():
if hv_ == omit:
continue
if not isinstance(comps, dict):
continue
if comps.get('freemem', 0) > bestmem:
bestmem = comps['freemem']
hyper = hv_
return hyper
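# Hedged usage sketch with hypothetical data: the hyper with the most
# freemem wins, and `omit` excludes a candidate.
#   _determine_hyper({'hv1': {'freemem': 2048}, 'hv2': {'freemem': 4096}})
#       -> 'hv2'
#   _determine_hyper({'hv1': {'freemem': 2048}, 'hv2': {'freemem': 4096}},
#                    omit='hv2')
#       -> 'hv1'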
def _find_vm(name, data, quiet=False):
'''
Scan the query data for the named vm
'''
for hv_ in data:
        # Check that data[hv_] is a dict and not an error string such as
        # '"virt.full_info" is not available.'
if not isinstance(data[hv_], dict):
continue
if name in data[hv_].get('vm_info', {}):
ret = {hv_: {name: data[hv_]['vm_info'][name]}}
if not quiet:
salt.output.display_output(
ret,
'nested',
__opts__)
return ret
return {}
def query(hyper=None, quiet=False):
'''
Query the virtual machines
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
for info in client.cmd_iter('virtual:physical',
'virt.full_info', expr_form='grain'):
if not info:
continue
if not isinstance(info, dict):
continue
chunk = {}
id_ = info.keys()[0]
if hyper:
if hyper != id_:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
chunk[id_] = info[id_]['ret']
ret.update(chunk)
if not quiet:
salt.output.display_output(chunk, 'virt_query', __opts__)
return ret
def list(hyper=None, quiet=False):
'''
List the virtual machines on each hyper
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
for info in client.cmd_iter('virtual:physical',
'virt.vm_info', expr_form='grain'):
if not info:
continue
if not isinstance(info, dict):
continue
chunk = {}
id_ = info.keys()[0]
if hyper:
if hyper != id_:
continue
if not isinstance(info[id_], dict):
continue
if 'ret' not in info[id_]:
continue
if not isinstance(info[id_]['ret'], dict):
continue
data = {}
for k, v in info[id_]['ret'].items():
if v['state'] in data:
data[v['state']].append(k)
else:
data[v['state']] = [k]
chunk[id_] = data
ret.update(chunk)
if not quiet:
salt.output.display_output(chunk, 'virt_list', __opts__)
return ret
def next_hyper():
'''
Return the hypervisor to use for the next autodeployed vm
'''
hyper = _determine_hyper(query(quiet=True))
print(hyper)
return hyper
def hyper_info(hyper=None):
'''
Return information about the hypervisors connected to this master
'''
data = query(hyper, quiet=True)
for id_ in data:
if 'vm_info' in data[id_]:
data[id_].pop('vm_info')
salt.output.display_output(data, 'nested', __opts__)
return data
def init(name, cpu, mem, image, hyper=None, seed=True, nic='default', install=True):
'''
Initialize a new vm
'''
print('Searching for Hypervisors')
data = query(hyper, quiet=True)
# Check if the name is already deployed
    for hv_ in data:
        if 'vm_info' in data[hv_]:
            if name in data[hv_]['vm_info']:
print('Virtual machine {0} is already deployed'.format(name))
return 'fail'
if hyper:
if hyper not in data:
print('Hypervisor {0} was not found'.format(hyper))
return 'fail'
else:
hyper = _determine_hyper(data)
if seed:
print('Minion will be preseeded')
kv = salt.utils.virt.VirtKey(hyper, name, __opts__)
kv.authorize()
client = salt.client.LocalClient(__opts__['conf_file'])
print('Creating VM {0} on hypervisor {1}'.format(name, hyper))
cmd_ret = client.cmd_iter(
hyper,
'virt.init',
[
name,
cpu,
mem,
image,
'seed={0}'.format(seed),
'nic={0}'.format(nic),
'install={0}'.format(install)
],
timeout=600)
next(cmd_ret)
print('VM {0} initialized on hypervisor {1}'.format(name, hyper))
return 'good'
def vm_info(name, quiet=False):
'''
Return the information on the named vm
'''
data = query(quiet=True)
return _find_vm(name, data, quiet)
def reset(name):
'''
Force power down and restart an existing vm
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find vm {0} to reset'.format(name))
return 'fail'
hyper = data.keys()[0]
cmd_ret = client.cmd_iter(
hyper,
'virt.reset',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Reset VM {0}'.format(name))
return ret
def start(name):
'''
Start a named virtual machine
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find vm {0} to start'.format(name))
return 'fail'
hyper = data.keys()[0]
if data[hyper][name]['state'] == 'running':
print('VM {0} is already running'.format(name))
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.start',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Started VM {0}'.format(name))
return 'good'
def force_off(name):
'''
Force power down the named virtual machine
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find vm {0} to destroy'.format(name))
return 'fail'
hyper = data.keys()[0]
if data[hyper][name]['state'] == 'shutdown':
print('VM {0} is already shutdown'.format(name))
        return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.destroy',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Powered off VM {0}'.format(name))
return 'good'
def purge(name):
'''
Destroy the named vm
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find vm {0} to purge'.format(name))
return 'fail'
hyper = data.keys()[0]
cmd_ret = client.cmd_iter(
hyper,
'virt.purge',
[name, True],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Purged VM {0}'.format(name))
return 'good'
def pause(name):
'''
Pause the named vm
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
print('Failed to find VM {0} to pause'.format(name))
return 'fail'
hyper = data.keys()[0]
if data[hyper][name]['state'] == 'paused':
print('VM {0} is already paused'.format(name))
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.pause',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Paused VM {0}'.format(name))
return 'good'
def resume(name):
'''
Resume a paused vm
'''
ret = {}
client = salt.client.LocalClient(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
        print('Failed to find VM {0} to resume'.format(name))
return 'not found'
hyper = data.keys()[0]
if data[hyper][name]['state'] != 'paused':
print('VM {0} is not paused'.format(name))
return 'bad state'
cmd_ret = client.cmd_iter(
hyper,
'virt.resume',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
print('Resumed VM {0}'.format(name))
return 'good'
def migrate(name, target=''):
'''
Migrate a vm from one hypervisor to another. This routine will just start
the migration and display information on how to look up the progress
'''
client = salt.client.LocalClient(__opts__['conf_file'])
data = query(quiet=True)
origin_data = _find_vm(name, data, quiet=True)
    if not origin_data:
        print('Named vm {0} was not found to migrate'.format(name))
        return ''
    origin_hyper = origin_data.keys()[0]
    disks = origin_data[origin_hyper][name]['disks']
if not target:
target = _determine_hyper(data, origin_hyper)
if target not in data:
        print('Target hypervisor {0} not found'.format(target))
return ''
client.cmd(target, 'virt.seed_non_shared_migrate', [disks, True])
    print(client.cmd_async(origin_hyper,
                           'virt.migrate_non_shared',
                           [name, target]))
| |
"""Test suite specifically targeting JP2 box layout.
"""
# Standard library imports ...
import doctest
import importlib.resources as ir
from io import BytesIO
import os
import pathlib
import re
import shutil
import struct
import tempfile
from uuid import UUID
import unittest
import warnings
# Third party library imports ...
import lxml.etree as ET
import numpy as np
# Local imports ...
import glymur
from glymur import Jp2k
from glymur.jp2box import (
ColourSpecificationBox, ContiguousCodestreamBox, FileTypeBox,
ImageHeaderBox, JP2HeaderBox, JPEG2000SignatureBox, BitsPerComponentBox,
PaletteBox, UnknownBox, InvalidJp2kError
)
from glymur.core import COLOR, OPACITY, SRGB, GREYSCALE
from glymur.core import RED, GREEN, BLUE, GREY, WHOLE_IMAGE
from . import fixtures, data
from .fixtures import MetadataBase
from .fixtures import OPENJPEG_NOT_AVAILABLE, OPENJPEG_NOT_AVAILABLE_MSG
def docTearDown(doctest_obj): # pragma: no cover
glymur.set_option('parse.full_codestream', False)
def load_tests(loader, tests, ignore): # pragma: no cover
"""Run doc tests as well."""
if os.name == "nt":
        # Can't do it on Windows due to a temporary file issue.
return tests
tests.addTests(doctest.DocTestSuite('glymur.jp2box',
tearDown=docTearDown))
return tests
@unittest.skipIf(OPENJPEG_NOT_AVAILABLE, OPENJPEG_NOT_AVAILABLE_MSG)
class TestDataEntryURL(fixtures.TestCommon):
"""Test suite for DataEntryURL boxes."""
def test_wrap_greyscale(self):
"""A single component should be wrapped as GREYSCALE."""
j = Jp2k(self.jp2file)
data = j[:]
red = data[:, :, 0]
# Write it back out as a raw codestream.
file1 = self.test_dir_path / 'file1.j2k'
j2k = glymur.Jp2k(file1, data=red)
# Ok, now rewrap it as JP2. The colorspace should be GREYSCALE.
file2 = self.test_dir_path / 'file2.jp2'
jp2 = j2k.wrap(file2)
self.assertEqual(jp2.box[2].box[1].colorspace, glymur.core.GREYSCALE)
def test_basic_url(self):
"""Just your most basic URL box."""
# Wrap our j2k file in a JP2 box along with an interior url box.
jp2 = Jp2k(self.jp2file)
url = 'http://glymur.readthedocs.org'
deurl = glymur.jp2box.DataEntryURLBox(0, (0, 0, 0), url)
boxes = [box for box in jp2.box if box.box_id != 'uuid']
boxes.append(deurl)
with open(self.temp_jp2_filename, mode='wb') as tfile:
jp22 = jp2.wrap(tfile.name, boxes=boxes)
actdata = [box.box_id for box in jp22.box]
expdata = ['jP ', 'ftyp', 'jp2h', 'jp2c', 'url ']
self.assertEqual(actdata, expdata)
self.assertEqual(jp22.box[4].version, 0)
self.assertEqual(jp22.box[4].flag, (0, 0, 0))
self.assertEqual(jp22.box[4].url, url)
def test_null_termination(self):
"""I.9.3.2 specifies that location field must be null terminated."""
jp2 = Jp2k(self.jp2file)
url = 'http://glymur.readthedocs.org'
deurl = glymur.jp2box.DataEntryURLBox(0, (0, 0, 0), url)
boxes = [box for box in jp2.box if box.box_id != 'uuid']
boxes.append(deurl)
with open(self.temp_jp2_filename, mode='wb') as tfile:
jp22 = jp2.wrap(tfile.name, boxes=boxes)
self.assertEqual(jp22.box[-1].length, 42)
# Go to the last box. Seek past the L, T, version,
# and flag fields.
with open(tfile.name, 'rb') as fptr:
fptr.seek(jp22.box[-1].offset + 4 + 4 + 1 + 3)
nbytes = (
jp22.box[-1].offset + jp22.box[-1].length - fptr.tell()
)
read_buffer = fptr.read(nbytes)
read_url = read_buffer.decode('utf-8')
self.assertEqual(url + chr(0), read_url)
@unittest.skipIf(OPENJPEG_NOT_AVAILABLE, OPENJPEG_NOT_AVAILABLE_MSG)
class TestChannelDefinition(fixtures.TestCommon):
"""Test suite for channel definition boxes."""
@classmethod
def setUpClass(cls):
"""Need a one_plane plane image for greyscale testing."""
j2k = Jp2k(glymur.data.goodstuff())
data = j2k[:]
cls.planes_dir = pathlib.Path(tempfile.mkdtemp())
# Write the first component back out to file.
cls.one_plane = cls.planes_dir / 'one_plane.j2k'
Jp2k(cls.one_plane, data=data[:, :, 0])
# Write the first two components back out to file.
cls.two_planes = cls.planes_dir / 'two_planes.j2k'
Jp2k(cls.two_planes, data=data[:, :, 0:2])
# Write four components back out to file.
cls.four_planes = cls.planes_dir / 'four_planes.j2k'
shape = (data.shape[0], data.shape[1], 1)
        alpha = np.zeros(shape, dtype=data.dtype)
data4 = np.concatenate((data, alpha), axis=2)
Jp2k(cls.four_planes, data=data4)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.planes_dir)
def setUp(self):
super(TestChannelDefinition, self).setUp()
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
self.jp2b = JPEG2000SignatureBox()
self.ftyp = FileTypeBox()
self.jp2h = JP2HeaderBox()
self.jp2c = ContiguousCodestreamBox()
self.ihdr = ImageHeaderBox(
height=height, width=width, num_components=num_components
)
self.ihdr1 = ImageHeaderBox(
height=height, width=width, num_components=1
)
self.ihdr2 = ImageHeaderBox(
height=height, width=width, num_components=2
)
self.ihdr4 = ImageHeaderBox(
height=height, width=width, num_components=4
)
self.colr_rgb = ColourSpecificationBox(colorspace=SRGB)
self.colr_gr = ColourSpecificationBox(colorspace=GREYSCALE)
def test_cdef_no_inputs(self):
"""channel_type and association are required inputs."""
with self.assertRaises(TypeError):
glymur.jp2box.ChannelDefinitionBox()
def test_rgb_with_index(self):
"""Just regular RGB."""
j2k = Jp2k(self.j2kfile)
channel_type = [COLOR, COLOR, COLOR]
association = [RED, GREEN, BLUE]
cdef = glymur.jp2box.ChannelDefinitionBox(
index=[0, 1, 2],
channel_type=channel_type,
association=association
)
boxes = [self.ihdr, self.colr_rgb, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
jp2 = Jp2k(tfile.name)
jp2h = jp2.box[2]
boxes = [box.box_id for box in jp2h.box]
self.assertEqual(boxes, ['ihdr', 'colr', 'cdef'])
self.assertEqual(jp2h.box[2].index, (0, 1, 2))
self.assertEqual(
jp2h.box[2].channel_type, (COLOR, COLOR, COLOR)
)
self.assertEqual(
jp2h.box[2].association, (RED, GREEN, BLUE)
)
def test_rgb(self):
"""Just regular RGB, but don't supply the optional index."""
j2k = Jp2k(self.j2kfile)
channel_type = [COLOR, COLOR, COLOR]
association = [RED, GREEN, BLUE]
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr, self.colr_rgb, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
jp2 = Jp2k(tfile.name)
jp2h = jp2.box[2]
boxes = [box.box_id for box in jp2h.box]
self.assertEqual(boxes, ['ihdr', 'colr', 'cdef'])
self.assertEqual(jp2h.box[2].index, (0, 1, 2))
self.assertEqual(
jp2h.box[2].channel_type, (COLOR, COLOR, COLOR)
)
self.assertEqual(
jp2h.box[2].association, (RED, GREEN, BLUE)
)
def test_rgba(self):
"""Just regular RGBA."""
j2k = Jp2k(self.four_planes)
channel_type = (COLOR, COLOR, COLOR, OPACITY)
association = (RED, GREEN, BLUE, WHOLE_IMAGE)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr4, self.colr_rgb, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
jp2 = Jp2k(tfile.name)
jp2h = jp2.box[2]
boxes = [box.box_id for box in jp2h.box]
self.assertEqual(boxes, ['ihdr', 'colr', 'cdef'])
self.assertEqual(jp2h.box[2].index, (0, 1, 2, 3))
self.assertEqual(jp2h.box[2].channel_type, channel_type)
self.assertEqual(jp2h.box[2].association, association)
def test_bad_rgba(self):
"""R, G, and B must be specified."""
j2k = Jp2k(self.four_planes)
channel_type = (COLOR, COLOR, OPACITY, OPACITY)
association = (RED, GREEN, BLUE, WHOLE_IMAGE)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr, self.colr_rgb, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_grey(self):
"""Just regular greyscale."""
j2k = Jp2k(self.one_plane)
channel_type = (COLOR,)
association = (GREY,)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr1, self.colr_gr, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
jp2 = Jp2k(tfile.name)
jp2h = jp2.box[2]
boxes = [box.box_id for box in jp2h.box]
self.assertEqual(boxes, ['ihdr', 'colr', 'cdef'])
self.assertEqual(jp2h.box[2].index, (0,))
self.assertEqual(jp2h.box[2].channel_type, channel_type)
self.assertEqual(jp2h.box[2].association, association)
def test_grey_alpha(self):
"""Just regular greyscale plus alpha."""
j2k = Jp2k(self.two_planes)
channel_type = (COLOR, OPACITY)
association = (GREY, WHOLE_IMAGE)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr2, self.colr_gr, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
jp2 = Jp2k(tfile.name)
jp2h = jp2.box[2]
boxes = [box.box_id for box in jp2h.box]
self.assertEqual(boxes, ['ihdr', 'colr', 'cdef'])
self.assertEqual(jp2h.box[2].index, (0, 1))
self.assertEqual(jp2h.box[2].channel_type, channel_type)
self.assertEqual(jp2h.box[2].association, association)
def test_bad_grey_alpha(self):
"""A greyscale image with alpha layer must specify a color channel"""
j2k = Jp2k(self.two_planes)
channel_type = (OPACITY, OPACITY)
association = (GREY, WHOLE_IMAGE)
        # This cdef box marks both channels as OPACITY, leaving no COLOR
        # channel, so wrapping below must fail.
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr, self.colr_gr, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises((OSError, RuntimeError)):
j2k.wrap(tfile.name, boxes=boxes)
def test_only_one_cdef_in_jp2h(self):
"""There can only be one channel definition box in the jp2 header."""
j2k = Jp2k(self.j2kfile)
channel_type = (COLOR, COLOR, COLOR)
association = (RED, GREEN, BLUE)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.ihdr, cdef, self.colr_rgb, cdef]
self.jp2h.box = boxes
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_not_in_jp2h(self):
"""need cdef in jp2h"""
j2k = Jp2k(self.j2kfile)
boxes = [self.ihdr, self.colr_rgb]
self.jp2h.box = boxes
channel_type = (COLOR, COLOR, COLOR)
association = (RED, GREEN, BLUE)
cdef = glymur.jp2box.ChannelDefinitionBox(
channel_type=channel_type, association=association
)
boxes = [self.jp2b, self.ftyp, self.jp2h, cdef, self.jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises((RuntimeError, OSError)):
j2k.wrap(tfile.name, boxes=boxes)
class TestFileTypeBox(fixtures.TestCommon):
"""Test suite for ftyp box issues."""
def test_bad_brand_on_parse(self):
"""
        SCENARIO: The JP2 file's file type box does not contain a valid brand.
        EXPECTED RESULT: InvalidJp2kError
"""
with ir.path(data, 'issue396.jp2') as path:
with warnings.catch_warnings():
# Lots of things wrong with this file.
warnings.simplefilter('ignore')
with self.assertRaises(InvalidJp2kError):
Jp2k(path)
def test_brand_unknown(self):
"""A ftyp box brand must be 'jp2 ' or 'jpx '."""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ftyp = glymur.jp2box.FileTypeBox(brand='jp3')
with tempfile.TemporaryFile() as tfile:
with self.assertRaises(InvalidJp2kError):
ftyp.write(tfile)
def test_cl_entry_unknown(self):
"""A ftyp box cl list can only contain 'jp2 ', 'jpx ', or 'jpxb'."""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Bad compatibility list item.
ftyp = glymur.jp2box.FileTypeBox(compatibility_list=['jp3'])
with tempfile.TemporaryFile() as tfile:
with self.assertRaises(InvalidJp2kError):
ftyp.write(tfile)
def test_cl_entry_not_utf8(self):
"""A ftyp box cl list entry must be utf-8 decodable."""
with open(self.jp2file, mode='rb') as f:
data = f.read()
# Replace bytes 28-32 with bad utf-8 data
data = data[:28] + b'\xff\xff\xff\xff' + data[32:]
with open(self.temp_jp2_filename, mode='wb') as tfile:
tfile.write(data)
tfile.flush()
with self.assertWarns(UserWarning):
Jp2k(tfile.name)
class TestPaletteBox(fixtures.TestCommon):
"""Test suite for pclr box instantiation."""
def test_writing_with_different_bitdepths(self):
"""Bitdepths must be the same when writing."""
palette = np.array([[255, 0, 255], [0, 255, 0]], dtype=np.uint16)
bps = (8, 16, 8)
signed = (False, False, False)
pclr = glymur.jp2box.PaletteBox(
palette, bits_per_component=bps, signed=signed
)
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(InvalidJp2kError):
pclr.write(tfile)
def test_signed_components(self):
"""
Palettes with signed components are not supported.
"""
b = BytesIO()
# L, T
b.write(struct.pack('>I4s', 20, b'pclr'))
# Palette is 2 rows, 3 columns
ncols = 3
nrows = 2
b.write(struct.pack('>HB', nrows, ncols))
# bits per sample is 8, but signed
bps = (np.int8(7), np.int8(7), np.int8(7))
bps_signed = (x | 0x80 for x in bps)
b.write(struct.pack('BBB', *bps_signed))
# Write the palette itself.
#
buffer = np.int8([[0, 0, 0], [127, 127, 127]])
b.write(struct.pack('BBB', *buffer[0]))
b.write(struct.pack('BBB', *buffer[1]))
# Seek back to point after L, T
b.seek(8)
with self.assertRaises(InvalidJp2kError):
PaletteBox.parse(b, 8, 20)
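    # Hedged layout note, not from the original suite: the pclr payload
    # assembled above is NE (2-byte entry count), NPC (1-byte column
    # count), one B(i) byte per column (bit 0x80 flags signed, low bits
    # store bit depth - 1), then the palette rows themselves.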
class TestAppend(fixtures.TestCommon):
"""Tests for append method."""
def test_append_xml(self):
"""Should be able to append an XML box."""
with open(self.temp_jp2_filename, mode='wb') as tfile:
shutil.copyfile(self.jp2file, tfile.name)
jp2 = Jp2k(tfile.name)
b = BytesIO(b'<?xml version="1.0"?><data>0</data>')
doc = ET.parse(b)
xmlbox = glymur.jp2box.XMLBox(xml=doc)
jp2.append(xmlbox)
# The sequence of box IDs should be the same as before, but with an
# xml box at the end.
box_ids = [box.box_id for box in jp2.box]
expected = ['jP ', 'ftyp', 'jp2h', 'uuid', 'jp2c', 'xml ']
self.assertEqual(box_ids, expected)
self.assertEqual(
ET.tostring(jp2.box[-1].xml.getroot()),
b'<data>0</data>'
)
def test_only_jp2_allowed_to_append(self):
"""Only JP2 files are allowed to be appended."""
with open(self.temp_j2k_filename, mode="wb") as tfile:
shutil.copyfile(self.j2kfile, tfile.name)
j2k = Jp2k(tfile.name)
# Make an XML box. XML boxes should always be appendable to jp2
# files.
the_xml = ET.fromstring('<?xml version="1.0"?><data>0</data>')
xmlbox = glymur.jp2box.XMLBox(xml=the_xml)
with self.assertRaises(RuntimeError):
j2k.append(xmlbox)
def test_length_field_is_zero(self):
"""L=0 (length field in box header) is handled.
L=0 implies that the box extends to the end of the file, i.e. it is the
last box. If this is not handled properly, the appended box is never seen.
"""
baseline_jp2 = Jp2k(self.jp2file)
with open(self.temp_jp2_filename, mode='wb') as tfile:
with open(self.jp2file, 'rb') as ifile:
# Everything up until the jp2c box.
offset = baseline_jp2.box[-1].offset
tfile.write(ifile.read(offset))
# Write the L, T fields of the jp2c box such that L == 0
write_buffer = struct.pack('>I4s', int(0), b'jp2c')
tfile.write(write_buffer)
# Write out the rest of the codestream.
ifile.seek(offset + 8)
tfile.write(ifile.read())
tfile.flush()
jp2 = Jp2k(tfile.name)
b = BytesIO(b'<?xml version="1.0"?><data>0</data>')
doc = ET.parse(b)
xmlbox = glymur.jp2box.XMLBox(xml=doc)
jp2.append(xmlbox)
# The sequence of box IDs should be the same as before, but with an
# xml box at the end.
box_ids = [box.box_id for box in jp2.box]
expected = ['jP ', 'ftyp', 'jp2h', 'uuid', 'jp2c', 'xml ']
self.assertEqual(box_ids, expected)
self.assertEqual(
ET.tostring(jp2.box[-1].xml.getroot()),
b'<data>0</data>'
)
def test_append_allowable_boxes(self):
"""Only XML boxes are allowed to be appended."""
with open(self.temp_jp2_filename, mode='wb') as tfile:
shutil.copyfile(self.jp2file, tfile.name)
jp2 = Jp2k(tfile.name)
# Make a UUID box. Only XMP UUID boxes can currently be appended.
uuid_instance = UUID('00000000-0000-0000-0000-000000000000')
data = b'0123456789'
uuidbox = glymur.jp2box.UUIDBox(uuid_instance, data)
with self.assertRaises(RuntimeError):
jp2.append(uuidbox)
class TestWrap(fixtures.TestCommon):
"""Tests for wrap method."""
def verify_wrapped_raw(self, jp2file):
"""Shared fixture"""
jp2 = Jp2k(jp2file)
self.assertEqual(len(jp2.box), 4)
self.assertEqual(jp2.box[0].box_id, 'jP ')
self.assertEqual(jp2.box[0].offset, 0)
self.assertEqual(jp2.box[0].length, 12)
self.assertEqual(jp2.box[0].longname, 'JPEG 2000 Signature')
self.assertEqual(jp2.box[1].box_id, 'ftyp')
self.assertEqual(jp2.box[1].offset, 12)
self.assertEqual(jp2.box[1].length, 20)
self.assertEqual(jp2.box[1].longname, 'File Type')
self.assertEqual(jp2.box[2].box_id, 'jp2h')
self.assertEqual(jp2.box[2].offset, 32)
self.assertEqual(jp2.box[2].length, 45)
self.assertEqual(jp2.box[2].longname, 'JP2 Header')
self.assertEqual(jp2.box[3].box_id, 'jp2c')
self.assertEqual(jp2.box[3].offset, 77)
self.assertEqual(jp2.box[3].length, 115228)
# jp2h super box
self.assertEqual(len(jp2.box[2].box), 2)
self.assertEqual(jp2.box[2].box[0].box_id, 'ihdr')
self.assertEqual(jp2.box[2].box[0].offset, 40)
self.assertEqual(jp2.box[2].box[0].length, 22)
self.assertEqual(jp2.box[2].box[0].longname, 'Image Header')
self.assertEqual(jp2.box[2].box[0].height, 800)
self.assertEqual(jp2.box[2].box[0].width, 480)
self.assertEqual(jp2.box[2].box[0].num_components, 3)
self.assertEqual(jp2.box[2].box[0].bits_per_component, 8)
self.assertEqual(jp2.box[2].box[0].signed, False)
self.assertEqual(jp2.box[2].box[0].compression, 7)
self.assertEqual(jp2.box[2].box[0].colorspace_unknown, False)
self.assertEqual(jp2.box[2].box[0].ip_provided, False)
self.assertEqual(jp2.box[2].box[1].box_id, 'colr')
self.assertEqual(jp2.box[2].box[1].offset, 62)
self.assertEqual(jp2.box[2].box[1].length, 15)
self.assertEqual(jp2.box[2].box[1].longname, 'Colour Specification')
self.assertEqual(jp2.box[2].box[1].precedence, 0)
self.assertEqual(jp2.box[2].box[1].approximation, 0)
self.assertEqual(jp2.box[2].box[1].colorspace, glymur.core.SRGB)
self.assertIsNone(jp2.box[2].box[1].icc_profile)
def test_wrap(self):
"""basic test for rewrapping a j2c file, no specified boxes"""
j2k = Jp2k(self.j2kfile)
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name)
self.verify_wrapped_raw(tfile.name)
def test_jpx_to_jp2(self):
"""basic test for rewrapping a jpx file"""
jpx = Jp2k(self.jpxfile)
# Use only the signature, file type, header, and 1st codestream.
lst = [0, 1, 2, 5]
boxes = [jpx.box[idx] for idx in lst]
with open(self.temp_jp2_filename, mode='wb') as tfile:
jp2 = jpx.wrap(tfile.name, boxes=boxes)
# Verify the outer boxes.
boxes = [box.box_id for box in jp2.box]
self.assertEqual(boxes, ['jP ', 'ftyp', 'jp2h', 'jp2c'])
# Verify the inside boxes.
boxes = [box.box_id for box in jp2.box[2].box]
self.assertEqual(boxes, ['ihdr', 'colr', 'pclr', 'cmap'])
expected_offsets = [0, 12, 40, 887]
for j, offset in enumerate(expected_offsets):
self.assertEqual(jp2.box[j].offset, offset)
def test_wrap_jp2(self):
"""basic test for rewrapping a jp2 file, no specified boxes"""
j2k = Jp2k(self.jp2file)
with open(self.temp_jp2_filename, mode='wb') as tfile:
jp2 = j2k.wrap(tfile.name)
boxes = [box.box_id for box in jp2.box]
self.assertEqual(boxes, ['jP ', 'ftyp', 'jp2h', 'jp2c'])
def test_wrap_jp2_Lzero(self):
"""Wrap jp2 with jp2c box length is zero"""
with open(self.temp_jp2_filename, mode='wb') as tfile:
with open(self.jp2file, 'rb') as ifile:
tfile.write(ifile.read())
# Rewrite with codestream length as zero.
tfile.seek(3223)
tfile.write(struct.pack('>I', 0))
tfile.flush()
jp = Jp2k(tfile.name)
file2 = self.test_dir_path / 'file2.jp2'
jp2 = jp.wrap(file2)
boxes = [box for box in jp2.box]
self.assertEqual(boxes[3].length, 1132296)
def test_wrap_jp2_Lone(self):
"""Wrap jp2 with jp2c box length is 1, implies Q field"""
with open(self.temp_jp2_filename, mode='wb') as tfile:
with open(self.jp2file, 'rb') as ifile:
tfile.write(ifile.read(3223))
# Write new L, T, Q fields
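# When L == 1, an 8-byte XL field follows the box type and carries the
# true box length, header included.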
tfile.write(struct.pack('>I4sQ', 1, b'jp2c', 1132296 + 8))
# skip over the old L, T fields
ifile.seek(3231)
tfile.write(ifile.read())
tfile.flush()
jp2 = Jp2k(tfile.name)
file2 = self.test_dir_path / 'file2.jp2'
jp22 = jp2.wrap(file2)
self.assertEqual(jp22.box[3].length, 1132296 + 8)
def test_wrap_compatibility_not_jp2(self):
"""File type compatibility must contain jp2"""
jp2 = Jp2k(self.jp2file)
boxes = [box for box in jp2.box]
boxes[1].compatibility_list = ['jpx ']
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
jp2.wrap(tfile.name, boxes=boxes)
def test_empty_jp2h(self):
"""JP2H box list cannot be empty."""
jp2 = Jp2k(self.jp2file)
with open(self.temp_jp2_filename, mode='wb') as tfile:
boxes = jp2.box
# Right here the jp2h superbox has two child boxes. Empty out that
# list to trigger the error.
boxes[2].box = []
with self.assertRaises(RuntimeError):
jp2.wrap(tfile.name, boxes=boxes)
def test_default_layout_with_boxes(self):
"""basic test for rewrapping a jp2 file, boxes specified"""
j2k = Jp2k(self.j2kfile)
boxes = [
JPEG2000SignatureBox(),
FileTypeBox(),
JP2HeaderBox(),
ContiguousCodestreamBox()
]
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
boxes[2].box = [
ImageHeaderBox(
height=height, width=width, num_components=num_components
),
ColourSpecificationBox(colorspace=glymur.core.SRGB)
]
with open(self.temp_jp2_filename, mode='wb') as tfile:
j2k.wrap(tfile.name, boxes=boxes)
self.verify_wrapped_raw(tfile.name)
def test_ihdr_not_first_in_jp2h(self):
"""The specification says that ihdr must be the first box in jp2h."""
j2k = Jp2k(self.j2kfile)
boxes = [
JPEG2000SignatureBox(),
FileTypeBox(),
JP2HeaderBox(),
ContiguousCodestreamBox()
]
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
boxes[2].box = [
ColourSpecificationBox(colorspace=glymur.core.SRGB),
ImageHeaderBox(
height=height, width=width, num_components=num_components
)
]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_first_boxes_jp_and_ftyp(self):
"""first two boxes must be jP followed by ftyp"""
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
jp2b = JPEG2000SignatureBox()
ftyp = FileTypeBox()
jp2h = JP2HeaderBox()
jp2c = ContiguousCodestreamBox()
colr = ColourSpecificationBox(colorspace=glymur.core.SRGB)
ihdr = ImageHeaderBox(
height=height, width=width, num_components=num_components
)
jp2h.box = [ihdr, colr]
boxes = [ftyp, jp2b, jp2h, jp2c]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_pclr_not_in_jp2h(self):
"""A palette box must reside in a JP2 header box."""
palette = np.array([[255, 0, 255], [0, 255, 0]], dtype=np.int32)
bps = (8, 8, 8)
pclr = glymur.jp2box.PaletteBox(
palette=palette, bits_per_component=bps, signed=(True, False, True)
)
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
jp2b = JPEG2000SignatureBox()
ftyp = FileTypeBox()
jp2h = JP2HeaderBox()
jp2c = ContiguousCodestreamBox()
colr = ColourSpecificationBox(colorspace=glymur.core.SRGB)
ihdr = ImageHeaderBox(
height=height, width=width, num_components=num_components
)
jp2h.box = [ihdr, colr]
boxes = [jp2b, ftyp, jp2h, jp2c, pclr]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_jp2h_not_preceding_jp2c(self):
"""jp2h must precede jp2c"""
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
jp2b = JPEG2000SignatureBox()
ftyp = FileTypeBox()
jp2h = JP2HeaderBox()
jp2c = ContiguousCodestreamBox()
colr = ColourSpecificationBox(colorspace=glymur.core.SRGB)
ihdr = ImageHeaderBox(
height=height, width=width, num_components=num_components
)
jp2h.box = [ihdr, colr]
boxes = [jp2b, ftyp, jp2c, jp2h]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_missing_codestream(self):
"""Need a codestream box in order to call wrap method."""
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
jp2k = JPEG2000SignatureBox()
ftyp = FileTypeBox()
jp2h = JP2HeaderBox()
ihdr = ImageHeaderBox(
height=height, width=width, num_components=num_components
)
jp2h.box = [ihdr]
boxes = [jp2k, ftyp, jp2h]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(RuntimeError):
j2k.wrap(tfile.name, boxes=boxes)
def test_wrap_jpx_to_jp2_with_unadorned_jpch(self):
"""A JPX file rewrapped with plain jpch is not allowed."""
with open(self.temp_jp2_filename, mode='wb') as tfile1:
jpx = Jp2k(self.jpxfile)
boxes = [
jpx.box[0],
jpx.box[1],
jpx.box[2],
glymur.jp2box.ContiguousCodestreamBox()
]
with self.assertRaises(RuntimeError):
jpx.wrap(tfile1.name, boxes=boxes)
def test_wrap_jpx_to_jp2_with_incorrect_jp2c_offset(self):
"""Reject A JPX file rewrapped with bad jp2c offset."""
with open(self.temp_jp2_filename, mode='wb') as tfile1:
jpx = Jp2k(self.jpxfile)
jpch = jpx.box[5]
# The offset should be 902.
jpch.offset = 901
jpch.length = 313274
boxes = [jpx.box[0], jpx.box[1], jpx.box[2], jpch]
with self.assertRaises(RuntimeError):
jpx.wrap(tfile1.name, boxes=boxes)
def test_wrap_jpx_to_jp2_with_correctly_specified_jp2c(self):
"""Accept A JPX file rewrapped with good jp2c."""
with open(self.temp_jp2_filename, mode='wb') as tfile1:
jpx = Jp2k(self.jpxfile)
jpch = jpx.box[5]
# This time get it right.
jpch.offset = 903
jpch.length = 313274
boxes = [jpx.box[0], jpx.box[1], jpx.box[2], jpch]
jp2 = jpx.wrap(tfile1.name, boxes=boxes)
act_ids = [box.box_id for box in jp2.box]
exp_ids = ['jP ', 'ftyp', 'jp2h', 'jp2c']
self.assertEqual(act_ids, exp_ids)
act_offsets = [box.offset for box in jp2.box]
exp_offsets = [0, 12, 40, 887]
self.assertEqual(act_offsets, exp_offsets)
act_lengths = [box.length for box in jp2.box]
exp_lengths = [12, 28, 847, 313274]
self.assertEqual(act_lengths, exp_lengths)
def test_full_blown_jpx(self):
"""Rewrap a jpx file."""
with open(self.temp_jp2_filename, mode='wb') as tfile1:
jpx = Jp2k(self.jpxfile)
idx = (
list(range(5)) + list(range(9, 12)) + list(range(6, 9))
) + [12]
boxes = [jpx.box[j] for j in idx]
jpx2 = jpx.wrap(tfile1.name, boxes=boxes)
exp_ids = [box.box_id for box in boxes]
lengths = [box.length for box in jpx.box]
exp_lengths = [lengths[j] for j in idx]
act_ids = [box.box_id for box in jpx2.box]
act_lengths = [box.length for box in jpx2.box]
self.assertEqual(exp_ids, act_ids)
self.assertEqual(exp_lengths, act_lengths)
class TestJp2Boxes(fixtures.TestCommon):
"""Tests for canonical JP2 boxes."""
def test_no_ihdr_box(self):
"""
SCENARIO: The JP2/IHDR box cannot be parsed.
EXPECTED RESULT: An InvalidJp2kError is raised.
"""
# Write a new JP2 file that omits the IHDR box.
j = Jp2k(self.jp2file)
jp2h = [box for box in j.box if box.box_id == 'jp2h'][0]
ihdr = jp2h.box[0]
with open(self.temp_jp2_filename, mode='wb') as tfile:
numbytes = ihdr.offset
with open(self.jp2file, 'rb') as ifile:
# Write all the way up to the ihdr box
tfile.write(ifile.read(numbytes))
# Seek past the ihdr box
ifile.seek(ihdr.length, os.SEEK_CUR)
# Write the rest of the JP2 file
tfile.write(ifile.read(numbytes))
tfile.flush()
with self.assertRaises(InvalidJp2kError):
with warnings.catch_warnings():
# Lots of things wrong with this file.
warnings.simplefilter('ignore')
Jp2k(tfile.name)
def test_no_jp2c_box(self):
"""
SCENARIO: The JP2/JP2C box cannot be parsed.
EXPECTED RESULT: An InvalidJp2kError is raised.
"""
# Write a new JP2 file that omits the JP2C box.
j = Jp2k(self.jp2file)
jp2c = [box for box in j.box if box.box_id == 'jp2c'][0]
with open(self.temp_jp2_filename, mode='wb') as tfile:
numbytes = jp2c.offset
with open(self.jp2file, 'rb') as ifile:
tfile.write(ifile.read(numbytes))
tfile.flush()
with self.assertRaises(InvalidJp2kError):
Jp2k(tfile.name)
def test_default_jp2k(self):
"""Should be able to instantiate a JPEG2000SignatureBox"""
jp2k = glymur.jp2box.JPEG2000SignatureBox()
self.assertEqual(jp2k.signature, (13, 10, 135, 10))
def test_default_ftyp(self):
"""Should be able to instantiate a FileTypeBox"""
ftyp = glymur.jp2box.FileTypeBox()
self.assertEqual(ftyp.brand, 'jp2 ')
self.assertEqual(ftyp.minor_version, 0)
self.assertEqual(ftyp.compatibility_list, ['jp2 '])
def test_default_ihdr(self):
"""Should be able to instantiate an image header box."""
ihdr = glymur.jp2box.ImageHeaderBox(
height=512, width=256, num_components=3
)
self.assertEqual(ihdr.height, 512)
self.assertEqual(ihdr.width, 256)
self.assertEqual(ihdr.num_components, 3)
self.assertEqual(ihdr.bits_per_component, 8)
self.assertFalse(ihdr.signed)
self.assertFalse(ihdr.colorspace_unknown)
def test_default_jp2headerbox(self):
"""Should be able to set jp2h boxes."""
box = JP2HeaderBox()
box.box = [
ImageHeaderBox(height=512, width=256),
ColourSpecificationBox(colorspace=glymur.core.GREYSCALE)
]
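# Merely reaching the assertion below without an exception is the test.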
self.assertTrue(True)
def test_default_ccodestreambox(self):
"""Raw instantiation should not produce a main_header."""
box = ContiguousCodestreamBox()
self.assertEqual(box.box_id, 'jp2c')
self.assertIsNone(box.codestream)
def test_codestream_main_header_offset(self):
"""
main_header_offset is an attribute of the ContiguousCodestreamBox
"""
j = Jp2k(self.jpxfile)
self.assertEqual(
j.box[5].main_header_offset, j.box[5].offset + 8
)
class TestRepr(MetadataBase):
"""Tests for __repr__ methods."""
def test_default_jp2k(self):
"""Should be able to eval a JPEG2000SignatureBox"""
jp2k = glymur.jp2box.JPEG2000SignatureBox()
# Test the representation instantiation.
newbox = eval(repr(jp2k))
self.assertTrue(isinstance(newbox, glymur.jp2box.JPEG2000SignatureBox))
self.assertEqual(newbox.signature, (13, 10, 135, 10))
def test_unknown(self):
"""Should be able to instantiate an unknown box"""
box = UnknownBox('bpcc')
# Test the representation instantiation.
newbox = eval(repr(box))
self.assertTrue(isinstance(newbox, glymur.jp2box.UnknownBox))
def test_bpcc(self):
"""Should be able to instantiate a bpcc box"""
bpc = (5, 5, 5, 1)
signed = (False, False, True, False)
box = BitsPerComponentBox(bpc, signed, length=12, offset=62)
# Test the representation instantiation.
newbox = eval(repr(box))
self.assertEqual(bpc, newbox.bpc)
self.assertEqual(signed, newbox.signed)
def test_free(self):
"""Should be able to instantiate a free box"""
free = glymur.jp2box.FreeBox()
# Test the representation instantiation.
newbox = eval(repr(free))
self.assertTrue(isinstance(newbox, glymur.jp2box.FreeBox))
def test_nlst(self):
"""Should be able to instantiate a number list box"""
assn = (0, 1, 2)
nlst = glymur.jp2box.NumberListBox(assn)
# Test the representation instantiation.
newbox = eval(repr(nlst))
self.assertTrue(isinstance(newbox, glymur.jp2box.NumberListBox))
self.assertEqual(newbox.associations, (0, 1, 2))
def test_ftbl(self):
"""Should be able to instantiate a fragment table box"""
flst = glymur.jp2box.FragmentListBox([89], [1132288], [0])
ftbl = glymur.jp2box.FragmentTableBox([flst])
# Test the representation instantiation.
newbox = eval(repr(ftbl))
self.assertTrue(isinstance(newbox, glymur.jp2box.FragmentTableBox))
def test_dref(self):
"""Should be able to instantiate a data reference box"""
dref = glymur.jp2box.DataReferenceBox()
# Test the representation instantiation.
newbox = eval(repr(dref))
self.assertTrue(isinstance(newbox, glymur.jp2box.DataReferenceBox))
def test_flst(self):
"""Should be able to instantiate a fragment list box"""
flst = glymur.jp2box.FragmentListBox([89], [1132288], [0])
# Test the representation instantiation.
newbox = eval(repr(flst))
self.assertTrue(isinstance(newbox, glymur.jp2box.FragmentListBox))
self.assertEqual(newbox.fragment_offset, [89])
self.assertEqual(newbox.fragment_length, [1132288])
self.assertEqual(newbox.data_reference, [0])
def test_default_cgrp(self):
"""Should be able to instantiate a color group box"""
cgrp = glymur.jp2box.ColourGroupBox()
# Test the representation instantiation.
newbox = eval(repr(cgrp))
self.assertTrue(isinstance(newbox, glymur.jp2box.ColourGroupBox))
def test_default_ftyp(self):
"""Should be able to instantiate a FileTypeBox"""
ftyp = glymur.jp2box.FileTypeBox()
# Test the representation instantiation.
newbox = eval(repr(ftyp))
self.verify_filetype_box(newbox, FileTypeBox())
def test_colourspecification_box(self):
"""Verify __repr__ method on colr box."""
# TODO: add icc_profile
box = ColourSpecificationBox(colorspace=glymur.core.SRGB)
newbox = eval(repr(box))
self.assertEqual(newbox.method, glymur.core.ENUMERATED_COLORSPACE)
self.assertEqual(newbox.precedence, 0)
self.assertEqual(newbox.approximation, 0)
self.assertEqual(newbox.colorspace, glymur.core.SRGB)
self.assertIsNone(newbox.icc_profile)
def test_channeldefinition_box(self):
"""Verify __repr__ method on cdef box."""
channel_type = [COLOR, COLOR, COLOR]
association = [RED, GREEN, BLUE]
cdef = glymur.jp2box.ChannelDefinitionBox(
index=[0, 1, 2],
channel_type=channel_type,
association=association
)
newbox = eval(repr(cdef))
self.assertEqual(newbox.index, (0, 1, 2))
self.assertEqual(newbox.channel_type, (COLOR, COLOR, COLOR))
self.assertEqual(newbox.association, (RED, GREEN, BLUE))
def test_jp2header_box(self):
"""Verify __repr__ method on ihdr box."""
ihdr = ImageHeaderBox(100, 200, num_components=3)
colr = ColourSpecificationBox(colorspace=glymur.core.SRGB)
jp2h = JP2HeaderBox(box=[ihdr, colr])
newbox = eval(repr(jp2h))
self.assertEqual(newbox.box_id, 'jp2h')
self.assertEqual(newbox.box[0].box_id, 'ihdr')
self.assertEqual(newbox.box[1].box_id, 'colr')
def test_imageheader_box(self):
"""Verify __repr__ method on jhdr box."""
ihdr = ImageHeaderBox(100, 200, num_components=3)
newbox = eval(repr(ihdr))
self.assertEqual(newbox.height, 100)
self.assertEqual(newbox.width, 200)
self.assertEqual(newbox.num_components, 3)
self.assertFalse(newbox.signed)
self.assertEqual(newbox.bits_per_component, 8)
self.assertEqual(newbox.compression, 7)
self.assertFalse(newbox.colorspace_unknown)
self.assertFalse(newbox.ip_provided)
def test_association_box(self):
"""Verify __repr__ method on asoc box."""
asoc = glymur.jp2box.AssociationBox()
newbox = eval(repr(asoc))
self.assertEqual(newbox.box_id, 'asoc')
self.assertEqual(len(newbox.box), 0)
def test_codestreamheader_box(self):
"""Verify __repr__ method on jpch box."""
jpch = glymur.jp2box.CodestreamHeaderBox()
newbox = eval(repr(jpch))
self.assertEqual(newbox.box_id, 'jpch')
self.assertEqual(len(newbox.box), 0)
def test_compositinglayerheader_box(self):
"""Verify __repr__ method on jplh box."""
jplh = glymur.jp2box.CompositingLayerHeaderBox()
newbox = eval(repr(jplh))
self.assertEqual(newbox.box_id, 'jplh')
self.assertEqual(len(newbox.box), 0)
def test_componentmapping_box(self):
"""Verify __repr__ method on cmap box."""
cmap = glymur.jp2box.ComponentMappingBox(
component_index=(0, 0, 0),
mapping_type=(1, 1, 1),
palette_index=(0, 1, 2)
)
newbox = eval(repr(cmap))
self.assertEqual(newbox.box_id, 'cmap')
self.assertEqual(newbox.component_index, (0, 0, 0))
self.assertEqual(newbox.mapping_type, (1, 1, 1))
self.assertEqual(newbox.palette_index, (0, 1, 2))
def test_resolution_boxes(self):
"""Verify __repr__ method on resolution boxes."""
resc = glymur.jp2box.CaptureResolutionBox(0.5, 2.5)
resd = glymur.jp2box.DisplayResolutionBox(2.5, 0.5)
res_super_box = glymur.jp2box.ResolutionBox(box=[resc, resd])
newbox = eval(repr(res_super_box))
self.assertEqual(newbox.box_id, 'res ')
self.assertEqual(newbox.box[0].box_id, 'resc')
self.assertEqual(newbox.box[0].vertical_resolution, 0.5)
self.assertEqual(newbox.box[0].horizontal_resolution, 2.5)
self.assertEqual(newbox.box[1].box_id, 'resd')
self.assertEqual(newbox.box[1].vertical_resolution, 2.5)
self.assertEqual(newbox.box[1].horizontal_resolution, 0.5)
def test_label_box(self):
"""Verify __repr__ method on label box."""
lbl = glymur.jp2box.LabelBox("this is a test")
newbox = eval(repr(lbl))
self.assertEqual(newbox.box_id, 'lbl ')
self.assertEqual(newbox.label, "this is a test")
def test_data_entry_url_box(self):
"""Verify __repr__ method on data entry url box."""
version = 0
flag = (0, 0, 0)
url = "http://readthedocs.glymur.org"
box = glymur.jp2box.DataEntryURLBox(version, flag, url)
newbox = eval(repr(box))
self.assertEqual(newbox.box_id, 'url ')
self.assertEqual(newbox.version, version)
self.assertEqual(newbox.flag, flag)
self.assertEqual(newbox.url, url)
def test_uuidinfo_box(self):
"""Verify __repr__ method on uinf box."""
uinf = glymur.jp2box.UUIDInfoBox()
newbox = eval(repr(uinf))
self.assertEqual(newbox.box_id, 'uinf')
self.assertEqual(len(newbox.box), 0)
def test_uuidlist_box(self):
"""Verify __repr__ method on ulst box."""
uuid1 = UUID('00000000-0000-0000-0000-000000000001')
uuid2 = UUID('00000000-0000-0000-0000-000000000002')
uuids = [uuid1, uuid2]
ulst = glymur.jp2box.UUIDListBox(ulst=uuids)
newbox = eval(repr(ulst))
self.assertEqual(newbox.box_id, 'ulst')
self.assertEqual(newbox.ulst[0], uuid1)
self.assertEqual(newbox.ulst[1], uuid2)
def test_palette_box(self):
"""Verify Palette box repr."""
palette = np.array([[255, 0, 1000], [0, 255, 0]], dtype=np.int32)
bps = (8, 8, 16)
box = glymur.jp2box.PaletteBox(
palette=palette,
bits_per_component=bps,
signed=(True, False, True)
)
# The eval below will fail unless these additional numpy names are imported.
from numpy import array, int32 # noqa: F401
newbox = eval(repr(box))
np.testing.assert_array_equal(newbox.palette, palette)
self.assertEqual(newbox.bits_per_component, (8, 8, 16))
self.assertEqual(newbox.signed, (True, False, True))
def test_xml_box(self):
"""Verify xml box repr."""
elt = ET.fromstring('<?xml version="1.0"?><data>0</data>')
tree = ET.ElementTree(elt)
box = glymur.jp2box.XMLBox(xml=tree)
pattern = r"""
glymur.jp2box.XMLBox
\(xml=<lxml.etree._ElementTree\sobject\sat\s0x[a-fA-F0-9]+>\)
"""
regex = re.compile(pattern, re.VERBOSE)
self.assertRegex(repr(box), regex)
def test_readerrequirements_box(self):
"""Verify rreq repr method."""
box = glymur.jp2box.ReaderRequirementsBox(
fuam=160,
dcm=192,
standard_flag=(5, 61, 43),
standard_mask=(128, 96, 64),
vendor_feature=[],
vendor_mask=[]
)
newbox = eval(repr(box))
self.assertEqual(box.fuam, newbox.fuam)
self.assertEqual(box.dcm, newbox.dcm)
self.assertEqual(box.standard_flag, newbox.standard_flag)
self.assertEqual(box.standard_mask, newbox.standard_mask)
self.assertEqual(box.vendor_feature, newbox.vendor_feature)
self.assertEqual(box.vendor_mask, newbox.vendor_mask)
def test_uuid_box_generic(self):
"""Verify uuid repr method."""
uuid_instance = UUID('00000000-0000-0000-0000-000000000000')
data = b'0123456789'
box = glymur.jp2box.UUIDBox(the_uuid=uuid_instance, raw_data=data)
# Since the raw_data parameter is a sequence of bytes which could be
# quite long, don't bother trying to make it conform to eval(repr()).
pattern = r"""
glymur.jp2box.UUIDBox\(
UUID\('00000000-0000-0000-0000-000000000000'\),\s
raw_data=<byte\sarray\s10\selements>
\)
"""
regex = re.compile(pattern, re.VERBOSE)
self.assertRegex(repr(box), regex)
def test_uuid_box_xmp(self):
"""Verify uuid repr method for XMP UUID box."""
jp2file = glymur.data.nemo()
j = Jp2k(jp2file)
box = j.box[3]
# Since the raw_data parameter is a sequence of bytes which could be
# quite long, don't bother trying to make it conform to eval(repr()).
pattern = r"""
glymur.jp2box.UUIDBox\(
UUID\('be7acfcb-97a9-42e8-9c71-999491e3afac'\),\s
raw_data=<byte\sarray\s3122\selements>
\)
"""
regex = re.compile(pattern, re.VERBOSE)
self.assertRegex(repr(box), regex)
def test_contiguous_codestream_box(self):
"""Verify contiguous codestream box repr method."""
jp2file = glymur.data.nemo()
jp2 = Jp2k(jp2file)
box = jp2.box[-1]
# Difficult to eval(repr()) this, so just match the general pattern.
pattern = r"""
glymur.jp2box.ContiguousCodeStreamBox\(
codestream=<glymur.codestream.Codestream\sobject\s
at\s0x([a-fA-F0-9]*)>
\)
"""
regex = re.compile(pattern, re.VERBOSE)
self.assertRegex(repr(box), regex)
'''
Created on Mar 13, 2012
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import unittest
import numpy as np
import pandas as pd
from ema_workbench.analysis import prim
from ema_workbench.analysis.prim import PrimBox
from test import utilities
from ema_workbench.analysis.scenario_discovery_util import RuleInductionType
def flu_classify(data):
# get the output for deceased population
result = data['deceased population region 1']
# make an empty array of length equal to the number of cases
classes = np.zeros(result.shape[0])
# if the deceased population is higher than 1,000,000 people, classify as 1
classes[result[:, -1] > 1000000] = 1
return classes
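# Illustrative sketch with fabricated data (not one of the workbench
# fixtures): two runs, one ending below and one ending above the
# 1,000,000 threshold.
def _example_flu_classify():
    """Show flu_classify on a hand-built outcomes dict."""
    data = {'deceased population region 1': np.array([[0.0, 5.0e5],
                                                      [0.0, 2.0e6]])}
    assert list(flu_classify(data)) == [0, 1]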
def scarcity_classify(outcomes):
outcome = outcomes['relative market price']
change = np.abs(outcome[:, 1::]-outcome[:, 0:-1])
neg_change = np.min(change, axis=1)
pos_change = np.max(change, axis=1)
logical = (neg_change > -0.6) & (pos_change > 0.6)
classes = np.zeros(outcome.shape[0])
classes[logical] = 1
return classes
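# Illustrative sketch with fabricated data: because the step sizes are
# absolute values, the neg_change condition always holds, so a run is
# flagged exactly when its largest consecutive price step exceeds 0.6.
def _example_scarcity_classify():
    """Show scarcity_classify on a hand-built outcomes dict."""
    outcomes = {'relative market price': np.array([[1.0, 1.0, 1.0],
                                                   [1.0, 2.0, 1.0]])}
    assert list(scarcity_classify(outcomes)) == [0, 1]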
class PrimBoxTestCase(unittest.TestCase):
def test_init(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([0,1,2])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
self.assertEqual(box.peeling_trajectory.shape, (1,6))
def test_select(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0, 1], dtype=int)
box.update(new_box_lim, indices)
box.select(0)
self.assertTrue(np.all(box.yi==prim_obj.yi))
def test_inspect(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0, 1], dtype=int)
box.update(new_box_lim, indices)
box.inspect(1)
box.inspect()
box.inspect(style='graph')
with self.assertRaises(ValueError):
box.inspect(style='some unknown style')
def test_show_ppt(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
data = np.zeros((100, 5))
data[:, 0:4] = np.random.rand(100, 4)
data[:, 4] = np.random.randint(0, 5, size=(100, ))
box.peeling_trajectory = pd.DataFrame(data, columns=cols)
box.show_ppt()
def test_show_tradeoff(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
data = np.zeros((100, 5))
data[:, 0:4] = np.random.rand(100, 4)
data[:, 4] = np.random.randint(0, 5, size=(100, ))
box.peeling_trajectory = pd.DataFrame(data, columns=cols)
box.show_tradeoff()
def test_update(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0, 1], dtype=int)
box.update(new_box_lim, indices)
self.assertEqual(box.peeling_trajectory['mean'][1], 1)
self.assertEqual(box.peeling_trajectory['coverage'][1], 1)
self.assertEqual(box.peeling_trajectory['density'][1], 1)
self.assertEqual(box.peeling_trajectory['res_dim'][1], 1)
self.assertEqual(box.peeling_trajectory['mass'][1], 2/3)
def test_drop_restriction(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,2,6)],
columns=['a', 'b', 'c'])
indices = np.array([0, 1], dtype=int)
box.update(new_box_lim, indices)
box.drop_restriction('b')
correct_box_lims = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
box_lims = box.box_lims[-1]
names = box_lims.columns
for entry in names:
lim_correct = correct_box_lims[entry]
lim_box = box_lims[entry]
for i in range(len(lim_correct)):
self.assertEqual(lim_correct[i], lim_box[i])
self.assertEqual(box.peeling_trajectory['mean'][2], 1)
self.assertEqual(box.peeling_trajectory['coverage'][2], 1)
self.assertEqual(box.peeling_trajectory['density'][2], 1)
self.assertEqual(box.peeling_trajectory['res_dim'][2], 1)
self.assertEqual(box.peeling_trajectory['mass'][2], 2/3)
def test_calculate_quasi_p(self):
pass
class PrimTestCase(unittest.TestCase):
def test_setup_prim(self):
self.results = utilities.load_flu_data()
self.classify = flu_classify
experiments, outcomes = self.results
# test initialization, including t_coi calculation in case of searching
# for results equal to or higher than the threshold
outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
results = experiments, outcomes
threshold = 10000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.ABOVE, threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] >= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
# test initialization, including t_coi calculation in case of searching
# for results equal to or lower than the threshold
threshold = 1000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.BELOW,
threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] <= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)
def test_boxes(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([0,1,2])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
boxes = prim_obj.boxes
self.assertEqual(len(boxes), 1, 'box length not correct')
# real data test case
prim_obj = prim.setup_prim(utilities.load_flu_data(), flu_classify,
threshold=0.8)
prim_obj.find_box()
boxes = prim_obj.boxes
self.assertEqual(len(boxes), 1, 'box length not correct')
def test_prim_init_select(self):
self.results = utilities.load_flu_data()
self.classify = flu_classify
experiments, outcomes = self.results
unc = experiments.columns.values.tolist()
# test initialization, including t_coi calculation in case of searching
# for results equal to or higher than the threshold
outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
results = experiments, outcomes
threshold = 10000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.ABOVE, threshold=threshold,
incl_unc=unc)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] >= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
# test initialization, including t_coi calculation in case of searching
# for results equal to or lower than the threshold
threshold = 1000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.BELOW,
threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] <= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)
def test_quantile(self):
data = pd.Series(np.arange(10))
self.assertTrue(prim.get_quantile(data, 0.9)==8.5)
self.assertTrue(prim.get_quantile(data, 0.95)==8.5)
self.assertTrue(prim.get_quantile(data, 0.1)==0.5)
self.assertTrue(prim.get_quantile(data, 0.05)==0.5)
data = pd.Series(1)
self.assertTrue(prim.get_quantile(data, 0.9)==1)
self.assertTrue(prim.get_quantile(data, 0.95)==1)
self.assertTrue(prim.get_quantile(data, 0.1)==1)
self.assertTrue(prim.get_quantile(data, 0.05)==1)
data = pd.Series([1,1,2,3,4,5,6,7,8,9,9])
self.assertTrue(prim.get_quantile(data, 0.9)==8.5)
self.assertTrue(prim.get_quantile(data, 0.95)==8.5)
self.assertTrue(prim.get_quantile(data, 0.1)==1.5)
self.assertTrue(prim.get_quantile(data, 0.05)==1.5)
def test_box_init(self):
# test init box without NANS
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,7)],
columns=['a', 'b', 'c'])
y = np.array([0,1,2])
prim_obj = prim.Prim(x,y, threshold=0.5,
mode=RuleInductionType.REGRESSION)
box_init = prim_obj.box_init
# some test on the box
self.assertTrue(box_init.loc[0, 'a']==0)
self.assertTrue(box_init.loc[1, 'a']==3)
self.assertTrue(box_init.loc[0, 'b']==1)
self.assertTrue(box_init.loc[1, 'b']==5)
self.assertTrue(box_init.loc[0, 'c']==2)
self.assertTrue(box_init.loc[1, 'c']==7)
# heterogenous without NAN
x = pd.DataFrame([[0.1, 0, 'a'],
[0.2, 1, 'b'],
[0.3, 2, 'a'],
[0.4, 3, 'b'],
[0.5, 4, 'a'],
[0.6, 5, 'a'],
[0.7, 6, 'b'],
[0.8, 7, 'a'],
[0.9, 8, 'b'],
[1.0, 9, 'a']],
columns=['a', 'b', 'c'])
y = np.arange(0, x.shape[0])
prim_obj = prim.Prim(x,y, threshold=0.5,
mode=RuleInductionType.REGRESSION)
box_init = prim_obj.box_init
# some test on the box
self.assertTrue(box_init['a'][0]==0.1)
self.assertTrue(box_init['a'][1]==1.0)
self.assertTrue(box_init['b'][0]==0)
self.assertTrue(box_init['b'][1]==9)
self.assertTrue(box_init['c'][0]==set(['a','b']))
self.assertTrue(box_init['c'][1]==set(['a','b']))
def test_prim_exceptions(self):
results = utilities.load_flu_data()
x, outcomes = results
y = outcomes['deceased population region 1']
self.assertRaises(prim.PrimException, prim.Prim,
x, y, threshold=0.8,
mode=RuleInductionType.REGRESSION)
def test_find_box(self):
results = utilities.load_flu_data()
classify = flu_classify
prim_obj = prim.setup_prim(results, classify,
threshold=0.8)
box_1 = prim_obj.find_box()
prim_obj._update_yi_remaining(prim_obj)
after_find = box_1.yi.shape[0] + prim_obj.yi_remaining.shape[0]
self.assertEqual(after_find, prim_obj.y.shape[0])
box_2 = prim_obj.find_box()
prim_obj._update_yi_remaining(prim_obj)
after_find = box_1.yi.shape[0] +\
box_2.yi.shape[0] +\
prim_obj.yi_remaining.shape[0]
self.assertEqual(after_find, prim_obj.y.shape[0])
def test_discrete_peel(self):
x = pd.DataFrame(np.random.randint(0, 10, size=(100,), dtype=int),
columns=['a'])
y = np.zeros(100,)
y[x.a > 5] = 1
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
for peel in peels:
self.assertEqual(len(peel), 2)
indices, tempbox = peel
self.assertTrue(isinstance(indices, np.ndarray))
self.assertTrue(isinstance(tempbox, pd.DataFrame))
# have modified boxlims as starting point
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [1,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
for peel in peels:
self.assertEqual(len(peel), 2)
indices, tempbox = peel
self.assertTrue(isinstance(indices, np.ndarray))
self.assertTrue(isinstance(tempbox, pd.DataFrame))
# have modified boxlims as starting point
x.a[x.a>5] = 5
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [5,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
x.a[x.a<5] = 5
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [5,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
def test_categorical_peel(self):
x = pd.DataFrame(list(zip(np.random.rand(10,),
['a','b','a','b','a','a','b','a','b','a', ])),
columns=['a', 'b'])
y = np.random.randint(0,2, (10,))
y = y.astype(int)
y = {'y':y}
results = x, y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = pd.DataFrame([(0, set(['a','b'])),
(1, set(['a','b']))],
columns=['a', 'b'] )
box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)
u = 'b'
x = x.select_dtypes(exclude=np.number).values
j = 0
peels = prim_obj._categorical_peel(box, u, j, x)
self.assertEqual(len(peels), 2)
for peel in peels:
pl = peel[1][u]
self.assertEqual(len(pl[0]), 1)
self.assertEqual(len(pl[1]), 1)
a = ('a',)
b = ('b',)
x = pd.DataFrame(list(zip(np.random.rand(10,),
[a, b, a, b, a,
a, b, a, b, a])),
columns=['a', 'b'])
y = np.random.randint(0,2, (10,))
y = y.astype(int)
y = {'y':y}
results = x, y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = prim_obj.box_init
box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)
u = 'b'
x = x.select_dtypes(exclude=np.number).values
j = 0
peels = prim_obj._categorical_peel(box, u, j, x)
self.assertEqual(len(peels), 2)
for peel in peels:
pl = peel[1][u]
self.assertEqual(len(pl[0]), 1)
self.assertEqual(len(pl[1]), 1)
def test_categorical_paste(self):
a = np.random.rand(10,)
b = ['a','b','a','b','a','a','b','a','b','a', ]
x = pd.DataFrame(list(zip(a,b)), columns=['a', 'b'])
x['b'] = x['b'].astype('category')
y = np.random.randint(0,2, (10,))
y = y.astype(int)
y = {'y':y}
results = x,y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = pd.DataFrame([(0, set(['a',])),
(1, set(['a',]))], columns=x.columns)
yi = np.where(x.loc[:,'b']=='a')
box = prim.PrimBox(prim_obj, box_lims, yi)
u = 'b'
pastes = prim_obj._categorical_paste(box, u, x, ['b'])
self.assertEqual(len(pastes), 1)
for paste in pastes:
indices, box_lims = paste
self.assertEqual(indices.shape[0], 10)
self.assertEqual(box_lims[u][0], set(['a','b']))
if __name__ == '__main__':
# ema_logging.log_to_stderr(ema_logging.INFO)
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(PrimTestCase("test_write_boxes_to_stdout"))
# unittest.TextTestRunner().run(suite)
"""
mbed SDK
Copyright (c) 2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import functools
import time
import threading
import uuid
import sys
import mbed_host_tests
import usb.core
from usb.util import (
CTRL_IN,
CTRL_OUT,
CTRL_TYPE_STANDARD,
CTRL_TYPE_CLASS,
CTRL_RECIPIENT_DEVICE,
CTRL_RECIPIENT_INTERFACE,
DESC_TYPE_CONFIG,
build_request_type)
if sys.platform.startswith('win'):
# Use libusb0 on Windows. libusb1 implementation for Windows
# does not support all features necessary for testing.
import usb.backend.libusb0
USB_BACKEND = usb.backend.libusb0.get_backend()
else:
# Use a default backend on other platforms.
USB_BACKEND = None
try:
import hid
except ImportError:
CYTHON_HIDAPI_PRESENT = False
else:
CYTHON_HIDAPI_PRESENT = True
# USB device -- device classes
USB_CLASS_HID = 0x03
# USB device -- standard requests
USB_REQUEST_GET_DESCRIPTOR = 0x06
# USB device -- HID class requests
HID_REQUEST_GET_REPORT = 0x01
HID_REQUEST_SET_REPORT = 0x09
HID_REQUEST_GET_IDLE = 0x02
HID_REQUEST_SET_IDLE = 0x0A
HID_REQUEST_GET_PROTOCOL = 0x03
HID_REQUEST_SET_PROTOCOL = 0x0B
# USB device -- HID class descriptors
DESC_TYPE_HID_HID = 0x21
DESC_TYPE_HID_REPORT = 0x22
DESC_TYPE_HID_PHYSICAL = 0x23
# USB device -- HID class descriptor lengths
DESC_LEN_HID_HID = 0x09
# USB device -- descriptor fields offsets
DESC_OFFSET_BLENGTH = 0
DESC_OFFSET_BDESCRIPTORTYPE = 1
# USB device -- HID subclasses
HID_SUBCLASS_NONE = 0
HID_SUBCLASS_BOOT = 1
# USB device -- HID protocols
HID_PROTOCOL_NONE = 0
HID_PROTOCOL_KEYBOARD = 1
HID_PROTOCOL_MOUSE = 2
# Greentea message keys used for callbacks
MSG_KEY_DEVICE_READY = 'dev_ready'
MSG_KEY_HOST_READY = 'host_ready'
MSG_KEY_SERIAL_NUMBER = 'usb_dev_sn'
MSG_KEY_TEST_GET_DESCRIPTOR_HID = 'test_get_desc_hid'
MSG_KEY_TEST_GET_DESCRIPTOR_CFG = 'test_get_desc_cfg'
MSG_KEY_TEST_REQUESTS = 'test_requests'
MSG_KEY_TEST_RAW_IO = 'test_raw_io'
# Greentea message keys used to notify DUT of test status
MSG_KEY_TEST_CASE_FAILED = 'fail'
MSG_KEY_TEST_CASE_PASSED = 'pass'
MSG_VALUE_DUMMY = '0'
MSG_VALUE_NOT_SUPPORTED = 'not_supported'
# Constants for the tests.
KEYBOARD_IDLE_RATE_TO_SET = 0x00 # Duration = 0 (indefinite)
HID_PROTOCOL_TO_SET = 0x01 # Protocol = 1 (Report Protocol)
RAW_IO_REPS = 16 # Number of loopback test reps.
def build_get_desc_value(desc_type, desc_index):
"""Build and return a wValue field for control requests."""
return (desc_type << 8) | desc_index
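# Illustrative sketch: for a HID Report descriptor request, wValue
# combines the 0x22 descriptor type (high byte) with descriptor index 0
# (low byte).
def _example_build_get_desc_value():
    """Show the wValue layout used by the Get_Descriptor requests."""
    assert build_get_desc_value(DESC_TYPE_HID_REPORT, 0x00) == 0x2200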
def usb_hid_path(serial_number):
"""Get a USB HID device system path based on the serial number."""
if not CYTHON_HIDAPI_PRESENT:
return None
for device_info in hid.enumerate(): # pylint: disable=no-member
if device_info.get('serial_number') == serial_number: # pylint: disable=not-callable
return device_info['path']
return None
def get_descriptor_types(desc):
"""Return a list of all bDescriptorType values found in desc.
desc is expected to be a sequence of bytes, i.e. array.array('B')
returned from usb.core.
From the USB 2.0 spec, paragraph 9.5:
Each descriptor begins with a byte-wide field that contains the total
number of bytes in the descriptor followed by a byte-wide field that
identifies the descriptor type.
"""
tmp_desc = desc[DESC_OFFSET_BLENGTH:]
desc_types = []
while True:
try:
bLength = tmp_desc[DESC_OFFSET_BLENGTH] # pylint: disable=invalid-name
bDescriptorType = tmp_desc[DESC_OFFSET_BDESCRIPTORTYPE] # pylint: disable=invalid-name
desc_types.append(int(bDescriptorType))
tmp_desc = tmp_desc[int(bLength):]
except IndexError:
break
return desc_types
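# Illustrative sketch on fabricated bytes: two back-to-back 9-byte
# descriptor stubs, a configuration descriptor followed by a HID class
# descriptor; only bLength and bDescriptorType matter for the walk.
def _example_get_descriptor_types():
    """Show bLength/bDescriptorType walking over concatenated stubs."""
    fake_desc = ([0x09, DESC_TYPE_CONFIG] + [0x00] * 7
                 + [0x09, DESC_TYPE_HID_HID] + [0x00] * 7)
    assert get_descriptor_types(fake_desc) == [DESC_TYPE_CONFIG,
                                               DESC_TYPE_HID_HID]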
def get_hid_descriptor_parts(hid_descriptor):
"""Return bNumDescriptors, bDescriptorType, wDescriptorLength from hid_descriptor."""
err_msg = 'Invalid HID class descriptor'
try:
if hid_descriptor[1] != DESC_TYPE_HID_HID:
raise TypeError(err_msg)
bNumDescriptors = int(hid_descriptor[5]) # pylint: disable=invalid-name
bDescriptorType = int(hid_descriptor[6]) # pylint: disable=invalid-name
wDescriptorLength = int((hid_descriptor[8] << 8) | hid_descriptor[7]) # pylint: disable=invalid-name
except (IndexError, ValueError):
raise TypeError(err_msg)
return bNumDescriptors, bDescriptorType, wDescriptorLength
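# Illustrative sketch on fabricated bytes: a 9-byte HID class descriptor
# declaring one subordinate Report descriptor of 0x003F bytes (field
# layout per HID 1.11, paragraph 6.2.1; multi-byte fields little-endian).
def _example_get_hid_descriptor_parts():
    """Parse a hand-built HID descriptor into its three fields."""
    hid_desc = [DESC_LEN_HID_HID,      # bLength
                DESC_TYPE_HID_HID,     # bDescriptorType
                0x11, 0x01,            # bcdHID 1.11
                0x00,                  # bCountryCode
                0x01,                  # bNumDescriptors
                DESC_TYPE_HID_REPORT,  # subordinate bDescriptorType
                0x3F, 0x00]            # wDescriptorLength
    assert get_hid_descriptor_parts(hid_desc) == (1, DESC_TYPE_HID_REPORT,
                                                  0x3F)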
def get_usbhid_dev_type(intf):
"""Return a name of the HID device class type for intf."""
if not isinstance(intf, usb.core.Interface):
return None
if intf.bInterfaceClass != USB_CLASS_HID:
# USB Device Class Definition for HID, v1.11, paragraphs 4.1, 4.2 & 4.3:
# the class is specified in the Interface descriptor
# and not the Device descriptor.
return None
if (intf.bInterfaceSubClass == HID_SUBCLASS_BOOT
and intf.bInterfaceProtocol == HID_PROTOCOL_KEYBOARD):
return 'boot_keyboard'
if (intf.bInterfaceSubClass == HID_SUBCLASS_BOOT
and intf.bInterfaceProtocol == HID_PROTOCOL_MOUSE):
return 'boot_mouse'
# Determining any other HID dev type, like a non-boot_keyboard or
# a non-boot_mouse requires getting and parsing a HID Report descriptor
# for intf.
# Only the boot_keyboard, boot_mouse and other_device are used for this
# greentea test suite.
return 'other_device'
class RetryError(Exception):
"""Exception raised by retry_fun_call()."""
def retry_fun_call(fun, num_retries=3, retry_delay=0.0):
"""Call fun and retry if any exception was raised.
fun is called at most num_retries times, with retry_delay seconds between calls.
Raises RetryError if the retry limit is exhausted.
"""
verbose = False
final_err = None
for retry in range(1, num_retries + 1):
try:
return fun() # pylint: disable=not-callable
except Exception as exc: # pylint: disable=broad-except
final_err = exc
if verbose:
print('Retry {}/{} failed ({})'
.format(retry, num_retries, str(fun)))
time.sleep(retry_delay)
err_msg = 'Failed with "{}". Tried {} times.'
raise RetryError(err_msg.format(final_err, num_retries))
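# Usage sketch (illustrative only): wrap a flaky callable so transient
# failures are retried a few times before RetryError is raised.
def _example_retry_fun_call():
    """Show retry_fun_call succeeding on the third attempt."""
    state = {'calls': 0}
    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise IOError('transient failure')
        return 'ok'
    return retry_fun_call(flaky, num_retries=5, retry_delay=0.01)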
def raise_if_different(expected, actual, text=''):
"""Raise a RuntimeError if actual is different than expected."""
if expected != actual:
raise RuntimeError('{}Got {!r}, expected {!r}.'.format(text, actual, expected))
def raise_if_false(expression, text):
"""Raise a RuntimeError if expression is False."""
if not expression:
raise RuntimeError(text)
class USBHIDTest(mbed_host_tests.BaseHostTest):
"""Host side test for USB device HID class."""
@staticmethod
def get_usb_hid_path(usb_id_str):
"""Get a USB HID device path as registered in the system.
Search is based on the unique USB SN generated by the host
during test suite setup.
Raises RuntimeError if the device is not found.
"""
hid_path = usb_hid_path(usb_id_str)
if hid_path is None:
err_msg = 'USB HID device (SN={}) not found.'
raise RuntimeError(err_msg.format(usb_id_str))
return hid_path
@staticmethod
def get_usb_dev(usb_id_str):
"""Get a usb.core.Device instance.
Search is based on the unique USB SN generated by the host
during test suite setup.
Raises RuntimeError if the device is not found.
"""
usb_dev = usb.core.find(custom_match=lambda d: d.serial_number == usb_id_str, backend=USB_BACKEND)
if usb_dev is None:
err_msg = 'USB device (SN={}) not found.'
raise RuntimeError(err_msg.format(usb_id_str))
return usb_dev
def __init__(self):
super(USBHIDTest, self).__init__()
self.__bg_task = None
self.dut_usb_dev_sn = uuid.uuid4().hex # 32 hex digit string
def notify_error(self, msg):
"""Terminate the test with an error msg."""
self.log('TEST ERROR: {}'.format(msg))
self.notify_complete(None)
def notify_failure(self, msg):
"""Report a host side test failure to the DUT."""
self.log('TEST FAILED: {}'.format(msg))
self.send_kv(MSG_KEY_TEST_CASE_FAILED, MSG_VALUE_DUMMY)
def notify_success(self, value=None, msg=''):
"""Report a host side test success to the DUT."""
if msg:
self.log('TEST PASSED: {}'.format(msg))
if value is None:
value = MSG_VALUE_DUMMY
self.send_kv(MSG_KEY_TEST_CASE_PASSED, value)
def cb_test_get_hid_desc(self, key, value, timestamp):
"""Verify the device handles Get_Descriptor request correctly.
Two requests are tested for every HID interface:
1. Get_Descriptor(HID),
2. Get_Descriptor(Report).
Details in USB Device Class Definition for HID, v1.11, paragraph 7.1.
"""
kwargs_hid_desc_req = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
'bRequest': USB_REQUEST_GET_DESCRIPTOR,
# Descriptor Index (part of wValue) is reset to zero for
# HID class descriptors other than Physical ones.
'wValue': build_get_desc_value(DESC_TYPE_HID_HID, 0x00),
# wIndex is replaced with the Interface Number in the loop.
'wIndex': None,
'data_or_wLength': DESC_LEN_HID_HID}
kwargs_report_desc_req = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
'bRequest': USB_REQUEST_GET_DESCRIPTOR,
# Descriptor Index (part of wValue) is reset to zero for
# HID class descriptors other than Physical ones.
'wValue': build_get_desc_value(DESC_TYPE_HID_REPORT, 0x00),
# wIndex is replaced with the Interface Number in the loop.
'wIndex': None,
# wLength is replaced with the Report Descriptor Length in the loop.
'data_or_wLength': None}
mbed_hid_dev = None
report_desc_lengths = []
try:
mbed_hid_dev = retry_fun_call(
fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
num_retries=20,
retry_delay=0.05)
except RetryError as exc:
self.notify_error(exc)
return
try:
for intf in mbed_hid_dev.get_active_configuration(): # pylint: disable=not-callable
if intf.bInterfaceClass != USB_CLASS_HID:
continue
try:
if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber) # pylint: disable=not-callable
except (NotImplementedError, AttributeError):
pass
# Request the HID descriptor.
kwargs_hid_desc_req['wIndex'] = intf.bInterfaceNumber
hid_desc = mbed_hid_dev.ctrl_transfer(**kwargs_hid_desc_req) # pylint: disable=not-callable
try:
bNumDescriptors, bDescriptorType, wDescriptorLength = get_hid_descriptor_parts(hid_desc) # pylint: disable=invalid-name
except TypeError as exc:
self.notify_error(exc)
return
raise_if_different(1, bNumDescriptors, 'Exactly one HID Report descriptor expected. ')
raise_if_different(DESC_TYPE_HID_REPORT, bDescriptorType, 'Invalid HID class descriptor type. ')
raise_if_false(wDescriptorLength > 0, 'Invalid HID Report descriptor length. ')
# Request the Report descriptor.
kwargs_report_desc_req['wIndex'] = intf.bInterfaceNumber
kwargs_report_desc_req['data_or_wLength'] = wDescriptorLength
report_desc = mbed_hid_dev.ctrl_transfer(**kwargs_report_desc_req) # pylint: disable=not-callable
raise_if_different(wDescriptorLength, len(report_desc),
'The size of data received does not match the HID Report descriptor length. ')
report_desc_lengths.append(len(report_desc))
except usb.core.USBError as exc:
self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
except RuntimeError as exc:
self.notify_failure(exc)
else:
# Send the report desc len to the device.
# USBHID::report_desc_length() returns uint16_t
msg_value = '{0:04x}'.format(max(report_desc_lengths))
self.notify_success(msg_value)
def cb_test_get_cfg_desc(self, key, value, timestamp):
"""Verify the device provides required HID descriptors.
USB Device Class Definition for HID, v1.11, paragraph 7.1:
When a Get_Descriptor(Configuration) request is issued, it
returns (...), and the HID descriptor for each interface.
"""
kwargs_cfg_desc_req = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE),
'bRequest': USB_REQUEST_GET_DESCRIPTOR,
# Descriptor Index (part of wValue) is reset to zero.
'wValue': build_get_desc_value(DESC_TYPE_CONFIG, 0x00),
# wIndex is reset to zero.
'wIndex': 0x00,
# wLength unknown, set to 1024.
'data_or_wLength': 1024}
mbed_hid_dev = None
try:
mbed_hid_dev = retry_fun_call(
fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
num_retries=20,
retry_delay=0.05)
except RetryError as exc:
self.notify_error(exc)
return
try:
# Request the Configuration descriptor.
cfg_desc = mbed_hid_dev.ctrl_transfer(**kwargs_cfg_desc_req) # pylint: disable=not-callable
raise_if_false(DESC_TYPE_HID_HID in get_descriptor_types(cfg_desc),
'No HID class descriptor in the Configuration descriptor.')
except usb.core.USBError as exc:
self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
except RuntimeError as exc:
self.notify_failure(exc)
else:
self.notify_success()
def cb_test_class_requests(self, key, value, timestamp):
"""Verify all required HID requests are supported.
USB Device Class Definition for HID, v1.11, Appendix G:
1. Get_Report -- required for all types,
2. Set_Report -- not required if dev doesn't declare an Output Report,
3. Get_Idle -- required for keyboards,
4. Set_Idle -- required for keyboards,
5. Get_Protocol -- required for boot_keyboard and boot_mouse,
6. Set_Protocol -- required for boot_keyboard and boot_mouse.
Details in USB Device Class Definition for HID, v1.11, paragraph 7.2.
"""
kwargs_get_report_request = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
'bRequest': HID_REQUEST_GET_REPORT,
# wValue: ReportType = Input, ReportID = 0 (not used)
'wValue': (0x01 << 8) | 0x00,
# wIndex: InterfaceNumber (defined later)
'wIndex': None,
# wLength: unknown, set to 1024
'data_or_wLength': 1024}
kwargs_get_idle_request = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
'bRequest': HID_REQUEST_GET_IDLE,
# wValue: 0, ReportID = 0 (not used)
'wValue': (0x00 << 8) | 0x00,
# wIndex: InterfaceNumber (defined later)
'wIndex': None,
'data_or_wLength': 1}
kwargs_set_idle_request = {
'bmRequestType': build_request_type(
CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
'bRequest': HID_REQUEST_SET_IDLE,
# wValue: Duration, ReportID = 0 (all input reports)
'wValue': (KEYBOARD_IDLE_RATE_TO_SET << 8) | 0x00,
# wIndex: InterfaceNumber (defined later)
'wIndex': None,
'data_or_wLength': 0}
kwargs_get_protocol_request = {
'bmRequestType': build_request_type(
CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
'bRequest': HID_REQUEST_GET_PROTOCOL,
'wValue': 0x00,
# wIndex: InterfaceNumber (defined later)
'wIndex': None,
'data_or_wLength': 1}
kwargs_set_protocol_request = {
'bmRequestType': build_request_type(
CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
'bRequest': HID_REQUEST_SET_PROTOCOL,
'wValue': HID_PROTOCOL_TO_SET,
# wIndex: InterfaceNumber (defined later)
'wIndex': None,
'data_or_wLength': 0}
mbed_hid_dev = None
try:
mbed_hid_dev = retry_fun_call(
fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn), # pylint: disable=not-callable
num_retries=20,
retry_delay=0.05)
except RetryError as exc:
self.notify_error(exc)
return
hid_dev_type = None
tested_request_name = None
try:
for intf in mbed_hid_dev.get_active_configuration(): # pylint: disable=not-callable
hid_dev_type = get_usbhid_dev_type(intf)
if hid_dev_type is None:
continue
try:
if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber) # pylint: disable=not-callable
except (NotImplementedError, AttributeError):
pass
if hid_dev_type == 'boot_keyboard':
# 4. Set_Idle
tested_request_name = 'Set_Idle'
kwargs_set_idle_request['wIndex'] = intf.bInterfaceNumber
mbed_hid_dev.ctrl_transfer(**kwargs_set_idle_request) # pylint: disable=not-callable
# 3. Get_Idle
tested_request_name = 'Get_Idle'
kwargs_get_idle_request['wIndex'] = intf.bInterfaceNumber
idle_rate = mbed_hid_dev.ctrl_transfer(**kwargs_get_idle_request) # pylint: disable=not-callable
raise_if_different(KEYBOARD_IDLE_RATE_TO_SET, idle_rate, 'Invalid idle rate received. ')
if hid_dev_type in ('boot_keyboard', 'boot_mouse'):
# 6. Set_Protocol
tested_request_name = 'Set_Protocol'
kwargs_set_protocol_request['wIndex'] = intf.bInterfaceNumber
mbed_hid_dev.ctrl_transfer(**kwargs_set_protocol_request) # pylint: disable=not-callable
# 5. Get_Protocol
tested_request_name = 'Get_Protocol'
kwargs_get_protocol_request['wIndex'] = intf.bInterfaceNumber
protocol = mbed_hid_dev.ctrl_transfer(**kwargs_get_protocol_request) # pylint: disable=not-callable
raise_if_different(HID_PROTOCOL_TO_SET, protocol, 'Invalid protocol received. ')
# 1. Get_Report
tested_request_name = 'Get_Report'
kwargs_get_report_request['wIndex'] = intf.bInterfaceNumber
mbed_hid_dev.ctrl_transfer(**kwargs_get_report_request) # pylint: disable=not-callable
except usb.core.USBError as exc:
self.notify_failure('The {!r} does not support the {!r} HID class request ({}).'
.format(hid_dev_type, tested_request_name, exc))
except RuntimeError as exc:
self.notify_failure('Set/Get data mismatch for {!r} for the {!r} HID class request ({}).'
.format(hid_dev_type, tested_request_name, exc))
else:
self.notify_success()
def raw_loopback(self, report_size):
"""Send every input report back to the device."""
mbed_hid_path = None
mbed_hid = hid.device()
try:
mbed_hid_path = retry_fun_call(
fun=functools.partial(self.get_usb_hid_path, self.dut_usb_dev_sn), # pylint: disable=not-callable
num_retries=20,
retry_delay=0.05)
retry_fun_call(
fun=functools.partial(mbed_hid.open_path, mbed_hid_path), # pylint: disable=not-callable
num_retries=10,
retry_delay=0.05)
except RetryError as exc:
self.notify_error(exc)
return
# Notify the device it can send reports now.
self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_DUMMY)
try:
for _ in range(RAW_IO_REPS):
# There are no Report ID tags in the Report descriptor.
# Receiving only the Report Data, Report ID is omitted.
report_in = mbed_hid.read(report_size)
report_out = report_in[:]
# Set the Report ID to 0x00 (not used).
report_out.insert(0, 0x00)
mbed_hid.write(report_out)
except (ValueError, IOError) as exc:
self.notify_failure('HID Report transfer failed. {}'.format(exc))
finally:
mbed_hid.close()
def setup(self):
self.register_callback(MSG_KEY_DEVICE_READY, self.cb_device_ready)
self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_HID, self.cb_test_get_hid_desc)
self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_CFG, self.cb_test_get_cfg_desc)
self.register_callback(MSG_KEY_TEST_REQUESTS, self.cb_test_class_requests)
self.register_callback(MSG_KEY_TEST_RAW_IO, self.cb_test_raw_io)
def cb_device_ready(self, key, value, timestamp):
"""Send a unique USB SN to the device.
The DUT uses this SN every time it connects to the host as a USB device.
"""
self.send_kv(MSG_KEY_SERIAL_NUMBER, self.dut_usb_dev_sn)
def start_bg_task(self, **thread_kwargs):
"""Start a new daemon thread.
Some callbacks delegate HID device handling to a background task to
avoid delaying the device-side assert handling. Only one background
task is kept running at a time to prevent concurrent access to the
HID device.
"""
try:
self.__bg_task.join()
except (AttributeError, RuntimeError):
pass
self.__bg_task = threading.Thread(**thread_kwargs)
self.__bg_task.daemon = True
self.__bg_task.start()
def cb_test_raw_io(self, key, value, timestamp):
"""Receive HID reports and send them back to the device."""
if not CYTHON_HIDAPI_PRESENT:
self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_NOT_SUPPORTED)
return
try:
# The size of input and output reports used in test.
report_size = int(value)
except ValueError as exc:
self.notify_error(exc)
return
self.start_bg_task(
target=self.raw_loopback,
args=(report_size, ))
| |
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# V5.
# save OVS datapath object
# control ARP and ICMP packets
# need to manually start ARP from the host:
#   szb53@h2:~$ arping -f -I eth1 -s 10.10.5.2 10.10.5.3 -b
# To do: automatic ARP handling will be in version v7
# To do: automatically install flows along the shortest path
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, \
MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, ipv4, icmp, arp
from ryu.ofproto import inet
from ryu.controller import dpset
from ryu.lib.packet.lldp import LLDP_MAC_NEAREST_BRIDGE
# from ryu.lib.packet.ether_types import ETH_TYPE_LLDP
import array
from ryu.lib import hub
from operator import attrgetter
import json
import shutil
import os
import subprocess
import time
import networkx as nx
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
import pickle
# output ovs switch hostname and DPID pairs
OFP_SWITCHES_LIST = \
'./network-data/ofp_switches_list.db'
OFP_SWITCHES_LIST_PREVIOUS = \
'./network-data/ofp_switches_list_prev.db'
OFP_SWITCHES_LIST_SCRIPT = \
'./scripts/remote_ovs_operation/get_switch_ofpbr_datapath_id.sh'
OFP_SWITCHES_FLOW_STATS = \
'./network-data/ofp_switches_{0}_flow_stats.db'
OFP_SWITCHES_FLOW_STATS_PREVIOUS = \
'./network-data/ofp_switches_{0}_flow_stats_prev.db'
OFP_SWITCHES_PORT_STATS = \
'./network-data/ofp_switches_{0}_port_stats.db'
OFP_SWITCHES_PORT_STATS_PREVIOUS = \
'./network-data/ofp_switches_{0}_port_stats_prev.db'
OFP_SINGLE_SHOREST_PATH = './network-data/ofp_single_shortest_path.db'
OFP_ALL_PAIRS_SHOREST_PATH = './network-data/ofp_all_pairs_shortest_path.db'
OFP_ALL_PATHS_SHOREST_PATH = './network-data/ofp_all_paths_shortest_path.db'
OFP_MAC_TO_PORT = './network-data/ofp_mac_to_port.db'
OFP_LINK_PORT = './network-data/ofp_link_port.db'
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
}
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.dpset = kwargs['dpset']
self.datapaths = {}
# create thread for traffic monitoring
self.monitor_thread = hub.spawn(self._monitor)
self.hostname_list = {}
self.net = nx.DiGraph()
self.nodes = {}
self.links = {}
self.no_of_nodes = 0
self.no_of_links = 0
self.topology_data_app = self
self.arp_request = {}
self.arp_reply = {}
# port numbers between pairs of OVS switches
self.link_port = {}
# save OVS datapath Object for later reference
self.dpid_datapathObj = {}
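# Illustrative note (shape assumed from get_topology_data below):
# self.link_port ends up as {src_dpid: {dst_dpid: out_port}}, e.g.
# {1: {2: 3}} means "to reach DPID 2 from DPID 1, send out port 3",
# while self.dpid_datapathObj maps an integer DPID to its datapath
# object so flows can later be pushed to switches other than the one
# that raised the packet-in.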
# Given a DPID, return the hostname as a string
def _hostname_Check(self, datapath):
# Given decimal datapath ID, return hostname
if os.path.exists(os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)):
f = os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)
else:
f = os.path.abspath(OFP_SWITCHES_LIST)
with open(f, 'r') as iff:
for line in iff:
hostname, dpid = line.split()
self.hostname_list[int(dpid, 16)] = hostname
# print self.hostname_list
# TODO: add a datapath validity check later
if datapath not in self.hostname_list.keys():
return datapath
else:
return self.hostname_list[datapath]
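# The list file is assumed to hold "hostname dpid_hex" pairs, one per
# line, e.g. "ofpbr-s1 0000000000000001"; int(dpid, 16) above converts
# the hex DPID into the decimal key used for lookups. The actual file
# contents come from the external shell script, so this format is
# inferred from the parsing code.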
###################################################################
# ofp_event.EventOFPSwitchFeatures
####################################################################
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
self._update_switch_dpid_list()
self.logger.info("Switch Feature reply")
msg = ev.msg
datapath = ev.msg.datapath
dpid = datapath.id
# save datapath object into dpid_datapath
self.dpid_datapathObj[dpid] = ev.msg.datapath
print self.dpid_datapathObj
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
self.logger.info(
" datapath in decimal %s,in hex %s",
datapath.id, hex(int(datapath.id)))
self.logger.info(' OFPSwitchFeatures received: '
'datapath_id=0x%016x n_buffers=%d '
'n_tables=%d auxiliary_id=%d '
'capabilities=0x%08x',
msg.datapath_id, msg.n_buffers, msg.n_tables,
msg.auxiliary_id, msg.capabilities)
# install table-miss flow entry when switch first connected
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
###################################################################
# update switch dpid every 10s
# output to ./network-data/ofp_switches_list.db
####################################################################
def _update_switch_dpid_list(self):
# update and write to ./network-data/ofp_switches_list.db
# it will be called when switch in and out
subprocess.call([OFP_SWITCHES_LIST_SCRIPT])
shutil.copyfile(OFP_SWITCHES_LIST, OFP_SWITCHES_LIST_PREVIOUS)
def _update_switch_port_stats(self):
# write to ./network-data/ofp-switch-port-stats.db (not implemented yet)
pass
###################################################################
# add flow
####################################################################
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
# self.logger.info("add flow to %s", self._hostname_Check(datapath.id))
# print type(datapath)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
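# Usage sketch for add_flow (values are illustrative): forward frames
# for a learned MAC out of port 2 with priority 1:
#   match = parser.OFPMatch(in_port=1, eth_dst='00:11:22:33:44:55')
#   actions = [parser.OFPActionOutput(2)]
#   self.add_flow(datapath, 1, match, actions)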
###################################################################
# EventOFPPacketIn handler
####################################################################
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
eth = pkt.get_protocols(ethernet.ethernet)[0]
# print "1:",pkt_ethernet
# print "2:", eth
dst = eth.dst
src = eth.src
# eth_proto = eth.protocol_name
# do not forward LLDP packets
# if not pkt_ethernet:
# return
if dst == LLDP_MAC_NEAREST_BRIDGE:
return
dpid = hex(datapath.id)
self.mac_to_port.setdefault(dpid, {})
self.arp_reply.setdefault(dpid, {})
self.arp_request.setdefault(dpid, {})
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
# iterate all the switch datapath objects
# for item in self.dpid_datapathObj:
# print item, " ", self.dpid_datapathObj[item].id
pkt_arp = pkt.get_protocol(arp.arp)
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
pkt_icmp = pkt.get_protocol(icmp.icmp)
# print "mac_to_port\n %s" % self.mac_to_port
if pkt_arp:
self._handle_arp(datapath, in_port, pkt_ethernet, pkt_arp)
self._send_arp_packet(datapath, in_port, out_port, eth, msg)
if pkt_icmp and pkt_icmp.type == icmp.ICMP_ECHO_REQUEST:
print "ICMP From %d src_mac %s dst_mac %s" % (datapath.id, src, dst)
shortest_path_list = (
self._handle_icmp(datapath, in_port, pkt_ethernet, pkt_ipv4, pkt_icmp))
print shortest_path_list
count = 0
origin_in_port = in_port
if len(shortest_path_list) == 1:
next_node = shortest_path_list[0]
next_datapath = self.dpid_datapathObj[next_node]
out_port = self.mac_to_port[hex(next_node)][dst]
actions = [parser.OFPActionOutput(out_port)]
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
self.add_flow(next_datapath, 1, match, actions, msg.buffer_id)
else:
for node in shortest_path_list:
next_datapath = self.dpid_datapathObj[node]
next_node = shortest_path_list[count+1]
print "---%s %s" % (node, next_datapath)
out_port = self.link_port[node][next_node]
actions = [parser.OFPActionOutput(out_port)]
# print out_port
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
self.add_flow(next_datapath, 1, match, actions, msg.buffer_id)
reverse_match = parser.OFPMatch(in_port=out_port, eth_dst=src)
reverse_action = [parser.OFPActionOutput(in_port)]
self.add_flow(next_datapath, 1, reverse_match, reverse_action, msg.buffer_id)
print "in_port %s from dpid %s out_port %s To dpid %s" % (
in_port, self._hostname_Check(node), out_port, self._hostname_Check(next_node))
count += 1
in_port = self.link_port[next_node][node]
if count == len(shortest_path_list)-1:
next_datapath = self.dpid_datapathObj[next_node]
out_port = self.mac_to_port[hex(node)][dst]
actions = [parser.OFPActionOutput(out_port)]
print "last stop %s" % out_port
in_port = self.link_port[next_node][node]
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
print "in_port %s from dpid %s out_port %s To node %s" % (
in_port, self._hostname_Check(next_node), out_port, dst)
self.add_flow(next_datapath, 1, match, actions, msg.buffer_id)
reverse_match = parser.OFPMatch(in_port=out_port, eth_dst=src)
reverse_action = [parser.OFPActionOutput(in_port)]
self.add_flow(next_datapath, 1, reverse_match, reverse_action, msg.buffer_id)
out = parser.OFPPacketOut(datapath=next_datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
break
out = parser.OFPPacketOut(datapath=next_datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
# return
# actions = [parser.OFPActionOutput(out_port)]
# # find out output port and install a flow to avoid packet_in next time
# if out_port != ofproto.OFPP_FLOOD:
# match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# # verify if we have a valid buffer_id, if yes avoid to send both
# # flow_mod & packet_out
# if msg.buffer_id != ofproto.OFP_NO_BUFFER:
# # self.add_flow(datapath, 1, match, actions, msg.buffer_id)
# return
# else:
# # self.add_flow(datapath, 1, match, actions)
# pass
# data = None
# if msg.buffer_id == ofproto.OFP_NO_BUFFER:
# data = msg.data
# out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
# in_port=in_port, actions=actions, data=data)
# datapath.send_msg(out)
def _send_arp_packet(self, datapath, in_port, out_port, eth, msg):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
def _send_packet(self, datapath, in_port, out_port, eth, msg):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(out_port)]
dst = eth.dst
# find out output port and install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
# self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
# self.add_flow(datapath, 1, match, actions)
pass
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
###################################################################
# various packet handler
####################################################################
def _handle_arp(self, datapath, port, pkt_ethernet, pkt_arp):
dpid = hex(datapath.id)
if pkt_arp.opcode == arp.ARP_REQUEST:
self.arp_request[dpid]["src_mac"] = pkt_arp.src_mac
self.arp_request[dpid]["dst_mac"] = pkt_arp.dst_mac
self.arp_request[dpid]["src_ip"] = pkt_arp.src_ip
self.arp_request[dpid]["dst_ip"] = pkt_arp.dst_ip
# print "arp_request: %s" % (self.arp_request)
elif pkt_arp.opcode == arp.ARP_REPLY:
self.arp_reply[dpid]["src_mac"] = pkt_arp.src_mac
self.arp_reply[dpid]["dst_mac"] = pkt_arp.dst_mac
self.arp_reply[dpid]["src_ip"] = pkt_arp.src_ip
self.arp_reply[dpid]["dst_ip"] = pkt_arp.dst_ip
print "arp_reply: ", self.arp_reply
def _handle_icmp(self, datapath, in_port, pkt_ethernet, pkt_ipv4, pkt_icmp):
src_dpid = hex(datapath.id)
if pkt_icmp.type == icmp.ICMP_ECHO_REQUEST:
src_mac = pkt_ethernet.src
dst_mac = pkt_ethernet.dst
src_ip = pkt_ipv4.src
dst_ip = pkt_ipv4.dst
print "\n%s %s %s %s %s " % (src_dpid, src_mac, dst_mac, src_ip, dst_ip)
# print self.link_port
# get the DPID which connected to dst_mac
dst_dpid = self._return_destination_dpid(self.arp_reply, src_mac, dst_mac)
print "src_dpid %s dst_dpid %s" % (src_dpid, dst_dpid)
# find a shortest path for this icmp request
shortest_path_list = self._single_shortest_path(int(src_dpid,16), int(dst_dpid,16))
# print shortest_path_list
return shortest_path_list
# add flows in each switch in short_path_list
# self._add_flows(datapath, shortest_path_list, in_port)
# def _add_flows(self, datapath, shortest_path_list, in_port):
# actions = [parser.OFPActionOutput(out_port)]
# match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# self.add_flow(datapath, 1, match, actions, msg.buffer_id)
# pass
def _return_destination_dpid(self, arp_reply, src_mac, dst_mac):
# return the destination dpid based on arp_reply, src_mac and dst_mac
for arp_entry in self.arp_reply:
if self.arp_reply[arp_entry]:
if (src_mac == self.arp_reply[arp_entry]['dst_mac']
and dst_mac == self.arp_reply[arp_entry]['src_mac']):
print "found match"
return arp_entry
else:
print "No Arp Match"
###################################################################
# output shortest path for all pairs for all switches (nodes) in every 10s
####################################################################
def _single_shortest_path(self, src_dpid, dst_dpid):
# return one shortest path between two DPIDs as a list of nodes,
# or an empty list if no path exists
try:
shortest_path = nx.shortest_path(self.net, src_dpid, dst_dpid)
except Exception as e:
self.logger.info("_single_shortest_path %s", e)
return []
return list(shortest_path)
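# Quick sanity check of the networkx call used by _single_shortest_path
# above (a sketch to run outside the controller):
#   g = nx.DiGraph()
#   g.add_edges_from([(1, 2), (2, 3)])
#   nx.shortest_path(g, 1, 3)  # -> [1, 2, 3]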
def _all_single_shortest_path(self):
# print "Printing shortest Path..."
# print nx.shortest_path(self.net)
# print "_single_shortest_path " # ,self.net.nodes(), self.net.edges()
with open(OFP_SINGLE_SHOREST_PATH, 'w') as outp:
for src in self.net.nodes():
for dst in self.net.nodes():
if src != dst:
try:
shortest_path = nx.shortest_path(self.net, src, dst)
except Exception as e:
self.logger.info("_all_single_shortest_path %s", e)
continue
outp.write("%s -> %s %s" % (self._hostname_Check(src),
self._hostname_Check(dst),
[self._hostname_Check(i) for i in shortest_path]))
outp.write("\n")
# print self._hostname_Check(src), " -> ",\
# self._hostname_Check(dst), " ",\
# [self._hostname_Check(i) for i in shortest_path]
def _all_pairs_shortest_path(self):
# write one shortest path for every node pair
with open(OFP_ALL_PAIRS_SHOREST_PATH, 'w') as outp:
try:
shortest_path = nx.all_pairs_dijkstra_path(self.net)
except Exception as e:
self.logger.info("_all_pairs_shortest_path %s", e)
return
for src in shortest_path.keys():
for dst in shortest_path[src]:
outp.write("%s -> %s %s\n" % (self._hostname_Check(src),
self._hostname_Check(dst),
[self._hostname_Check(i) for i in shortest_path[src][dst]]))
# print self._hostname_Check(src), " -> ", self._hostname_Check(dst),\
# " ", [self._hostname_Check(i)
# for i in shortest_path[src][dst]]
def _all_paths_shortest_path(self):
# print all the shortest paths for each node pair
# print "_all_paths_shortest_path ", self.net
with open(OFP_ALL_PATHS_SHOREST_PATH, 'w') as outp:
for src in self.net.nodes():
for dst in self.net.nodes():
if src != dst:
try:
shortest_path = nx.all_shortest_paths(self.net, src, dst)
except Exception as e:
self.logger.info("_all_paths_shortest_path %s", e)
continue
for each_path_list in shortest_path:
outp.write("%s -> %s %s" % (self._hostname_Check(src),
self._hostname_Check(dst),
[self._hostname_Check(i) for i in each_path_list]))
outp.write("\n")
###################################################################
# write mac_to_port in every 10s
####################################################################
def _mac_to_port(self):
# print "_mac_to_port updating"
with open(OFP_MAC_TO_PORT, 'w') as outp:
# outp.write("hello")
for dpid in self.mac_to_port.keys():
for src in self.mac_to_port[dpid]:
outp.write("dpid=%s src_mac=%s out_port=%s\n" %
(dpid, src, self.mac_to_port[dpid][src]))
###################################################################
# Refresh Network nodes and links every 10s
####################################################################
@set_ev_cls(event.EventSwitchEnter)
def get_topology_data(self, ev):
# self.logger.info("get_topology_data()")
switch_list = get_switch(self.topology_data_app, None)
switches = [switch.dp.id for switch in switch_list]
# print "switches: ", switches
self.net.add_nodes_from(switches)
# print "net nodes: ", self.net.nodes()
for node in self.net.nodes():
self.link_port.setdefault(node, {})
with open(OFP_LINK_PORT, 'w') as outp:
# src_dpid dst_dpid src_dpid_output_port dst_dpid_input_port
links_list = get_link(self.topology_data_app, None)
# print links_list
# add link from one direction
links = [(link.src.dpid, link.dst.dpid,
{'out_port': link.src.port_no}) for link in links_list]
# print links
self.net.add_edges_from(links)
for link in links:
# outp.write("%s %s %s\n" % (self._hostname_Check(link[0]),
# self._hostname_Check(link[1]), link[2]['out_port']))
outp.write("%s %s %s\n" % (link[0], link[1], link[2]['out_port']))
self.link_port[link[0]][link[1]] = link[2]['out_port']
# add links from the opposite direction
links = [(link.dst.dpid, link.src.dpid,
{'out_port': link.dst.port_no}) for link in links_list]
# print "reverse:", links
self.net.add_edges_from(links)
for link in links:
# outp.write("%s %s %s\n" % (self._hostname_Check(link[0]),
# self._hostname_Check(link[1]), link[2]['out_port']))
outp.write("%s %s %s\n" % (link[0], link[1], link[2]['out_port']))
self.link_port[link[0]][link[1]] = link[2]['out_port']
####################################################################
# Traffc monitor section
####################################################################
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if datapath.id not in self.datapaths:
self.logger.info('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.info('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
# wait for around 10s until all the switches are connected to the controller
self._update_switch_dpid_list()
hub.sleep(10)
while True:
for dp in self.datapaths.values():
self._mac_to_port()
self._request_stats(dp)
self._update_switch_dpid_list()
self._all_single_shortest_path()
self._all_pairs_shortest_path()
self._all_paths_shortest_path()
hub.sleep(10)
# send flow and port stats request
def _request_stats(self, datapath):
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# flow stats request C->S
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
# port status request C->S
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
body = ev.msg.body
# print "flow body:", body[1]
switch_name = self._hostname_Check(ev.msg.datapath.id)
with open(OFP_SWITCHES_FLOW_STATS.format(switch_name), 'w') as iff:
self.logger.debug("\n> Flow Stats:")
self.logger.debug('datapath '
'hostname '
'in-port duration_sec duration_nsec '
' eth-dst out-port packets bytes')
iff.write('datapath '
'hostname '
'in-port duration_sec duration_nsec '
' eth-dst out-port packets bytes\n')
self.logger.debug('---------------- '
'---------------- '
'-------- ---------------- -------------- '
'---------------- -------- -------- --------')
iff.write('---------------- '
'---------------- '
'-------- ---------------- -------------- '
'---------------- -------- -------- --------\n')
for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.match['in_port'],
flow.match['eth_dst'])):
iff.write('%16d %16s %8x %16d %16d %17s %8x %8d %8d' %
(ev.msg.datapath.id,
self._hostname_Check(ev.msg.datapath.id),
stat.match['in_port'], stat.duration_sec,
stat.duration_nsec, stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count))
iff.write("\n")
self.logger.debug('%16d %16s %8x %16d %16d %17s %8x %8d %8d',
ev.msg.datapath.id,
self._hostname_Check(ev.msg.datapath.id),
stat.match['in_port'], stat.duration_sec,
stat.duration_nsec, stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
body = ev.msg.body
self.get_topology_data(ev)
# print "port body:", body[1]
switch_name = self._hostname_Check(ev.msg.datapath.id)
with open(OFP_SWITCHES_PORT_STATS.format(switch_name), 'w') as iff:
self.logger.debug("\n> Port Stats:")
self.logger.debug('datapath '
'hostname '
'port duration_sec duration_nsec '
'rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error')
iff.write('datapath '
'hostname '
'port duration_sec duration_nsec '
'rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error\n')
self.logger.debug('---------------- '
'-------------- '
'-------- ---------------- -------------- '
'-------- -------- -------- '
'-------- -------- --------')
iff.write('---------------- '
'-------------- '
'-------- ------------ -------------- '
'-------- -------- -------- '
'-------- -------- --------\n')
for stat in sorted(body, key=attrgetter('port_no')):
self.logger.debug('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d',
ev.msg.datapath.id,
self._hostname_Check(ev.msg.datapath.id),
stat.port_no, stat.duration_sec, stat.duration_nsec,
stat.rx_packets, stat.rx_bytes,
stat.rx_errors, stat.tx_packets,
stat.tx_bytes, stat.tx_errors)
iff.write('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d' %
(ev.msg.datapath.id,
self._hostname_Check(ev.msg.datapath.id),
stat.port_no, stat.duration_sec, stat.duration_nsec,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors))
iff.write("\n")
# The switch notifies controller of change of ports.
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self.logger.info("port added %s", port_no)
elif reason == ofproto.OFPPR_DELETE:
self.logger.info("port deleted %s", port_no)
elif reason == ofproto.OFPPR_MODIFY:
self.logger.info("port modified %s", port_no)
else:
self.logger.info("Illegal port state %s %s", port_no, reason)
# This will turn on Web restAPI
app_manager.require_app('ryu.app.rest_topology')
app_manager.require_app('ryu.app.ws_topology')
app_manager.require_app('ryu.app.ofctl_rest')
# app_manager.require_app('my_traffic_monitor')
app_manager.require_app('ryu.app.gui_topology.gui_topology')
# print "Project Path", PATH
| |
import sys
import os
#==========================Read Learner's Information=============
learnerEmail = raw_input('Login (Email address): ')
learnerSecret = raw_input('One-time Password (from the assignment page. This is NOT your own account\'s password): ')
#==========================Assignment Language Modeling=========
assignmentKey = '6PCisAHtEeaWaBL9aqpmIw'
partId1 = 'EKpDb'
partId2 = 'uniSi'
partIdx = raw_input('Please enter which parts you want to submit: \n1: Language Modeling\n2: Part of Speech Tagging\nFor example, typing "1 2" will submit parts 1 and 2\n')
#=========================Evaluation====================
__author__ = 'linkuo'
import os
import subprocess
import shutil
import datetime
import time
class student:
def __init__(self, p, uni='uni', pin=''):
self.uni=uni
self.pin=pin
self.accuracy=[float(0)]*11
self.rawgrade=[]
self.leverage=[5,10,5,5,10,5,5,20,5]
self.grade=float(0)
self.address='/home/'+uni+'/hidden/'+pin+'/Homework1/'
self.lateday=0
self.partIdx = p
def setaccuracy(self,i,x):
self.accuracy[i]=x
def accuracy2grade(self):
for question in self.accuracy[:9]:
if question>=0.95:
self.rawgrade.append(float(1))
elif question>=0.85:
self.rawgrade.append(float(0.9))
elif question>=0.65:
self.rawgrade.append(float(0.8))
elif question>=0.35:
self.rawgrade.append(float(0.5))
elif question>=0.30:
self.rawgrade.append(float(0.3))
else:
self.rawgrade.append(0.0)
if self.partIdx == 1:
for item in zip(self.rawgrade[:4],self.leverage[:4]):
self.grade=self.grade+item[0]*item[1]
if self.accuracy[9] == 1:
self.grade += 15
else:
for item in zip(self.rawgrade[4:],self.leverage[4:]):
self.grade=self.grade+item[0]*item[1]
if self.accuracy[10] == 1:
self.grade += 15
'''
def get_lateday(self,duedate):
try:
lastm=(datetime.date.fromtimestamp(os.path.getmtime(self.address+'solutionsA.py'))-duedate).days
except:
lastm=0
try:
if lastm<(datetime.date.fromtimestamp(os.path.getmtime(self.address+'solutionsB.py'))-duedate).days:
lastm=(datetime.date.fromtimestamp(os.path.getmtime(self.address+'solutionsB.py'))-duedate).days
except:
lastm=0
if lastm<0:
lastm=0
self.lateday=lastm
'''
def get_runningtime(self,starttime,endtime,i):
finaltime=(endtime-starttime)/60
if i==1:
if finaltime<=5:
self.setaccuracy(9,float(1))
if i==2:
if finaltime<=25:
self.setaccuracy(10,float(1))
#Grader Class
#When initialized, it reads the gold standard files and converts them into the expected formats:
#for transition and emission probabilities, the gold standard is stored in a dictionary with the ngram or word/tag as the key and the log-probability as the value;
#for sentence probabilities, the values are stored as a list of numbers in sentence order.
#convertdict() and convertnum() convert file lines into a dictionary and a list of numbers, respectively.
#gradenum(), gradedict() and gradepos() take the student's output files as input and compute the percentage of similarity between the student's file and the gold standard.
#setaccuracy() takes a question index and a similarity percentage and sets the student's accuracy attribute.
#grade() calls gradenum(), gradedict(), gradepos() and setaccuracy() to set all accuracies for the current student.
class grader:
gradefiles=['A1.txt','A2.uni.txt','A2.bi.txt','A2.tri.txt','A3.txt','Sample1_scored.txt','Sample2_scored.txt','B2.txt','B3.txt','B4.txt','B5.txt','B6.txt']
Gold=['A1_GS.txt','A2_GS.uni.txt','A2_GS.bi.txt','A2_GS.tri.txt','A3_GS.txt','Sample1_GS_scored.txt','Sample2_GS_scored.txt','B2_GS.txt','B3_GS.txt','B4_GS.txt','Brown_tagged_dev.txt','Brown_tagged_dev.txt']
goldstandard=[]
gradefiles = [ 'output/'+f for f in gradefiles]
Gold =['data/GS/'+f for f in Gold]
def __init__(self):
for item in self.Gold:
file=open(item,'r')
self.goldstandard.append(file.readlines())
file.close()
self.goldstandard[0]=self.convertdict(self.goldstandard[0])
self.goldstandard[7]=self.convertdict(self.goldstandard[7])
self.goldstandard[9]=self.convertdict(self.goldstandard[9])
self.goldstandard[1]=self.convertnum(self.goldstandard[1])
self.goldstandard[2]=self.convertnum(self.goldstandard[2])
self.goldstandard[3]=self.convertnum(self.goldstandard[3])
self.goldstandard[4]=self.convertnum(self.goldstandard[4])
self.goldstandard[5]=self.convertnum(self.goldstandard[5])
self.goldstandard[6]=self.convertnum(self.goldstandard[6])
def convertdict(self,file):
dict={}
for line in file:
try:
dict[line.rsplit(' ',1)[0]]=float(line.strip().rsplit(' ',1)[1])
except:
continue
return dict
def convertnum(self,file):
list=[]
for line in file:
try:
list.append(float(line.strip()))
except:
list.append(float(0))
return list
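# Example (illustrative values): convertdict turns a line such as
# "the cat -2.35" into {"the cat": -2.35} by splitting on the last
# space, while convertnum turns "0.5\nnot-a-number\n" into [0.5, 0.0],
# substituting 0 for any unparseable line.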
def grade(self,currentstudent):
#print 'grading',currentstudent.uni
try:
currentstudent.setaccuracy(0,self.gradedict(0))
except:
print 'error on ',currentstudent.uni,0
try:
currentstudent.setaccuracy(4,self.gradedict(7))
except:
print 'error on ',currentstudent.uni,4
try:
currentstudent.setaccuracy(6,self.gradedict(9))
except:
print 'error on ',currentstudent.uni,6
try:
currentstudent.setaccuracy(2,self.gradenum(4))
except:
print 'error on ',currentstudent.uni,2
try:
currentstudent.setaccuracy(5,self.gradepos(8))
except:
print 'error on ',currentstudent.uni,5
score=float(0)
try:
score=float(self.gradepos(10))-float(0.933249946254)
if score >=float(0):
score=1
else:
score=abs(self.gradepos(10))/float(0.933249946254)
except:
print 'error on ',currentstudent.uni,7
currentstudent.setaccuracy(7,score)
score=float(0)
try:
score=float(self.gradepos(11))-float(0.879985146677)
if score >=float(0):
score=1
else:
score=abs(self.gradepos(11))/float(0.879985146677)
except:
print 'error on ',currentstudent.uni,8
currentstudent.setaccuracy(8,score)
score=float(0)
try:
score=(self.gradenum(1)+self.gradenum(2)+self.gradenum(3))/3
except:
print 'error on ',currentstudent.uni,1
currentstudent.setaccuracy(1,score)
score=float(0)
try:
score=(self.gradenum(5)+self.gradenum(6))/2
except:
print 'error on ',currentstudent.uni,3
currentstudent.setaccuracy(3,score)
def gradedict(self,i):
score=float(0)
wrong=float(0)
sum=float(0)
try:
files=open(self.gradefiles[i],'r')
lines=files.readlines()
files.close()
lines=self.convertdict(lines)
for item in self.goldstandard[i]:
try:
if self.goldstandard[i][item]!=0:
wrong+= min(abs(float(lines[item]-self.goldstandard[i][item])/float(self.goldstandard[i][item])),1)
sum+= 1
else:
wrong+= min(abs(float(lines[item]-self.goldstandard[i][item])),1)
sum+=1
except:
wrong+=1
sum+= 1
try:
score=float(sum-wrong)/float(sum)
return score
except:
print "error on",i
except IOError:
return score
def gradenum(self,i):
score=float(0)
wrong=float(0)
sum=float(0)
try:
files=open(self.gradefiles[i],'r')
lines=files.readlines()
files.close()
lines=self.convertnum(lines)
for j in range(0,len(self.goldstandard[i])):
try:
if self.goldstandard[i][j]!=0:
wrong+= min(abs(float(lines[j]-self.goldstandard[i][j])/float(self.goldstandard[i][j])),1)
sum+= 1
else:
wrong+= min(abs(float(lines[j]-self.goldstandard[i][j])),1)
sum+=1
except:
wrong+=1
sum+= 1
try:
score=float(sum-wrong)/float(sum)
return score
except:
print "error on",i
except IOError:
return score
def gradepos(self,i):
score=float(0)
try:
files=open(self.gradefiles[i],'r')
lines=files.readlines()
files.close()
num_correct = 0
total = 1
for user_sent, correct_sent in zip(lines, self.goldstandard[i]):
user_tok = user_sent.split()
correct_tok = correct_sent.split()
if len(user_tok) != len(correct_tok):
continue
for u, c in zip(user_tok, correct_tok):
if u == c:
num_correct += 1
total += 1
score = float(num_correct) / total
return score
except IOError:
return score
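# Note on gradepos: total starts at 1 rather than 0, which avoids a
# ZeroDivisionError when no sentence lengths match; with thousands of
# scored tokens the one-token bias on the accuracy is negligible.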
gradernow=grader()
def evaluate_part(partIdx):
currentstudent= student(partIdx)
#running the student's scripts and get the running time
if partIdx == 1:
starttime=time.time()
try:
subprocess.check_call(" python solutionsA.py",shell=True)
except:
print 'solutionsA failed',currentstudent.uni
endtime=time.time()
currentstudent.get_runningtime(starttime,endtime,1)
else:
starttime=time.time()
try:
subprocess.check_call(" python solutionsB.py",shell=True)
except:
print 'solutionsB failed',currentstudent.uni
endtime=time.time()
currentstudent.get_runningtime(starttime,endtime,2)
#grading current student, this process will generate a list of accuracies for each question
gradernow.grade(currentstudent)
#transfer the accuracies to the final grade
currentstudent.accuracy2grade()
print 'Your accuracy',
if partIdx == 1:
print currentstudent.accuracy[:4]
else:
print currentstudent.accuracy[4:9]
print 'Your grade', currentstudent.grade
#print currentstudent.accuracy
if partIdx == 1:
return currentstudent.grade / 40.0
else:
return currentstudent.grade / 60.0
output1 = '0.0'
output2 = '0.0'
if '1' in partIdx:
print 'Evaluating Part A...'
output1 = str(evaluate_part(1))
if '2' in partIdx:
print 'Evaluating Part B...'
output2 = str(evaluate_part(2))
#======================Submit Score========================
cmd = 'curl -X POST -H "Cache-Control: no-cache" -H "Content-Type: application/json" -d '
url = 'https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1'
data = {
"assignmentKey": assignmentKey,
"submitterEmail": learnerEmail,
"secret": learnerSecret,
"parts": {
partId1: {
"output": output1
},
partId2: {
"output": output2
}
}
}
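# A safer alternative sketch (not what this script does): serialize the
# payload with json.dumps instead of str(data).replace, which produces
# invalid JSON whenever a value contains a quote character:
#   import json
#   payload = json.dumps(data)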
'''
if partIdx == 1:
data = {
"assignmentKey": assignmentKey,
"submitterEmail": learnerEmail,
"secret": learnerSecret,
"parts": {
partId1: {
"output": output
},
partId2: {
},
partId3: {
}
}
}
elif partIdx == 2:
data = {
"assignmentKey": assignmentKey,
"submitterEmail": learnerEmail,
"secret": learnerSecret,
"parts": {
partId1: {
},
partId2: {
"output": output
},
partId3: {
}
}
}
elif partIdx == 3:
data = {
"assignmentKey": assignmentKey,
"submitterEmail": learnerEmail,
"secret": learnerSecret,
"parts": {
partId1: {
},
partId2: {
},
partId3: {
"output": output
}
}
}
else:
print 'Invalid partID'
sys.exit()
'''
# curlcmd = cmd + "'" + str(data).replace("'",'"') + "'" + " '" + url + "'"
curlcmd = cmd + '"' + str(data).replace("'",'"').replace('"','\\"') + '"' + ' "'+ url + '"'
print curlcmd
print
os.system(curlcmd)
| |
#!/usr/bin/env python
# Copyright 2009-2015 Jasper Poppe <jgpoppe@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jasper Poppe <jgpoppe@gmail.com>'
__copyright__ = 'Copyright (c) 2009-2015 Jasper Poppe'
__credits__ = ''
__license__ = 'Apache License, Version 2.0'
__version__ = '2.0.0rc7'
__maintainer__ = 'Jasper Poppe'
__email__ = 'jgpoppe@gmail.com'
__status__ = 'production'
import fnmatch
import logging
import os
import utils
class Manage:
"""manage netboot images, ISO, syslinux and permissions files"""
def __init__(self, cfg):
"""initialize class variables"""
self.cfg = cfg
self.temp = os.path.join(self.cfg['paths']['temp'], 'seedbank')
def copy_dir_contents(self, src, dst):
"""find and copy all files from src to dst"""
utils.make_dirs(dst)
files = [os.path.join(root, file_name) for root, _, files in
os.walk(src) if files for file_name in files]
for src in files:
utils.file_copy(src, dst)
def _download(self, src, dst_path):
"""download a file"""
src_file = os.path.basename(src)
dst = os.path.join(dst_path, src_file)
if os.path.isfile(dst):
logging.info('"%s" already exists, download skipped', dst)
else:
utils.make_dirs(dst_path)
utils.download(src, dst)
def _extract(self, prefix, files, src, dst, target):
"""extract files to the seedbank temp directory and move those"""
archive = os.path.join(dst, os.path.basename(src))
files = (os.path.join(prefix, file_name) for file_name in files)
temp_manage = os.path.join(self.temp, 'manage')
if os.path.isdir(temp_manage):
utils.rmtree(temp_manage)
utils.make_dirs(temp_manage)
utils.untar_files(archive, files, temp_manage)
self.copy_dir_contents(temp_manage, target)
utils.rmtree(temp_manage)
def _extract_debs(self, directory):
"""extract files from all debian packages in a directory"""
pattern = self.cfg['debian']['firmware_filter']
os.chdir(directory)
for file_name in os.listdir(directory):
if fnmatch.fnmatch(file_name, pattern):
result = utils.call(['dpkg', '-x', file_name, 'temp'])
if result:
err = 'failed to extract package "%s"' % file_name
raise utils.FatalException(err)
logging.info('extracted "%s"', file_name)
def _pxe_default(self):
"""manage the pxelinux.cfg default file"""
src = os.path.join(self.cfg['paths']['templates'], 'pxe_default')
directory = os.path.join(self.cfg['paths']['tftpboot'], 'pxelinux.cfg')
dst = os.path.join(directory, 'default')
if os.path.isfile(dst):
return
logging.info('created default pxelinux.cfg file "%s"', dst)
utils.make_dirs(directory)
utils.file_copy(src, dst)
def _debian_firmware(self, name):
"""integrate Debian non free firmware"""
temp_initrd = os.path.join(self.temp, 'initrd')
initrd = os.path.join(self.cfg['paths']['tftpboot'], 'seedbank', name,
'initrd.gz')
utils.make_dirs(temp_initrd)
utils.initrd_extract(temp_initrd, initrd)
dst = os.path.join(self.temp, 'initrd/lib/firmware')
self._add_firmware(name, dst)
utils.initrd_create(temp_initrd, initrd)
utils.rmtree(temp_initrd)
def _add_firmware(self, name, dst):
"""download, extract and copy Debian non free firmware"""
distribution, release, _ = name.split('-')
path = '-'.join(('firmware', distribution, release))
archive_dst = os.path.join(self.cfg['paths']['archives'], path)
temp_firmware = os.path.join(self.temp, 'firmware')
archive = os.path.join(archive_dst, 'firmware.tar.gz')
url = self.cfg[distribution]['url_firmware'].replace('${release}',
release)
self._download(url, archive_dst)
utils.untar(archive, temp_firmware)
self._extract_debs(temp_firmware)
src = os.path.join(temp_firmware, 'temp', 'lib/firmware')
utils.file_move(src, dst)
utils.rmtree(temp_firmware)
def syslinux(self):
"""download syslinux and extract required files"""
dst = os.path.join(self.cfg['paths']['archives'], 'syslinux')
files = ('core/pxelinux.0', 'com32/menu/menu.c32',
'com32/menu/vesamenu.c32')
prefix = os.path.basename(self.cfg['urls']['syslinux'])
if prefix.endswith('.tar.gz'):
prefix = prefix[:-len('.tar.gz')]
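# str.rstrip strips a set of characters rather than a suffix (e.g.
# 'foo-4.05.tar.gz'.rstrip('.tar.gz') can also eat trailing 't', 'a',
# 'r', 'g' or 'z' characters from the version), hence the explicit
# endswith/slice above to remove exactly the '.tar.gz' suffix.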
self._download(self.cfg['urls']['syslinux'], dst)
self._extract(prefix, files, self.cfg['urls']['syslinux'], dst,
self.cfg['paths']['tftpboot'])
self._pxe_default()
def iso_debian(self, name):
"""download a Debian ISO"""
distribution, release, architecture, flavour = name.split('-')
if flavour == 'mini':
url = self.cfg[distribution]['url_main']
url = os.path.join(url, 'debian/dists', release, 'main/installer-' +
architecture, 'current/images/netboot/mini.iso')
else:
if release == self.cfg[distribution]['iso_current']:
release = 'current'
url = self.cfg[distribution]['url_iso']
url = os.path.join(url, release, architecture, 'iso-cd')
data = utils.scrape_tag(url, 'a')
isos = [link for link in data if link.endswith('.iso')]
iso_split = isos[0].split('-')
version = iso_split[1]
iso_file = '-'.join(('debian', version, architecture, flavour))
iso_file = iso_file + '.iso'
url = os.path.join(url, iso_file)
return url
def iso_ubuntu(self, name):
"""download an Ubuntu ISO; the mini ISO comes from a different
source than the others. This will also detect and choose the current
beta or alpha release when there is no stable release"""
distribution, release, architecture, flavour = name.split('-')
if flavour == 'mini':
url = self.cfg[distribution]['url_main']
url = os.path.join(url, 'ubuntu/dists', release, 'main/installer-' +
architecture, 'current/images/netboot/mini.iso')
else:
url = self.cfg[distribution]['url_iso']
url = os.path.join(url, release)
data = utils.scrape_tag(url, 'a')
isos = [link for link in data if link.endswith('.iso')]
iso_split = isos[0].split('-')
version = iso_split[1]
if len(iso_split) == 4:
iso_file = '-'.join((distribution, version, flavour,
architecture))
elif len(iso_split) == 5:
state = iso_split[2]
iso_file = '-'.join((distribution, version, state, flavour,
architecture))
logging.warning('no stable release found for "%s"', name)
iso_file = iso_file + '.iso'
url = os.path.join(url, iso_file)
return url
def iso(self, name):
"""download ISOs"""
dst = os.path.join(self.cfg['paths']['isos'], name + '.iso')
if os.path.isfile(dst):
logging.info('nothing to do, "%s" already exists', dst)
return
distribution = name.split('-', 1)[0]
if distribution == 'ubuntu':
url = self.iso_ubuntu(name)
elif distribution == 'debian':
url = self.iso_debian(name)
else:
err = 'unknown distribution "%s"' % distribution
raise utils.FatalException(err)
utils.make_dirs(self.cfg['paths']['isos'])
utils.download(url, dst)
def netboot(self, name):
"""download, extract and patch netboot images"""
distribution, release, architecture = name.split('-')
src = '%s/%s/dists/%s/main/installer-%s/current/images/netboot/'\
'netboot.tar.gz' % (self.cfg[distribution]['url_main'], distribution,
release, architecture)
dst = os.path.join(self.cfg['paths']['archives'], name)
prefix = os.path.join('./%s-installer' % distribution, architecture)
files = ('initrd.gz', 'linux')
target = os.path.join(self.cfg['paths']['tftpboot'], 'seedbank', name)
self._download(src, dst)
self._extract(prefix, files, src, dst, target)
if 'firmwares' in self.cfg[distribution]:
if release in self.cfg[distribution]['firmwares']:
self._debian_firmware(name)
def _remove_netboot(self, name):
"""remove a netboot image and if defined the related firmware files"""
path = os.path.join(self.cfg['paths']['tftpboot'], 'seedbank', name)
if not utils.rmtree(path):
logging.info('release "%s" has not been installed', name)
else:
utils.rmtree(os.path.join(self.cfg['paths']['archives'], name))
release = name.split('-')[1]
firmware = os.path.join(self.cfg['paths']['archives'],
'firmware-' + release)
if not utils.rmtree(firmware):
logging.info('firmware "%s" not found, nothing to do', firmware)
def _remove_iso(self, name):
"""remove an installation ISO"""
file_name = os.path.join(self.cfg['paths']['isos'], name + '.iso')
if not utils.file_delete(file_name):
logging.info('release "%s" has not been installed', name)
def remove(self, name):
"""remove a release"""
if name in self.cfg['distributions']['netboots']:
self._remove_netboot(name)
elif name in self.cfg['distributions']['isos']:
self._remove_iso(name)
else:
logging.error('release "%s" has not been defined in settings', name)
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers import Layer
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.merge import multiply
from keras.layers.merge import concatenate
from keras.layers.core import Permute
from keras.layers.core import RepeatVector
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Reshape
from keras.layers.core import Flatten
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input
from keras.models import Model
from keras import metrics
from config_aa import *
import tb_callback
import lrs_callback
import argparse
import math
import os
import cv2
from sys import stdout
def encoder_model():
model = Sequential()
# 10x128x128
model.add(Conv3D(filters=128,
strides=(1, 4, 4),
kernel_size=(3, 11, 11),
padding='same',
input_shape=(int(VIDEO_LENGTH/2), 128, 128, 3)))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x32x32
model.add(Conv3D(filters=64,
strides=(1, 2, 2),
kernel_size=(3, 5, 5),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x16x16
model.add(Conv3D(filters=32,
strides=(1, 1, 1),
kernel_size=(3, 5, 5),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
return model
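# Shape walk-through for the encoder (batch dim omitted; assumes
# VIDEO_LENGTH == 20 in config_aa, so int(VIDEO_LENGTH/2) == 10):
# (10, 128, 128, 3) input
# -> strides (1, 4, 4) -> (10, 32, 32, 128)
# -> strides (1, 2, 2) -> (10, 16, 16, 64)
# -> strides (1, 1, 1) -> (10, 16, 16, 32),
# matching the decoder's Input(shape=(10, 16, 16, 32)) below.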
def decoder_model():
inputs = Input(shape=(10, 16, 16, 32))
# 10x16x16
conv_1 = Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 1, 1))(inputs)
x = TimeDistributed(BatchNormalization())(conv_1)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_1 = TimeDistributed(Dropout(0.5))(x)
# 10x32x32
conv_2 = Conv3DTranspose(filters=128,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2))(out_1)
x = TimeDistributed(BatchNormalization())(conv_2)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_2 = TimeDistributed(Dropout(0.5))(x)
# 10x64x64
conv_3 = Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2))(out_2)
x = TimeDistributed(BatchNormalization())(conv_3)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_3 = TimeDistributed(Dropout(0.5))(x)
# Learn alpha_1
conv3D_1 = Conv3D(filters=1,
strides=(1, 1, 1),
kernel_size=(3, 3, 3),
dilation_rate=(2, 2, 2),
padding='same')(out_3)
x = TimeDistributed(BatchNormalization())(conv3D_1)
x = TimeDistributed(Dropout(0.5))(x)
# conv3D_2 = Conv3D(filters=1,
# strides=(1, 1, 1),
# kernel_size=(3, 3, 3),
# dilation_rate=(3, 3, 3),
# padding='same')(x)
# x = TimeDistributed(BatchNormalization())(conv3D_2)
# x = TimeDistributed(Dropout(0.5))(x)
flat_1 = TimeDistributed(Flatten())(x)
dense_1 = TimeDistributed(Dense(units=64 * 64, activation='softmax'))(flat_1)
x = TimeDistributed(Dropout(0.5))(dense_1)
a = Reshape(target_shape=(10, 64, 64, 1))(x)
# Custom loss layer
class CustomLossLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomLossLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
super(CustomLossLayer, self).build(input_shape) # Be sure to call this somewhere!
def attn_loss(self, a):
attn_loss = K.sum(K.flatten(K.square(1 - K.sum(a, axis=1))), axis=-1)
return ATTN_COEFF * K.mean(attn_loss)
def call(self, inputs):
x = inputs
print (inputs.shape)
loss = self.attn_loss(x)
self.add_loss(loss, inputs=inputs)
# We do use this output.
return x
def compute_output_shape(self, input_shape):
return (input_shape[0], 10, 64, 64, 1)
x = CustomLossLayer()(a)
x = Flatten()(x)
x = RepeatVector(n=64)(x)
x = Permute((2, 1))(x)
x = Reshape(target_shape=(10, 64, 64, 64))(x)
attn_1 = multiply([out_3, x])
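# How the attention map is broadcast above: x has shape
# (batch, 10, 64, 64, 1); Flatten gives (batch, 10*64*64),
# RepeatVector(64) gives (batch, 64, 10*64*64), Permute swaps to
# (batch, 10*64*64, 64), and Reshape restores (batch, 10, 64, 64, 64)
# so the single attention channel is replicated across all 64 feature
# channels of out_3 before the element-wise multiply.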
# 10x128x128
conv_4 = Conv3DTranspose(filters=3,
kernel_size=(3, 11, 11),
strides=(1, 2, 2),
padding='same')(attn_1)
x = TimeDistributed(BatchNormalization())(conv_4)
x = TimeDistributed(Activation('tanh'))(x)
predictions = TimeDistributed(Dropout(0.5))(x)
# x = TimeDistributed(Dropout(0.5))(x)
model = Model(inputs=inputs, outputs=predictions)
return model
def set_trainability(model, trainable):
model.trainable = trainable
for layer in model.layers:
layer.trainable = trainable
def autoencoder_model(encoder, decoder):
model = Sequential()
model.add(encoder)
model.add(decoder)
return model
def combine_images(X, y, generated_images):
# Unroll all generated video frames
n_frames = generated_images.shape[0] * generated_images.shape[1]
frames = np.zeros((n_frames,) + generated_images.shape[2:], dtype=generated_images.dtype)
frame_index = 0
for i in range(generated_images.shape[0]):
for j in range(generated_images.shape[1]):
frames[frame_index] = generated_images[i, j]
frame_index += 1
num = frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = frames.shape[1:]
image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=generated_images.dtype)
for index, img in enumerate(frames):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
n_frames = X.shape[0] * X.shape[1]
orig_frames = np.zeros((n_frames,) + X.shape[2:], dtype=X.dtype)
# Original frames
frame_index = 0
for i in range(X.shape[0]):
for j in range(X.shape[1]):
orig_frames[frame_index] = X[i, j]
frame_index += 1
num = orig_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = orig_frames.shape[1:]
orig_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=X.dtype)
for index, img in enumerate(orig_frames):
i = int(index / width)
j = index % width
orig_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
# Ground truth
truth_frames = np.zeros((n_frames,) + y.shape[2:], dtype=y.dtype)
frame_index = 0
for i in range(y.shape[0]):
for j in range(y.shape[1]):
truth_frames[frame_index] = y[i, j]
frame_index += 1
num = truth_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = truth_frames.shape[1:]
truth_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=y.dtype)
for index, img in enumerate(truth_frames):
i = int(index / width)
j = index % width
truth_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
return orig_image, truth_image, image
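# Tiling sketch for combine_images (illustrative numbers): with 2 videos
# of 8 frames each, the 16 frames are laid out row-major on a 4x4 grid
# (width = floor(sqrt(n)), height = ceil(n / width)), producing one
# (4*H, 4*W, C) image per input tensor.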
def load_weights(weights_file, model):
model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
if PRINT_MODEL_SUMMARY:
print (encoder.summary())
print (decoder.summary())
print (autoencoder.summary())
# exit(0)
# Save model to file
if SAVE_MODEL:
print ("Saving models to file...")
model_json = encoder.to_json()
with open(os.path.join(MODEL_DIR, "encoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = decoder.to_json()
with open(os.path.join(MODEL_DIR, "decoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = autoencoder.to_json()
with open(os.path.join(MODEL_DIR, "autoencoder.json"), "w") as json_file:
json_file.write(model_json)
if PLOT_MODEL:
plot_model(encoder, to_file=os.path.join(MODEL_DIR, 'encoder.png'), show_shapes=True)
plot_model(decoder, to_file=os.path.join(MODEL_DIR, 'decoder.png'), show_shapes=True)
plot_model(autoencoder, to_file=os.path.join(MODEL_DIR, 'autoencoder.png'), show_shapes=True)
if ENC_WEIGHTS != "None":
print ("Pre-loading encoder with weights...")
load_weights(ENC_WEIGHTS, encoder)
if DEC_WEIGHTS != "None":
print ("Pre-loading decoder with weights...")
load_weights(DEC_WEIGHTS, decoder)
def load_X(videos_list, index, data_dir):
X = np.zeros((BATCH_SIZE, VIDEO_LENGTH,) + IMG_SIZE)
for i in range(BATCH_SIZE):
for j in range(VIDEO_LENGTH):
filename = "frame_" + str(videos_list[(index*BATCH_SIZE + i), j]) + ".png"
im_file = os.path.join(data_dir, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
except AttributeError as e:
print (im_file)
print (e)
return X
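# Pixels are scaled from [0, 255] to [-1, 1] here to match the decoder's
# tanh activation; predictions are mapped back with x * 127.5 + 127.5
# before being written out as images in train().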
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
print ("Loading data...")
frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_128.hkl'))
# Build video progressions
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + 1
end_frame_index = end_frame_index + 1
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
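# videos_list now holds sliding windows of VIDEO_LENGTH consecutive
# frame indices, each drawn from a single clip: the set() check above
# rejects windows that span a clip boundary (assuming frames_source maps
# a frame index to its source clip id).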
n_videos = videos_list.shape[0]
if SHUFFLE:
# Shuffle images to aid generalization
videos_list = np.random.permutation(videos_list)
# Build the Spatio-temporal Autoencoder
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
NB_ITERATIONS = int(n_videos/BATCH_SIZE)
# Setup TensorBoard Callback
TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
LRS.set_model(autoencoder)
print ("Beginning Training...")
# Begin Training
for epoch in range(NB_EPOCHS):
print("\n\nEpoch ", epoch)
loss = []
# Set learning rate every epoch
LRS.on_epoch_begin(epoch=epoch)
lr = K.get_value(autoencoder.optimizer.lr)
print ("Learning rate: " + str(lr))
for index in range(NB_ITERATIONS):
# Train Autoencoder
X = load_X(videos_list, index, DATA_DIR)
X_train = X[:, 0 : int(VIDEO_LENGTH/2)]
y_train = X[:, int(VIDEO_LENGTH/2) :]
loss.append(autoencoder.train_on_batch(X_train, y_train))
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + " " +
"loss: " + str(loss[len(loss)-1]) +
"\t [" + "{0}>".format("="*(arrow)))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
predicted_images = autoencoder.predict(X_train, verbose=0)
orig_image, truth_image, pred_image = combine_images(X_train, y_train, predicted_images)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
                if epoch == 0:
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_pred.png"), pred_image)
        # Log statistics at the end of each epoch
avg_loss = sum(loss)/len(loss)
logs = {'loss': avg_loss}
TC.on_epoch_end(epoch, logs)
# Log the losses
with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
            log_file.write("{\"epoch\":%d, \"loss\":%f}\n" % (epoch, avg_loss))
print("\nAvg loss: " + str(avg_loss))
# Save model weights per epoch to file
encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
# End TensorBoard Callback
TC.on_train_end('_')
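# Illustrative sketch of the clip-building loop in train() and test(): a
# sliding window of VIDEO_LENGTH frame indices is accepted only when all
# frames in the window share the same source label; otherwise the window is
# restarted at the boundary. The toy labels below stand in for the contents
# of sources_train_128.hkl (an assumption for demonstration purposes).
def _example_build_video_clips(video_length=4):
    frames_source = ["a"] * 6 + ["b"] * 5
    clips = []
    start, end = 1, video_length + 1
    while end <= len(frames_source):
        window = frames_source[start:end]
        if len(set(window)) == 1:
            clips.append(list(range(start, end)))
            start += 1
            end += 1
        else:
            start = end - 1
            end = start + video_length
    # Windows that straddle the a/b boundary are skipped.
    assert clips == [[1, 2, 3, 4], [2, 3, 4, 5], [6, 7, 8, 9], [7, 8, 9, 10]]
    return clips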
def test(ENC_WEIGHTS, DEC_WEIGHTS):
# Create models
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
for i in range(len(decoder.layers)):
print (decoder.layers[i], str(i))
# exit(0)
def build_intermediate_model(encoder, decoder):
# convlstm-13, conv3d-25
intermediate_decoder_1 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[19].output)
# intermediate_decoder_2 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[12].output)
imodel_1 = Sequential()
imodel_1.add(encoder)
imodel_1.add(intermediate_decoder_1)
# imodel_2 = Sequential()
# imodel_2.add(encoder)
# imodel_2.add(intermediate_decoder_2)
return imodel_1
imodel_1 = build_intermediate_model(encoder, decoder)
imodel_1.compile(loss='mean_squared_error', optimizer=OPTIM)
# imodel_2.compile(loss='mean_squared_error', optimizer=OPTIM)
# imodel = build_intermediate_model(encoder, decoder)
# Build video progressions
frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_128.hkl'))
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + VIDEO_LENGTH
end_frame_index = end_frame_index + VIDEO_LENGTH
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
n_videos = videos_list.shape[0]
# Test model by making predictions
loss = []
NB_ITERATIONS = int(n_videos / BATCH_SIZE)
for index in range(NB_ITERATIONS):
# Test Autoencoder
X = load_X(videos_list, index, TEST_DATA_DIR)
X_test = X[:, 0: int(VIDEO_LENGTH / 2)]
y_test = X[:, int(VIDEO_LENGTH / 2):]
loss.append(autoencoder.test_on_batch(X_test, y_test))
y_pred = autoencoder.predict_on_batch(X_test)
a_pred_1 = imodel_1.predict_on_batch(X_test)
# a_pred_2 = imodel_2.predict_on_batch(X_test)
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
"loss: " + str(loss[len(loss) - 1]) +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
orig_image, truth_image, pred_image = combine_images(X_test, y_test, y_pred)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_pred.png"), pred_image)
#------------------------------------------
a_pred_1 = np.reshape(a_pred_1, newshape=(10, 10, 64, 64, 1))
np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) +'.npy'), a_pred_1)
orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_1)
# pred_image = (pred_image*100) * 127.5 + 127.5
# y_pred = y_pred * 127.5 + 127.5
# np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) + '.npy'), y_pred)
# cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_1.png"), pred_image)
# a_pred_2 = np.reshape(a_pred_2, newshape=(10, 10, 16, 16, 1))
# with open('attention_weights.txt', mode='w') as file:
# file.write(str(a_pred_2[0, 4]))
# orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_2)
# pred_image = (pred_image*100) * 127.5 + 127.5
# cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_2.png"), pred_image)
avg_loss = sum(loss) / len(loss)
print("\nAvg loss: " + str(avg_loss))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str)
parser.add_argument("--enc_weights", type=str, default="None")
parser.add_argument("--dec_weights", type=str, default="None")
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
parser.add_argument("--nice", dest="nice", action="store_true")
parser.set_defaults(nice=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
train(BATCH_SIZE=args.batch_size,
ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
if args.mode == "test":
test(ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import json
import math
import os
import shutil
import struct
import h5py
import numpy as np
import obspy
import xarray
# LASIF can already deal with the binary SES3D models. Thus we can utilize
# this here!
from lasif.ses3d_models import RawSES3DModelHandler
from lasif.scripts.lasif_cli import _find_project_comm
def binary_ses3d_to_hdf5_model(input_folder, lasif_project, output_filename):
"""
Function converting a binary SES3D model consisting of many files to an
HDF5 model.
Requires access to a LASIF project that determines the potentially
rotated geometry. Not super clean but workable.
:param input_folder: The folder containing the input model.
:param lasif_project: The folder with the LASIF project.
:param output_filename: The output filename.
"""
assert not os.path.exists(output_filename), \
"'%s' already exists" % output_filename
_dirname = os.path.dirname(output_filename)
if not os.path.exists(_dirname) and _dirname:
os.makedirs(_dirname)
# We need the project to get the domain definition which stores the
# rotation settings.
comm = _find_project_comm(lasif_project, read_only_caches=True)
if any(_i.startswith("grad_") for _i in os.listdir(input_folder)):
model_type = "kernel"
else:
model_type = "earth_model"
m = RawSES3DModelHandler(
directory=input_folder, domain=comm.project.domain,
model_type=model_type)
    f = h5py.File(output_filename, "w")
try:
data_group = f.create_group("data")
if model_type == "earth_model":
# We will also store A, C, and Q which we don't invert for but
# have to take into account in any case.
components = ["vp", "vsh", "vsv", "rho", "A", "C"]
# Q might not exist.
if "Q" in m.components:
components.append("Q")
elif model_type == "kernel":
components = ["grad_cp", "grad_csh", "grad_csv", "grad_rho"]
else:
raise NotImplementedError
# Make it compatible with seismopt.
rename_dict = {
"grad_cp": "vp",
"grad_csh": "vsh",
"grad_csv": "vsv",
"grad_rho": "rho",
}
for c in components:
m.parse_component(c)
_d = xarray.DataArray(
np.require(m.parsed_components[c],
requirements=["C_CONTIGUOUS"]),
coords=[90.0 - m.collocation_points_lats[::-1],
m.collocation_points_lngs,
(6371.0 - m.collocation_points_depth) * 1000.0],
dims=["colatitude", "longitude", "radius_in_m"])
# Write to HDF5 file.
if c in rename_dict:
c = rename_dict[c]
data = _d.data
            # Make sure it is g/cm^3 in the hdf5 files.
if c == "rho":
if data.mean() > 1000.0:
data /= 1000.0
data_group[c] = np.require(data, dtype=np.float32)
data_group[c].attrs["variable_name"] = \
np.string_((c + "\x00").encode())
# Write coordinate axes.
f["coordinate_0"] = np.require(_d.colatitude.data, dtype=np.float32)
f["coordinate_0"].attrs["name"] = \
np.string_(("colatitude" + "\x00").encode())
f["coordinate_1"] = np.require(_d.longitude.data, dtype=np.float32)
f["coordinate_1"].attrs["name"] = \
np.string_(("longitude" + "\x00").encode())
f["coordinate_2"] = np.require(_d.radius_in_m.data, dtype=np.float32)
f["coordinate_2"].attrs["name"] = \
np.string_(("radius_in_m" + "\x00").encode())
# Create dimension scales.
for c in components:
if c in rename_dict:
c = rename_dict[c]
f["data"][c].dims[0].label = "colatitude"
f["data"][c].dims[1].label = "longitude"
f["data"][c].dims[2].label = "radius_in_m"
f["data"][c].dims.create_scale(f["coordinate_0"], "values")
f["data"][c].dims.create_scale(f["coordinate_1"], "values")
f["data"][c].dims.create_scale(f["coordinate_2"], "values")
f["data"][c].dims[0].attach_scale(f["coordinate_0"])
f["data"][c].dims[1].attach_scale(f["coordinate_1"])
f["data"][c].dims[2].attach_scale(f["coordinate_2"])
# Also add some meta information.
_meta = f.create_group("_meta")
model_name = os.path.split(os.path.normpath(os.path.abspath(
input_folder)))[-1]
_meta.attrs["model_name"] = np.string_((model_name + "\x00").encode())
# Everything needed to reconstruct the domain objects.
_domain = _meta.create_group("domain")
d = comm.project.domain
_domain.attrs["min_longitude"] = d.min_longitude
_domain.attrs["max_longitude"] = d.max_longitude
_domain.attrs["min_latitude"] = d.min_latitude
_domain.attrs["max_latitude"] = d.max_latitude
_domain.attrs["min_depth_in_km"] = d.min_depth_in_km
_domain.attrs["max_depth_in_km"] = d.max_depth_in_km
_domain.attrs["rotation_axis"] = d.rotation_axis
_domain.attrs["rotation_angle_in_degree"] = d.rotation_angle_in_degree
_domain.attrs["boundary_width_in_degree"] = d.boundary_width_in_degree
# We also need to store the boxfile.
_meta.create_dataset("boxfile",
data=np.fromfile(m.boxfile, dtype=np.uint8))
finally:
try:
f.close()
        except Exception:
pass
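# A small sketch (never called) of the coordinate conversion used when
# building the xarray.DataArray above: SES3D provides latitudes and depths,
# while the HDF5 model stores colatitude in degrees and radius in metres;
# reversing the latitude axis makes colatitude ascending.
def _example_model_coordinates():
    lats = np.array([40.0, 45.0, 50.0])
    depths_in_km = np.array([0.0, 100.0])
    colatitude = 90.0 - lats[::-1]
    radius_in_m = (6371.0 - depths_in_km) * 1000.0
    assert list(colatitude) == [40.0, 45.0, 50.0]
    assert list(radius_in_m) == [6371000.0, 6271000.0]
    return colatitude, radius_in_m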
def hdf5_model_to_binary_ses3d_model(input_filename, output_folder):
with h5py.File(input_filename, "r") as f:
_hdf5_model_to_binary_ses3d_model(f=f,
output_folder=output_folder)
def _hdf5_model_to_binary_ses3d_model(f, output_folder):
lpd = 4
assert not os.path.exists(output_folder), \
"Folder '%s' already exists." % output_folder
os.makedirs(output_folder)
with io.BytesIO(f["_meta"]["boxfile"].value.tostring()) as buf:
setup = _read_boxfile(buf)
# Also write to output folder
buf.seek(0, 0)
with io.open(os.path.join(output_folder, "boxfile"), "wb") as fh:
fh.write(buf.read())
data = {}
# SES3D internally expects a density in kg/m^3 - The hdf5 files might
# have g/cm^3.
rho = f["data"]["rho"][:]
if rho.mean() < 1000:
rho *= 1000.0
data["rhoinv"] = 1.0 / rho
data["mu"] = (f["data"]["vsh"][:] * 1000) ** 2 / data["rhoinv"]
data["lambda"] = \
(f["data"]["vp"][:] * 1000) ** 2 / data["rhoinv"] - 2 * data["mu"]
data["A"] = (f["data"]["A"][:])
data["B"] = (f["data"]["vsv"][:] * 1000) ** 2 / data["rhoinv"] - data["mu"]
data["C"] = (f["data"]["C"][:])
    # Q might not always be given.
if "Q" in f["data"]:
data["Q"] = (f["data"]["Q"][:])
for key in sorted(data.keys()):
for number, domain in enumerate(setup["subdomains"]):
x_min, x_max = domain["boundaries_x"]
y_min, y_max = domain["boundaries_y"]
z_min, z_max = domain["boundaries_z"]
# Minimum indices
x_min, y_min, z_min = \
[lpd * _j for _j in (x_min, y_min, z_min)]
# Maximum indices
x_max, y_max, z_max = \
[lpd * (_j + 1) for _j in (x_max, y_max, z_max)]
_d = data[key][x_min: x_max + 1,
y_min: y_max + 1,
z_min: z_max + 1]
# Invert last components.
_d = _d[:, :, ::-1]
# Reduplicate the GLL points.
for _i in xrange(3):
_s = _d.shape[_i]
left_idx = np.arange(_s - lpd)[::lpd]
right_idx = np.arange(_s + lpd)[lpd + 1::lpd]
if _i == 0:
_t = [_d[_l:_r, :, :]
for _l, _r in zip(left_idx, right_idx)]
elif _i == 1:
_t = [_d[:, _l:_r, :]
for _l, _r in zip(left_idx, right_idx)]
elif _i == 2:
_t = [_d[:, :, _l:_r]
for _l, _r in zip(left_idx, right_idx)]
else:
raise NotImplementedError
_d = np.concatenate(_t, axis=_i)
# Reshape to restore 6 dimensional layout.
_d = np.require(_d, requirements=["C_CONTIGUOUS"])
shape = (domain["index_x_count"], lpd + 1,
domain["index_y_count"], lpd + 1,
domain["index_z_count"], lpd + 1)
_d = _d.reshape(shape, order="C")
# Roll to retrieve original SES3D memory order.
_d = np.rollaxis(_d, 2, 1)
_d = np.rollaxis(_d, 4, 2)
_d = np.require(_d, requirements=["F_CONTIGUOUS"])
filename = os.path.join(output_folder, "%s%i" % (key, number))
with io.open(filename, "wb") as fh:
fh.write(struct.pack("<I", 520000))
fh.write(_d.tobytes(order="F"))
fh.write(struct.pack("<I", 520000))
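# Illustrative sketch of the GLL "reduplication" above on a single 1-D axis:
# the continuous model stores each element boundary once, but SES3D expects
# every element to carry its own lpd + 1 points, so shared boundary values
# are duplicated via overlapping slices.
def _example_gll_reduplication(lpd=4):
    axis = np.arange(2 * lpd + 1)  # two elements sharing one boundary point
    left_idx = np.arange(len(axis) - lpd)[::lpd]
    right_idx = np.arange(len(axis) + lpd)[lpd + 1::lpd]
    chunks = [axis[_l:_r] for _l, _r in zip(left_idx, right_idx)]
    redup = np.concatenate(chunks)
    # The shared point (value lpd) now appears twice.
    assert list(redup) == list(range(lpd + 1)) + list(range(lpd, 2 * lpd + 1))
    return redup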
def _read_boxfile(fh):
"""
Copied straight from LASIF.
"""
setup = {"subdomains": []}
# The first 14 lines denote the header
lines = fh.readlines()[14:]
# Strip lines and remove empty lines.
lines = [_i.strip() for _i in lines if _i.strip()]
# The next 4 are the global CPU distribution.
setup["total_cpu_count"] = int(lines.pop(0))
setup["cpu_count_in_x_direction"] = int(lines.pop(0))
setup["cpu_count_in_y_direction"] = int(lines.pop(0))
setup["cpu_count_in_z_direction"] = int(lines.pop(0))
if set(lines[0]) == set("-"):
lines.pop(0)
# Small sanity check.
if setup["total_cpu_count"] != setup["cpu_count_in_x_direction"] * \
setup["cpu_count_in_y_direction"] * \
setup["cpu_count_in_z_direction"]:
msg = ("Invalid boxfile. Total and individual processor "
"counts do not match.")
raise ValueError(msg)
# Now parse the rest of file which contains the subdomains.
def subdomain_generator(data):
"""
Simple generator looping over each defined box and yielding
a dictionary for each.
:param data: The text.
"""
while data:
subdom = {}
# Convert both indices to 0-based indices
subdom["single_index"] = int(data.pop(0)) - 1
subdom["multi_index"] = map(lambda x: int(x) - 1,
data.pop(0).split())
subdom["boundaries_x"] = map(int, data.pop(0).split())
subdom["boundaries_y"] = map(int, data.pop(0).split())
subdom["boundaries_z"] = map(int, data.pop(0).split())
# Convert radians to degree.
subdom["physical_boundaries_x"] = map(
lambda x: math.degrees(float(x)), data.pop(0).split())
subdom["physical_boundaries_y"] = map(
lambda x: math.degrees(float(x)), data.pop(0).split())
# z is in meter.
subdom["physical_boundaries_z"] = \
map(float, data.pop(0).split())
for component in ("x", "y", "z"):
idx = "boundaries_%s" % component
index_count = subdom[idx][1] - subdom[idx][0] + 1
subdom["index_%s_count" % component] = index_count
# The boxfiles are slightly awkward in that the indices
# are not really continuous. For example if one box
# has 22 as the last index, the first index of the next
# box will also be 22, even though it should be 23. The
# next snippet attempts to fix this deficiency.
offset = int(round(subdom[idx][0] /
float(index_count - 1)))
subdom[idx][0] += offset
subdom[idx][1] += offset
# Remove separator_line if existent.
if set(lines[0]) == set("-"):
lines.pop(0)
yield subdom
    # Sort them by the single index.
setup["subdomains"] = sorted(list(subdomain_generator(lines)),
key=lambda x: x["single_index"])
# Do some more sanity checks.
if len(setup["subdomains"]) != setup["total_cpu_count"]:
msg = ("Invalid boxfile. Number of processors and subdomains "
"to not match.")
raise ValueError(msg)
for component in ("x", "y", "z"):
idx = "index_%s_count" % component
if len(set([_i[idx] for _i in setup["subdomains"]])) != 1:
msg = ("Invalid boxfile. Unequal %s index count across "
"subdomains.") % component
raise ValueError(msg)
# Now generate the absolute indices for the whole domains.
for component in ("x", "y", "z"):
setup["boundaries_%s" % component] = (
min([_i["boundaries_%s" % component][0]
for _i in setup["subdomains"]]),
max([_i["boundaries_%s" %
component][1] for _i in setup["subdomains"]]))
setup["physical_boundaries_%s" % component] = (
min([_i["physical_boundaries_%s" % component][0] for
_i in setup["subdomains"]]),
max([_i["physical_boundaries_%s" % component][1] for _i in
setup["subdomains"]]))
return setup
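# Small sketch (toy numbers, never called) of the boundary-index correction
# inside _read_boxfile(): raw boxfile indices repeat each shared boundary
# ([0, 22], [22, 44], ...), so every subdomain is shifted by its ordinal to
# make the global index axis continuous.
def _example_boxfile_index_offset():
    raw_boundaries = [[0, 22], [22, 44], [44, 66]]
    fixed = []
    for bounds in raw_boundaries:
        index_count = bounds[1] - bounds[0] + 1
        offset = int(round(bounds[0] / float(index_count - 1)))
        fixed.append([bounds[0] + offset, bounds[1] + offset])
    assert fixed == [[0, 22], [23, 45], [46, 68]]
    return fixed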
def plot_hdf5_model(filename, plot_type="horizontal", *args, **kwargs):
with h5py.File(filename, "r") as f:
if plot_type == "horizontal":
_plot_hdf5_model_horizontal(f=f, *args, **kwargs)
elif plot_type == "vertical":
_plot_hdf5_model_vertical(f=f, *args, **kwargs)
else:
raise NotImplementedError
def _plot_hdf5_model_vertical(f, component, output_filename, vmin=None,
vmax=None):
import matplotlib.cm
import matplotlib.pylab as plt
data = xarray.DataArray(
f["data"][component][:], [
("latitude", 90.0 - f["coordinate_0"][:]),
("longitude", f["coordinate_1"][:]),
("radius", f["coordinate_2"][:] / 1000.0)])
plt.style.use('seaborn-pastel')
plt.figure(figsize=(32, 18))
plt.suptitle("Component %s - File %s" % (component, output_filename),
fontsize=20)
count = 12
lats = plt.linspace(data["latitude"].min(), data["latitude"].max(),
count)
lngs = plt.linspace(data["longitude"].min(), data["longitude"].max(),
count)
import lasif.colors
my_colormap = lasif.colors.get_colormap(
"tomo_full_scale_linear_lightness")
# Overwrite colormap things if given.
if vmin is not None and vmax is not None:
min_val_plot = vmin
max_val_plot = vmax
else:
mean = data.mean()
max_diff = max(abs(mean - data.min()),
abs(data.max() - mean))
min_val_plot = mean - max_diff
max_val_plot = mean + max_diff
# Plotting essentially constant models.
min_delta = 0.001 * abs(max_val_plot)
if (max_val_plot - min_val_plot) < min_delta:
max_val_plot = max_val_plot + min_delta
min_val_plot = min_val_plot - min_delta
for _i in range(count):
plt.subplot(4, count // 2, _i + 1)
x, y = np.meshgrid(data.longitude, data.radius)
plot_data = data.sel(latitude=lats[_i], method="nearest")
plot_data = np.ma.masked_invalid(plot_data.data)
# Plot.
plt.pcolormesh(
x, y, plot_data.T,
cmap=my_colormap, vmin=min_val_plot, vmax=max_val_plot,
shading="flat")
# make a colorbar and title
plt.colorbar()
plt.title("@Latitude: " + str(lats[_i]))
for _i in range(count):
plt.subplot(4, count // 2, count + _i + 1)
x, y = np.meshgrid(data.latitude, data.radius)
plot_data = data.sel(longitude=lngs[_i], method="nearest")
plot_data = np.ma.masked_invalid(plot_data.data)
# Plot.
plt.pcolormesh(
x, y, plot_data.T,
cmap=my_colormap, vmin=min_val_plot, vmax=max_val_plot,
shading="flat")
# make a colorbar and title
plt.colorbar()
plt.title("@Longitude: " + str(lngs[_i]))
plt.tight_layout(rect=(0, 0, 1, 0.95))
plt.savefig(output_filename, dpi=150)
plt.close()
def _plot_hdf5_model_horizontal(f, component, output_filename,
vmin=None, vmax=None):
import matplotlib.cm
import matplotlib.pylab as plt
data = xarray.DataArray(
f["data"][component][:], [
("latitude", 90.0 - f["coordinate_0"][:]),
("longitude", f["coordinate_1"][:]),
("radius", f["coordinate_2"][:] / 1000.0)])
plt.style.use('seaborn-pastel')
from lasif.domain import RectangularSphericalSection
domain = RectangularSphericalSection(**dict(f["_meta"]["domain"].attrs))
plt.figure(figsize=(32, 18))
depth_position_map = {
50: (0, 0),
100: (0, 1),
150: (1, 0),
250: (1, 1),
400: (2, 0),
600: (2, 1)
}
for depth, location in depth_position_map.items():
ax = plt.subplot2grid((3, 5), location)
radius = 6371.0 - depth
# set up a map and colourmap
m = domain.plot(ax=ax, resolution="c", skip_map_features=True)
import lasif.colors
my_colormap = lasif.colors.get_colormap(
"tomo_full_scale_linear_lightness")
from lasif import rotations
x, y = np.meshgrid(data.longitude, data.latitude)
x_shape = x.shape
y_shape = y.shape
lat_r, lon_r = rotations.rotate_lat_lon(
y.ravel(), x.ravel(),
domain.rotation_axis,
domain.rotation_angle_in_degree)
x, y = m(lon_r, lat_r)
x.shape = x_shape
y.shape = y_shape
plot_data = data.sel(radius=radius, method="nearest")
plot_data = np.ma.masked_invalid(plot_data.data)
# Overwrite colormap things if given.
if vmin is not None and vmax is not None:
min_val_plot = vmin
max_val_plot = vmax
else:
mean = plot_data.mean()
max_diff = max(abs(mean - plot_data.min()),
abs(plot_data.max() - mean))
min_val_plot = mean - max_diff
max_val_plot = mean + max_diff
# Plotting essentially constant models.
min_delta = 0.001 * abs(max_val_plot)
if (max_val_plot - min_val_plot) < min_delta:
max_val_plot = max_val_plot + min_delta
min_val_plot = min_val_plot - min_delta
# Plot.
im = m.pcolormesh(
x, y, plot_data,
cmap=my_colormap, vmin=min_val_plot, vmax=max_val_plot,
shading="gouraud")
# make a colorbar and title
m.colorbar(im, "right", size="3%", pad='2%')
plt.title(str(depth) + ' km')
# Depth based statistics.
plt.subplot2grid((3, 5), (0, 4), rowspan=3)
plt.title("Depth statistics")
mean = data.mean(axis=(0, 1))
std = data.std(axis=(0, 1))
_min = data.min(axis=(0, 1))
_max = data.max(axis=(0, 1))
plt.fill_betweenx(data.radius, mean - std, mean + std,
label="std", color="#FF3C83")
plt.plot(mean, data.radius, label="mean", color="k", lw=2)
plt.plot(_min, data.radius, color="grey", label="min")
plt.plot(_max, data.radius, color="grey", label="max")
plt.legend(loc="best")
plt.xlabel("Value")
plt.ylabel("Radius")
plt.hlines(data.radius, plt.xlim()[0], plt.xlim()[1], color="0.8",
zorder=-10, linewidth=0.5)
# Roughness plots.
plt.subplot2grid((3, 5), (0, 2))
_d = np.abs(data.diff("latitude", n=1)).sum("latitude").data
plt.title("Roughness in latitude direction, Total: %g" % _d.sum())
plt.pcolormesh(data.longitude.data, data.radius.data,
_d.T, cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Longitude")
plt.ylabel("Radius")
plt.subplot2grid((3, 5), (1, 2))
_d = np.abs(data.diff("longitude", n=1)).sum("longitude").data
plt.title("Roughness in longitude direction. Total: %g" % data.sum())
plt.pcolormesh(data.latitude.data, data.radius.data, _d.T,
cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Latitude")
plt.ylabel("Radius")
plt.subplot2grid((3, 5), (2, 2))
_d = np.abs(data.diff("radius", n=1)).sum("radius").data
plt.title("Roughness in radius direction. Total: %g" % _d.sum())
plt.pcolormesh(data.longitude.data, data.latitude.data,
_d, cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Longitude")
plt.ylabel("Latitude")
# L2
plt.subplot2grid((3, 5), (0, 3))
_d = (data ** 2).sum("latitude").data
plt.title("L2 Norm in latitude direction, Total: %g" % _d.sum())
plt.pcolormesh(data.longitude.data, data.radius.data,
_d.T, cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Longitude")
plt.ylabel("Radius")
plt.subplot2grid((3, 5), (1, 3))
_d = (data ** 2).sum("longitude").data
plt.title("L2 Norm in longitude direction, Total: %g" % _d.sum())
plt.pcolormesh(data.latitude.data, data.radius.data, _d.T,
cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Latitude")
plt.ylabel("Radius")
plt.subplot2grid((3, 5), (2, 3))
_d = (data ** 2).sum("radius").data
plt.title("L2 Norm in radius direction, Total: %g" % _d.sum())
plt.pcolormesh(data.longitude.data, data.latitude.data,
_d, cmap=matplotlib.cm.viridis)
try:
plt.colorbar()
    except Exception:
pass
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.suptitle("Component %s - File %s" % (component, output_filename),
fontsize=20)
plt.tight_layout(rect=(0, 0, 1, 0.95))
plt.savefig(output_filename, dpi=150)
plt.close()
def taper_and_precondition_hdf5_model(
input_filename, output_filename, taper_colatitude_offset_in_km,
taper_colatitude_width_in_km, taper_longitude_offset_in_km,
taper_longitude_width_in_km, taper_depth_offset_in_km,
taper_depth_width_in_km, scaling_file):
# Make a copy of the file and then modify in-place.
assert not os.path.exists(output_filename), "File '%s' already exists." % \
output_filename
shutil.copy2(input_filename, output_filename)
with h5py.File(output_filename, "r+") as f:
_taper_and_precondition_hdf5_model(
f=f,
taper_colatitude_offset_in_km=taper_colatitude_offset_in_km,
taper_colatitude_width_in_km=taper_colatitude_width_in_km,
taper_longitude_offset_in_km=taper_longitude_offset_in_km,
taper_longitude_width_in_km=taper_longitude_width_in_km,
taper_depth_offset_in_km=taper_depth_offset_in_km,
taper_depth_width_in_km=taper_depth_width_in_km,
scaling_file=scaling_file)
def _taper_and_precondition_hdf5_model(f, taper_colatitude_offset_in_km,
taper_colatitude_width_in_km,
taper_longitude_offset_in_km,
taper_longitude_width_in_km,
taper_depth_offset_in_km,
taper_depth_width_in_km,
scaling_file):
# Read the scaling file and make sure it plays nice with the gradient at
# hand.
with io.open(scaling_file, "rb") as fh:
scaling = json.load(fh)
np.testing.assert_allclose(scaling["radius"], f["coordinate_2"][:])
scaling = np.array(scaling["weights"], dtype=np.float32)
fac = 111.19492664455873
colatitude_in_km = f["coordinate_0"][:] * fac
longitude_in_km = f["coordinate_1"][:] * fac
radius_in_km = f["coordinate_2"][:] / 1000.0
# Convert into distance from either end.
for _i in [colatitude_in_km, longitude_in_km]:
_i[:] = np.fmin(_i - _i.min(), _i.max() - _i)
# In the radial direction we only taper at the bottom.
radius_in_km -= radius_in_km.min()
# Apply the offsets
colatitude_in_km -= taper_colatitude_offset_in_km
longitude_in_km -= taper_longitude_offset_in_km
radius_in_km -= taper_depth_offset_in_km
# Apply the taper width
colatitude_in_km /= taper_colatitude_width_in_km
longitude_in_km /= taper_longitude_width_in_km
radius_in_km /= taper_depth_width_in_km
# Clip
longitude_in_km = longitude_in_km.clip(min=0.0, max=1.0)
colatitude_in_km = colatitude_in_km.clip(min=0.0, max=1.0)
radius_in_km = radius_in_km.clip(min=0.0, max=1.0)
    # Apply a Hann taper. This yields the final taper that the data is
    # multiplied with.
for x in [longitude_in_km, colatitude_in_km, radius_in_km]:
x[:] = 0.5 * (1.0 - np.cos(x * np.pi))
# Apply the tapers.
for name, data in f["data"].items():
data = data[:]
data *= colatitude_in_km[:, np.newaxis, np.newaxis]
data *= longitude_in_km[np.newaxis, :, np.newaxis]
data *= radius_in_km[np.newaxis, np.newaxis, :]
# Apply the depth weighting.
data *= scaling[np.newaxis, np.newaxis, :]
f["data"][name][:] = data
def determine_depth_scaling(input_filename, output_filename, max_kernel_value):
with h5py.File(input_filename, mode="r") as f:
_determine_depth_scaling(f=f,
output_filename=output_filename,
max_kernel_value=max_kernel_value)
def _determine_depth_scaling(f, output_filename, max_kernel_value):
all_scales = []
for data in f["data"].values():
data = data[:]
# Zeros mess with everything - replace with the smallest
# non-zero number!
data[data == 0] = np.abs(data[data != 0]).min()
# Damping factor - the higher the damping the lesser the effect of
# the depth scaling.
damp = 0.1
m = np.max(np.abs(data))
fac = np.zeros(data.shape[-1])
for _i in range(len(fac)):
fac[_i] = 1.0 / (damp * m + np.abs(data[:, :, _i]).max())
all_scales.append(fac)
s = np.sum(all_scales, axis=0)
import scipy.signal
# Smooth a tiny bit to avoid wild oscillations.
w = scipy.signal.gaussian(5, 3)
w /= w.sum()
    # Normalize so the smallest scale equals one.
s /= s.min()
# Avoid boundary effects.
l = len(s)
s = np.concatenate([np.ones_like(s) * s[0], s, np.ones_like(s) * s[-1]])
smooth_s = np.convolve(s, w, mode="same")
# Cut out the original segment.
smooth_s = smooth_s[l:-l]
s = s[l:-l]
    # Abuse ObsPy to taper a bit at the left end.
smooth_s = obspy.Trace(data=smooth_s).taper(
max_percentage=0.2, type="cosine", side="left").data.clip(min=1.0)
# Get the max absolute value in depth for the vsv kernel.
max_vsv = np.abs(f["data"]["vsv"][:]).max(axis=(0, 1))
factor = max_kernel_value / (smooth_s * max_vsv).max()
import matplotlib.pyplot as plt
plt.style.use("ggplot")
y = f["coordinate_2"][:] / 1000.0
plt.subplot(141)
m = max_vsv
plt.plot(m, y)
plt.xlim(-0.1 * m.ptp(), 1.1 * m.max())
plt.ylim(y[0], y[-1])
plt.title("max abs vsv")
plt.subplot(142)
plt.plot(s, y)
plt.ylim(y[0], y[-1])
plt.title("raw")
plt.subplot(143)
plt.plot(smooth_s, y)
plt.xlim(0, smooth_s.max() * 1.5)
plt.ylim(y[0], y[-1])
plt.title("smoothed")
plt.subplot(144)
m = smooth_s * factor * max_vsv
plt.plot(m, y)
plt.xlim(-0.1 * max_kernel_value, 1.1 * max_kernel_value)
plt.ylim(y[0], y[-1])
plt.title("after")
plt.suptitle("Factor: %s" % str(factor))
output = {
"radius": [float(i) for i in f["coordinate_2"][:]],
"weights": [float(i) for i in smooth_s * factor]
}
with io.open(output_filename, "wb") as fh:
json.dump(output, fh)
plt.show()
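# Sketch of the edge-padding trick used in _determine_depth_scaling() above:
# constant copies of the end values are prepended/appended before convolving
# so the Gaussian window does not drag the curve toward zero at the edges,
# and the original segment is cut back out afterwards.
def _example_edge_padded_smoothing():
    import scipy.signal
    s = np.array([1.0, 1.0, 2.0, 4.0, 8.0])
    w = scipy.signal.gaussian(5, 3)
    w /= w.sum()
    n = len(s)
    padded = np.concatenate([np.ones_like(s) * s[0], s, np.ones_like(s) * s[-1]])
    smooth = np.convolve(padded, w, mode="same")[n:-n]
    assert smooth.shape == s.shape
    return smooth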
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009-2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aquilon.aqdb.depends
from aquilon.aqdb.model.base import Base, SingleInstanceMixin
from aquilon.aqdb.model.stateengine import StateEngine
from aquilon.aqdb.model.lifecycleengine import LifecycleEngine
# PARAMETERIZED
from aquilon.aqdb.model.parameterized import Parameterized
# AUTHORIZATION
from aquilon.aqdb.model.role import Role
from aquilon.aqdb.model.realm import Realm
from aquilon.aqdb.model.user_principal import UserPrincipal
from aquilon.aqdb.model.user import (
User,
UserType,
)
from aquilon.aqdb.model.netgroup_whitelist import NetGroupWhiteList
# DNS DOMAINS
from aquilon.aqdb.model.dns_domain import DnsDomain
# LOCATION
from aquilon.aqdb.model.location import Location
from aquilon.aqdb.model.organization import Organization
from aquilon.aqdb.model.hub import Hub
from aquilon.aqdb.model.continent import Continent
from aquilon.aqdb.model.country import Country
from aquilon.aqdb.model.campus import Campus
from aquilon.aqdb.model.city import City
from aquilon.aqdb.model.building import Building
from aquilon.aqdb.model.room import Room
from aquilon.aqdb.model.bunker import Bunker
from aquilon.aqdb.model.rack import Rack
from aquilon.aqdb.model.desk import Desk
# ENVIRONMENT
from aquilon.aqdb.model.host_environment import HostEnvironment
# GRN
from aquilon.aqdb.model.grn import (
Grn,
ParameterizedGrn,
)
# NETWORK
from aquilon.aqdb.model.dns_environment import DnsEnvironment
from aquilon.aqdb.model.network_environment import NetworkEnvironment
from aquilon.aqdb.model.network_compartment import NetworkCompartment
from aquilon.aqdb.model.network import Network
from aquilon.aqdb.model.static_route import StaticRoute
from aquilon.aqdb.model.fqdn import Fqdn
from aquilon.aqdb.model.dns_record import DnsRecord
from aquilon.aqdb.model.dns_record_target_mixin import DnsRecordTargetMixin
from aquilon.aqdb.model.a_record import ARecord, DynamicStub
from aquilon.aqdb.model.reserved_name import ReservedName
from aquilon.aqdb.model.address_alias import AddressAlias
from aquilon.aqdb.model.alias import Alias
from aquilon.aqdb.model.srv_record import SrvRecord
from aquilon.aqdb.model.ns_record import NsRecord
from aquilon.aqdb.model.router_address import RouterAddress
# CONFIG
from aquilon.aqdb.model.archetype import (
Archetype,
ParameterizedArchetype,
)
from aquilon.aqdb.model.personality import (
ParameterizedPersonality,
Personality,
PersonalityGrnMap,
PersonalityStage,
)
from aquilon.aqdb.model.asset_lifecycle import AssetLifecycle
from aquilon.aqdb.model.operating_system import OperatingSystem
# HARDWARE
from aquilon.aqdb.model.vendor import Vendor
from aquilon.aqdb.model.model import Model
from aquilon.aqdb.model.hardware_entity import HardwareEntity, DeviceLinkMixin
from aquilon.aqdb.model.machine import Machine
from aquilon.aqdb.model.hostlifecycle import HostLifecycle
from aquilon.aqdb.model.network_device import NetworkDevice
from aquilon.aqdb.model.chassis import Chassis
from aquilon.aqdb.model.console_server import ConsoleServer, ConsolePort
from aquilon.aqdb.model.disk import Disk, LocalDisk
from aquilon.aqdb.model.machine_specs import MachineSpecs
# HOST
from aquilon.aqdb.model.branch import Branch, Domain, Sandbox, Review
from aquilon.aqdb.model.compileable import CompileableMixin
from aquilon.aqdb.model.host import Host, HostGrnMap
# HARDWARE/SYSTEM LINKAGES
from aquilon.aqdb.model.observed_mac import ObservedMac
from aquilon.aqdb.model.vlan import PortGroup, VlanInfo
from aquilon.aqdb.model.interface import (Interface, PublicInterface,
ManagementInterface, OnboardInterface,
VlanInterface, BondingInterface,
BridgeInterface, LoopbackInterface,
VirtualInterface, PhysicalInterface)
from aquilon.aqdb.model.chassis_slot import ChassisSlot, MachineChassisSlot,\
NetworkDeviceChassisSlot
from aquilon.aqdb.model.address_assignment import (AddressAssignment,
SharedAddressAssignment)
# FEATURES
from aquilon.aqdb.model.feature import (Feature, FeatureLink, HostFeature,
HardwareFeature, InterfaceFeature)
from aquilon.aqdb.model.parameter_definition import (ParamDefinition, ParamDefHolder,
ArchetypeParamDef, FeatureParamDef)
from aquilon.aqdb.model.parameter import Parameter, PersonalityParameter
# CLUSTER
from aquilon.aqdb.model.clusterlifecycle import ClusterLifecycle
from aquilon.aqdb.model.building_preference import BuildingPreference
from aquilon.aqdb.model.cluster import (Cluster, HostClusterMember, EsxCluster,
ComputeCluster, StorageCluster)
from aquilon.aqdb.model.personality_cluster_info import (PersonalityClusterInfo,
PersonalityESXClusterInfo)
from aquilon.aqdb.model.cluster_group import ClusterGroup
from aquilon.aqdb.model.virtual_switch import VirtualSwitch
from aquilon.aqdb.model.metacluster import MetaCluster
# SERVICE
from aquilon.aqdb.model.service import Service, PersonalityServiceListItem
from aquilon.aqdb.model.service_instance import ServiceInstance
from aquilon.aqdb.model.service_map import ServiceMap
from aquilon.aqdb.model.xtn import Xtn, XtnDetail, XtnEnd
# Resources
from aquilon.aqdb.model.resource import (
ArchetypeResource,
ClusterResource,
GrnResource,
HostResource,
PersonalityResource,
Resource,
ResourceHolder,
)
from aquilon.aqdb.model.filesystem import Filesystem
from aquilon.aqdb.model.application import Application
from aquilon.aqdb.model.intervention import Intervention
from aquilon.aqdb.model.resourcegroup import ResourceGroup, BundleResource
from aquilon.aqdb.model.reboot_schedule import (RebootSchedule,
RebootIntervention)
from aquilon.aqdb.model.virtual_machine import VirtualMachine
from aquilon.aqdb.model.service_address import ServiceAddress
from aquilon.aqdb.model.share import Share
from aquilon.aqdb.model.virtual_disk import VirtualDisk
from aquilon.aqdb.model.cluster_asl import (PriorityList, MemberPriority,
SystemList, AutoStartList)
from aquilon.aqdb.model.service_instance_server import ServiceInstanceServer
from aquilon.aqdb.model.shared_service_name import SharedServiceName
# ENTITLEMENTS
from aquilon.aqdb.model.entitlement import (
Entitlement,
EntitlementId,
EntitlementArchetypeGrnMap,
EntitlementArchetypeUserMap,
EntitlementClusterGrnMap,
EntitlementClusterUserMap,
EntitlementGrnGrnMap,
EntitlementGrnUserMap,
EntitlementHostGrnMap,
EntitlementHostUserMap,
EntitlementOnArchetype,
EntitlementOnCluster,
EntitlementOnGrn,
EntitlementOnHost,
EntitlementOnHostEnvironment,
EntitlementOnLocation,
EntitlementOnPersonality,
EntitlementPersonalityGrnMap,
EntitlementPersonalityUserMap,
EntitlementToGrn,
EntitlementToUser,
EntitlementType,
EntitlementTypeUserTypeMap,
)
# Resources dependent on entitlements
from aquilon.aqdb.model.hostlink import (
Hostlink,
HostlinkEntitlementMap,
HostlinkParentMap,
)
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments due to the fact that all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OAuth:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
user = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
.. versionchanged:: 4.0
All of the callback interfaces in this module are now guaranteed
to run their callback with an argument of ``None`` on error.
Previously some functions would do this while others would simply
terminate the request on their own. This change also ensures that
errors are more consistently reported through the ``Future`` interfaces.
"""
from __future__ import absolute_import, division, print_function
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import Future, return_future, chain_future, future_set_exc_info
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import unicode_type, ArgReplacer, PY3
if PY3:
import urllib.parse as urlparse
import urllib.parse as urllib_parse
long = int
else:
import urlparse
import urllib as urllib_parse
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = Future()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
def handle_exception(typ, value, tb):
if future.done():
return False
else:
future_set_exc_info(future, (typ, value, tb))
return True
with ExceptionStackContext(handle_exception):
f(*args, **kwargs)
return future
return wrapper
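# A minimal sketch (illustrative only, not part of Tornado's public API) of
# the dual interface produced by _auth_return_future: the decorated function
# receives the Future in place of its ``callback`` argument, while callers
# may either consume the returned Future or pass a legacy callback.
def _example_auth_return_future_usage():
    @_auth_return_future
    def lookup_user(user_id, callback):
        # Inside the wrapped function, ``callback`` is actually the Future.
        callback.set_result({"id": user_id})

    # Future style: the result is delivered through the returned Future.
    future = lookup_user(42)
    assert future.result() == {"id": 42}

    # Legacy style: the same call also accepts a plain callback, which is
    # invoked once the Future resolves (possibly via the IOLoop).
    lookup_user(42, callback=lambda user: gen_log.info("got %r", user))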
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u"check_authentication"
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, functools.partial(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u""
return self.get_argument(ax_name, u"")
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u" ".join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
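# A minimal sketch (assumptions: the endpoint URL and handler wiring are for
# demonstration only, not shipped with Tornado) of a concrete login handler
# built on OpenIdMixin, following the pattern in the module docstring.
def _example_openid_login_handler():
    import tornado.web

    class ExampleOpenIdLoginHandler(tornado.web.RequestHandler, OpenIdMixin):
        _OPENID_ENDPOINT = "https://openid.example.com/server"

        @gen.coroutine
        def get(self):
            if self.get_argument("openid.mode", None):
                # Returning from the provider: verify and fetch the user.
                user = yield self.get_authenticated_user()
                self.set_secure_cookie("user", escape.json_encode(user))
                self.redirect("/")
            else:
                # First visit: send the user to the identity provider.
                yield self.authenticate_redirect()

    return ExampleOpenIdLoginHandler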
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
functools.partial(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
functools.partial(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
will contain an ``access_key`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
functools.partial(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
implementations.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None, scope=None, response_type="code"):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id,
"response_type": response_type
}
if extra_params:
args.update(extra_params)
if scope:
args['scope'] = ' '.join(scope)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
@_auth_return_future
def oauth2_request(self, url, callback, access_token=None,
post_args=None, **args):
"""Fetches the given URL auth an OAuth2 access token.
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.oauth2_request(
"https://graph.facebook.com/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
.. versionadded:: 4.3
"""
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = functools.partial(self._on_oauth2_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_oauth2_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
.. versionadded:: 4.3
"""
return httpclient.AsyncHTTPClient()
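# A minimal sketch of a service-specific subclass wiring up the two class
# attributes described in the OAuth2Mixin docstring above; the endpoint URLs
# here are hypothetical placeholders, not a real provider.
class _ExampleOAuth2Mixin(OAuth2Mixin):
    _OAUTH_AUTHORIZE_URL = "https://example.com/oauth2/authorize"
    _OAUTH_ACCESS_TOKEN_URL = "https://example.com/oauth2/token"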
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream:
.. testcode::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
.. testoutput::
:hide:
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
functools.partial(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
The path should not include the format or API version number.
        (we automatically use JSON format and API version 1.1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage:
.. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = functools.partial(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class GoogleOAuth2Mixin(OAuth2Mixin):
"""Google authentication using OAuth2.
In order to use, register your application with Google and copy the
relevant parameters to your application settings.
* Go to the Google Dev Console at http://console.developers.google.com
* Select a project, or create a new one.
* In the sidebar on the left, select APIs & Auth.
* In the list of APIs, find the Google+ API service and set it to ON.
* In the sidebar on the left, select Credentials.
* In the OAuth section of the page, select Create New Client ID.
* Set the Redirect URI to point to your auth handler
* Copy the "Client secret" and "Client ID" to the application settings as
{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = 'google_oauth'
@_auth_return_future
def get_authenticated_user(self, redirect_uri, code, callback):
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
        (among others; see
        https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
        ``https://www.googleapis.com/oauth2/v2/userinfo``).
Example usage:
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
access = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
user = yield self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token with
# e.g. set_secure_cookie.
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. testoutput::
:hide:
"""
http = self.get_auth_http_client()
body = urllib_parse.urlencode({
"redirect_uri": redirect_uri,
"code": code,
"client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
"client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
"grant_type": "authorization_code",
})
http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
functools.partial(self._on_access_token, callback),
method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
def _on_access_token(self, future, response):
"""Callback function for the exchange to the access token."""
if response.error:
future.set_exception(AuthError('Google auth error: %s' % str(response)))
return
args = escape.json_decode(response.body)
future.set_result(args)
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage:
.. testcode::
class FacebookGraphLoginHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
.. testoutput::
:hide:
This method returns a dictionary which may contain the following fields:
* ``access_token``, a string which may be passed to `facebook_request`
* ``session_expires``, an integer encoded as a string representing
the time until the access token expires in seconds. This field should
be used like ``int(user['session_expires'])``; in a future version of
Tornado it will change from a string to an integer.
* ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
``link``, plus any fields named in the ``extra_fields`` argument. These
fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
.. versionchanged:: 4.5
The ``session_expires`` field was updated to support changes made to the
Facebook API in March 2017.
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
functools.partial(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = escape.json_decode(response.body)
session = {
"access_token": args.get("access_token"),
"expires_in": args.get("expires_in")
}
self.facebook_request(
path="/me",
callback=functools.partial(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
appsecret_proof=hmac.new(key=client_secret.encode('utf8'),
msg=session["access_token"].encode('utf8'),
digestmod=hashlib.sha256).hexdigest(),
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
# session_expires is converted to str for compatibility with
# older versions in which the server used url-encoding and
# this code simply returned the string verbatim.
# This should change in Tornado 5.0.
fieldmap.update({"access_token": session["access_token"],
"session_expires": str(session.get("expires_in"))})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage:
        .. testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
This method is a wrapper around `OAuth2Mixin.oauth2_request`;
the only difference is that this method takes a relative path,
while ``oauth2_request`` takes a complete url.
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
# Thanks to the _auth_return_future decorator, our "callback"
# argument is a Future, which we cannot pass as a callback to
# oauth2_request. Instead, have oauth2_request return a
# future and chain them together.
oauth_future = self.oauth2_request(url, access_token=access_token,
post_args=post_args, **args)
chain_future(oauth_future, callback)
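# A standalone sketch of the "appsecret_proof" computed in _on_access_token
# above: Facebook's Graph API accepts an HMAC-SHA256 of the access token,
# keyed with the app secret, as proof that the call comes from the app's own
# server. The secret and token values here are invented.
def _demo_appsecret_proof():
    secret = b"app-secret"
    token = b"user-access-token"
    return hmac.new(key=secret, msg=token,
                    digestmod=hashlib.sha256).hexdigest()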
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
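# A standalone sketch of the signing scheme implemented by the two helpers
# above: the method, the normalized URL, and the sorted, percent-encoded
# parameters are joined into a signature base string, which is then
# HMAC-SHA1 signed with "consumer_secret&token_secret". The secrets and
# parameter values here are made up for illustration.
def _demo_hmac_sha1_signing():
    params = dict(oauth_consumer_key="key", oauth_nonce="abc",
                  oauth_signature_method="HMAC-SHA1")
    base_elems = ["GET", "https://example.com/resource",
                  "&".join("%s=%s" % (k, _oauth_escape(str(v)))
                           for k, v in sorted(params.items()))]
    base_string = "&".join(_oauth_escape(e) for e in base_elems)
    key = b"&".join([b"consumer-secret", b"token-secret"])
    digest = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
    return binascii.b2a_base64(digest.digest())[:-1]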
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
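# A small, hedged illustration of the urlencoded provider response parsed by
# _oauth_parse_response above; the token values are invented. Parameters
# beyond the token pair are carried along on the result.
def _demo_parse_oauth_response():
    body = b"oauth_token=abc&oauth_token_secret=def&user_id=42"
    token = _oauth_parse_response(body)
    assert token == dict(key="abc", secret="def", user_id="42")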
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
import numbers
import random
import six
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from conveyor.common import plan_status as p_status
from conveyor import compute
from conveyor.conveyorheat.api import api as heat
from conveyor.db import api as db_api
from conveyor import exception
from conveyor import manager
from conveyor import network
from conveyor.objects import plan as plan_cls
from conveyor.resource import api as resource_api
from conveyor.resource.driver import instances
from conveyor.resource.driver import networks
from conveyor.resource.driver import secgroup
from conveyor.resource.driver import volumes
from conveyor.resource import resource
from conveyor import utils
from conveyor import volume
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PlanManager(manager.Manager):
"""Get detail resource."""
target = messaging.Target(version='1.18')
# How long to wait in seconds before re-issuing a shutdown
    # signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, *args, **kwargs):
"""."""
self.nova_api = compute.API()
self.cinder_api = volume.API()
self.resource_api = resource_api.ResourceAPI()
self.neutron_api = network.API()
self.heat_api = heat.API()
self.db_api = db_api
        super(PlanManager, self).__init__(service_name="conveyor-plan",
                                          *args, **kwargs)
def create_plan(self, context, plan_type, resources, plan_name=None):
if plan_type not in ["clone", "migrate"]:
msg = "Plan type must be 'clone' or 'migrate'."
LOG.error(msg)
raise exception.PlanTypeNotSupported(type=plan_type)
LOG.info("Begin to create a %s plan by resources: %s.", plan_type,
resources)
plan_id = uuidutils.generate_uuid()
if not plan_name:
plan_name = plan_id
new_plan = plan_cls.Plan(plan_id, plan_type,
context.project_id,
context.user_id,
plan_name=plan_name,
clone_resources=resources)
# Save to database.
plan_dict = new_plan.to_dict()
plan_cls.save_plan_to_db(context, plan_dict)
LOG.info("Create plan succeed. Plan_id is %s", plan_id)
return plan_dict
def build_plan_by_template(self, context, plan_dict, template):
LOG.info("Begin to build plan <%s> by template.",
plan_dict['plan_id'])
# extract resources
plan = plan_cls.Plan.from_dict(plan_dict)
plan_id = plan.plan_id
resources = {}
template_res = template.get('resources')
for key, value in template_res.items():
res_id = value.get('extra_properties', {}).get('id', '')
if not res_id:
res_id = uuidutils.generate_uuid()
template_res[key].get('extra_properties', {}).pop('id', '')
            resource_obj = resource.Resource(
                key,
                value.get('type'),
                res_id,
                properties=value.get('properties'),
                extra_properties=value.get('extra_properties'))
resource_obj.rebuild_parameter(template.get('parameters'))
resources[key] = resource_obj
plan.original_resources = resources
plan.rebuild_dependencies(is_original=True)
plan.plan_status = p_status.AVAILABLE
# Resources of migrate plan are not allowed to be modified,
# so 'updated fields' are empty.
if plan.plan_type == "clone":
plan.updated_resources = copy.deepcopy(resources)
plan.updated_dependencies = \
copy.deepcopy(plan.original_dependencies)
plan_dict = plan.to_dict()
update_values = {
'plan_status': p_status.AVAILABLE,
'original_resources': plan_dict['original_resources'],
'updated_resources': plan_dict['updated_resources']
}
try:
# Update to database
plan_cls.update_plan_to_db(context, plan_id,
update_values)
LOG.info("Create plan by template finished. Plan_id is %s"
% plan_id)
except Exception as e:
msg = "Create plan by template failed! %s" % unicode(e)
LOG.error(msg)
# Roll back: change plan status to error
plan_cls.update_plan_to_db(context, plan_id,
{'plan_status': p_status.ERROR})
raise exception.PlanCreateFailed(message=msg)
def get_plan_by_id(self, context, plan_id, detail=True):
LOG.info("Get plan with id of %s", plan_id)
plan_dict = plan_cls.read_plan_from_db(context, plan_id)
if detail:
return plan_dict
else:
fields = ('original_resources', 'updated_resources',
'original_dependencies', 'updated_dependencies')
for field in fields:
plan_dict.pop(field, None)
return plan_dict
def delete_plan(self, context, plan_id):
@utils.synchronized(plan_id)
        def _lock_do_delete_plan(context, plan_id):
self._delete_plan(context, plan_id)
_lock_do_delete_plan(context, plan_id)
def _delete_plan(self, context, plan_id):
LOG.info("Begin to delete plan with id of %s", plan_id)
plan = db_api.plan_get(context, plan_id)
if not plan:
            LOG.error('Delete plan %s failed: plan not found', plan_id)
raise exception.PlanNotFound(plan_id=plan_id)
plan_cls.update_plan_to_db(context, plan_id,
{'plan_status': p_status.DELETING})
# delete heat stack info
self.heat_api.clear_table(context, plan['stack_id'], plan_id)
# delete template info
try:
db_api.plan_template_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have template: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s template info failed: %(err)s',
{'id': plan_id, 'err': e})
raise
try:
db_api.plan_cloned_resource_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have cloned resources: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s cloned resources failed: %(err)s',
{'id': plan_id, 'err': e})
raise
try:
db_api.plan_availability_zone_mapper_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have az map: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s az map failed: %(err)s',
{'id': plan_id, 'err': e})
raise
# delete plan info
try:
db_api.plan_delete(context, plan_id)
        except exception.PlanNotFoundInDb:
            LOG.warn('Plan not found in db: %s', plan_id)
        except Exception as e:
            LOG.error('Delete plan %(id)s failed: %(err)s',
                      {'id': plan_id, 'err': e})
raise
LOG.info("Delete plan with id of %s succeed!", plan_id)
def force_delete_plan(self, context, plan_id):
plan = db_api.plan_get(context, plan_id)
stack_id = plan.get('stack_id', None) if plan else None
self.heat_api.clear_table(context, stack_id, plan_id)
try:
db_api.plan_template_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have template: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s template info failed: %(err)s',
{'id': plan_id, 'err': e})
raise
try:
db_api.plan_cloned_resource_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have cloned resources: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s cloned resources failed: %(err)s',
{'id': plan_id, 'err': e})
raise
try:
db_api.plan_availability_zone_mapper_delete(context, plan_id)
except exception.PlanNotFoundInDb:
            LOG.warn('Plan does not have az map: %s', plan_id)
except Exception as e:
LOG.error('Delete plan %(id)s az map failed: %(err)s',
{'id': plan_id, 'err': e})
raise
# delete plan info
try:
db_api.plan_delete(context, plan_id)
        except exception.PlanNotFoundInDb:
            LOG.warn('Plan not found in db: %s', plan_id)
        except Exception as e:
            LOG.error('Delete plan %(id)s failed: %(err)s',
                      {'id': plan_id, 'err': e})
raise
def update_plan(self, context, plan_id, values):
@utils.synchronized(plan_id)
def _lock_do_update_plan(context, plan_id, values):
self._do_update_plan(context, plan_id, values)
_lock_do_update_plan(context, plan_id, values)
def _do_update_plan(self, context, plan_id, values):
LOG.info("Update plan <%s> with values: %s", plan_id, values)
allowed_properties = ['task_status', 'plan_status',
'stack_id', 'updated_resources',
'sys_clone', 'copy_data']
# Verify the keys and values
for k, v in values.items():
if k not in allowed_properties:
msg = ("Update plan failed. %s field "
"not found or unsupported to update." % k)
LOG.error(msg)
raise exception.PlanUpdateError(message=msg)
elif k == 'plan_status' and v not in p_status.PLAN_STATUS:
msg = "Update plan failed. '%s' plan_status unsupported." % v
LOG.error(msg)
raise exception.PlanUpdateError(message=msg)
# If values contain updated_resources, set update time.
if 'updated_resources' in values:
values['updated_at'] = timeutils.utcnow()
# Update in database
plan_cls.update_plan_to_db(context, plan_id,
values)
LOG.info("Update plan with id of %s succeed!", plan_id)
def _extract_resources(self, context, id, type, updated_res):
if type == 'OS::Cinder::VolumeType':
vtr = volumes.VolumeType(context, updated_res)
# resource_ids = [id]
# resources = vtr.extract_volume_types(resource_ids)
return vtr.get_collected_resources()
elif type == 'OS::Cinder::Qos':
vor = volumes.QosResource(context, updated_res)
# resources = vor.extract_qos(id)
return vor.get_collected_resources()
def update_plan_resources(self, context, plan_id, resources):
LOG.info("Update resources of plan <%s> with values: %s", plan_id,
resources)
# Get plan object
plan_dict = self.get_plan_by_id(context, plan_id)
plan = plan_cls.Plan.from_dict(plan_dict)
updated_res = copy.deepcopy(plan.updated_resources)
updated_dep = copy.deepcopy(plan.updated_dependencies)
resources_list = copy.deepcopy(resources)
# Update resources
for res in resources:
if res.get('action') == 'delete':
# Remind: dep delete and add
resource_id = res.pop('resource_id', None)
updated_res.pop(resource_id)
for key, value in updated_dep.items():
if resource_id in value.dependencies:
                        msg = ('Other resources depend on resource %s; '
                               'delete failed.' % resource_id)
raise exception.PlanResourcesUpdateError(message=msg)
dependencies = updated_dep.get(resource_id).dependencies
if dependencies:
self._remove_org_depends(dependencies, updated_dep,
updated_res)
elif res.get('action') == 'add':
# Remind: dep delete and add
                LOG.debug('The resource to add is %s', res)
resource_id = res.pop('resource_id', None)
id = res.get('id')
type = res.get('resource_type')
self._resource_id_to_actual_id(updated_res)
updated_res = self._extract_resources(context, id, type,
updated_res)
self._actual_id_to_resource_id(updated_res)
elif res.get('action') == 'edit':
self._edit_plan_resource(context, plan, updated_res,
updated_dep, res, resources_list)
# Update to memory
plan.updated_resources = updated_res
# plan.updated_resources =
# self._actual_id_to_resource_id(context, updated_res)
plan.rebuild_dependencies()
# Update to database
updated_resources = {}
for k, v in updated_res.items():
updated_resources[k] = v.to_dict()
plan_cls.update_plan_to_db(context, plan_id,
{"updated_resources": updated_resources})
LOG.info("Update resource of plan <%s> succeed.", plan_id)
def _edit_plan_resource(self, context, plan, updated_res,
updated_dep, resource, resources_list):
resource.pop('action', None)
properties = copy.deepcopy(resource)
resource_id = properties.pop('resource_id', None)
resource_obj = updated_res.get(resource_id)
new_res_id = properties.pop('id', None)
properties.pop('resource_type', None)
copy_data = properties.pop('copy_data', None)
if not resource_id or not resource_obj:
msg = "%s resource not found." % resource_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate properties
res_type = resource_obj.type
heat_api = heat.API()
try:
uncheck_type = ['OS::Neutron::Vip']
if res_type not in uncheck_type:
heat_res_type = heat_api.get_resource_type(context, res_type)
res_properties = heat_res_type.get('properties')
LOG.debug("Validate the properties to be updated.")
self._simple_validate_update_properties(properties,
res_properties)
except exception.PlanResourcesUpdateError:
raise
except Exception as e:
LOG.error(unicode(e))
raise exception.PlanResourcesUpdateError(message=unicode(e))
def _update_simple_fields(resource_id, properties):
for k, v in properties.items():
updated_res[resource_id].properties[k] = v
simple_handle_type = ['OS::Neutron::Vip']
# Update resource
if 'OS::Nova::Server' == res_type:
allowed_fields = ['user_data', 'metadata']
for key, value in properties.items():
if key in allowed_fields:
resource_obj.properties[key] = value
else:
msg = ("'%s' field of server is not "
"allowed to update." % key)
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
        elif res_type == 'OS::Nova::KeyPair':
public_key = properties.get('public_key', None)
if not new_res_id and not public_key:
msg = ("'id' or 'public_key' must be provided "
"when updating keypair resource.")
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if new_res_id and new_res_id != resource_obj.id:
ir = instances.InstanceResource(context)
kp_res = ir.extract_keypairs([new_res_id])[0]
kp_res.name = resource_id
updated_res[resource_id] = kp_res
else:
resource_obj.id = None
# Update other fields.
_update_simple_fields(resource_id, properties)
elif 'OS::Neutron::SecurityGroup' == res_type:
rules = properties.get('rules', None)
if not new_res_id and not rules:
msg = ("'id' or 'rules' must be provided "
"when updating security group resource.")
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if new_res_id and new_res_id != resource_obj.id:
nr = secgroup.SecGroup(context)
sec_res = nr.extract_secgroups([new_res_id])[0]
sec_res.name = resource_id
updated_res[resource_id] = sec_res
else:
resource_obj.id = None
# Update other fields.
_update_simple_fields(resource_id, properties)
elif 'OS::Neutron::FloatingIP' == res_type:
if not new_res_id:
msg = "'id' must be provided when " \
"updating floating ip resource."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if new_res_id != resource_obj.id:
floatingip = self.neutron_api.get_floatingip(context,
new_res_id)
if floatingip.get('port_id'):
msg = "FloatingIp <%s> is in use."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Extracted floatingip resource.
self._resource_id_to_actual_id(updated_res)
nr = networks.NetworkResource(context,
collected_resources=updated_res)
floatingip_res = nr.extract_floatingips([new_res_id])[0]
floatingip_res.name = resource_id
# Reserve port_id
port_id = resource_obj.properties.get('port_id')
if port_id:
floatingip_res.properties['port_id'] = port_id
# Remove original floatingip resource
updated_res.pop(resource_obj.id, None)
self._actual_id_to_resource_id(updated_res)
else:
resource_obj.id = None
# Update other fields.
_update_simple_fields(resource_id, properties)
elif 'OS::Neutron::Port' == res_type:
self._update_port_resource(context, updated_res, resource)
elif 'OS::Neutron::Net' == res_type:
self._update_network_resource(context, updated_res, updated_dep,
resource)
elif 'OS::Neutron::Subnet' == res_type:
self._update_subnet_resource(context, updated_res, updated_dep,
resource, resources_list)
elif res_type in simple_handle_type:
_update_simple_fields(resource_id, properties)
elif 'OS::Cinder::Volume' == res_type:
org_volume_id = resource_obj.id
            org_dependencies = updated_dep.get(resource_id).dependencies
if new_res_id and new_res_id != org_volume_id:
self._resource_id_to_actual_id(updated_res)
vr = volumes.Volume(context, updated_res)
volume_res = vr.extract_volume(new_res_id)
volume_res.name = resource_id
volume_res.extra_properties['exist'] = 'true'
                # updated_res is now keyed by openstack id -> resource object.
updated_res = vr.get_collected_resources()
updated_res.pop(org_volume_id, None)
self._actual_id_to_resource_id(updated_res)
plan.updated_resources = updated_res
plan.rebuild_dependencies()
new_updated_dep = copy.deepcopy(plan.updated_dependencies)
                if org_dependencies:
                    self._remove_org_depends(org_dependencies,
                                             new_updated_dep, updated_res)
if copy_data is not None:
resource_obj.extra_properties['copy_data'] = copy_data
# Update other fields.
_update_simple_fields(resource_id, properties)
elif 'OS::Cinder::VolumeType' == res_type:
org_volume_type_id = resource_obj.id
            org_dependencies = updated_dep.get(resource_id).dependencies
if new_res_id != org_volume_type_id:
self._resource_id_to_actual_id(updated_res)
vtr = volumes.VolumeType(context, updated_res)
vt_res = vtr.extract_volume_type(new_res_id)
vt_res.name = resource_id
updated_res = vtr.get_collected_resources()
updated_res.pop(org_volume_type_id, None)
self._actual_id_to_resource_id(updated_res)
plan.updated_resources = updated_res
plan.rebuild_dependencies()
new_updated_dep = copy.deepcopy(plan.updated_dependencies)
                if org_dependencies:
                    self._remove_org_depends(org_dependencies,
                                             new_updated_dep, updated_res)
else:
msg = "%s resource is unsupported to update." % res_type
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
    def _remove_org_depends(self, org_dependencies,
                            new_updated_dep, updated_res):
        org_dep_also_exist = []
        for dep in org_dependencies:
            for key, value in new_updated_dep.items():
                if dep in value.dependencies:
                    org_dep_also_exist.append(dep)
                    break
        delete_deps = [item for item in org_dependencies if
                       item not in org_dep_also_exist]
        for dep in delete_deps:
            updated_res.pop(dep)
            child_dependencies = new_updated_dep.get(dep).dependencies
            new_updated_dep.pop(dep)
            if child_dependencies:
                self._remove_org_depends(child_dependencies, new_updated_dep,
                                         updated_res)
def _update_port_resource(self, context, updated_res, resource):
LOG.debug("Update port %s resource with %s.",
resource['resource_id'], resource)
properties = resource
resource_id = properties.pop('resource_id', None)
resource_obj = updated_res[resource_id]
properties.pop('resource_type', None)
# Only fixed_ips can be updated.
        ips_to_update = properties.pop('fixed_ips', None)
        if not ips_to_update:
            msg = ("Only the 'fixed_ips' property is allowed "
                   "to be updated on a port.")
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate the number of ips on a port
original_ips = resource_obj.properties.get('fixed_ips')
if len(original_ips) != len(ips_to_update):
msg = "The number of fixed ips must remain the same."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
def _get_pools(subnet_id):
"""Get subnet allocation_pools by neutron api."""
try:
subnet = self.neutron_api.get_subnet(context, subnet_id)
return subnet.get('allocation_pools', [])
except Exception as e:
msg = "Subnet <%s> not found. %s" % (subnet_id, unicode(e))
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate whether ip address matches the subnet.
for item in ips_to_update:
ip_address = item.get('ip_address')
subnet_id = item.get('subnet_id')
LOG.debug("Check fixed ip: %s", item)
# subnet_id is required, ip_address is optional
if not subnet_id:
msg = "subnet_id must be provided when updating fixed_ips."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# If ip_address is provided, validate it.
if ip_address:
LOG.debug("Validate ip address %s.", ip_address)
# Get subnet range from exist subnet resource.
allocation_pools = []
if isinstance(subnet_id, dict) and len(subnet_id) == 1:
# Only support 'get_param' and 'get_resource'
if subnet_id.get('get_param'):
sub_param_id = subnet_id['get_param']
if isinstance(sub_param_id, six.string_types):
                            subnet_id = resource_obj.parameters.get(
                                sub_param_id, {}).get('default')
LOG.debug("Get subnet id <%s> "
"from parameter <%s>.", subnet_id,
sub_param_id)
if subnet_id:
allocation_pools = _get_pools(subnet_id)
else:
msg = "%s parameter not found." % sub_param_id
LOG.error(msg)
raise exception.\
PlanResourcesUpdateError(message=msg)
elif subnet_id.get('get_resource'):
sub_res_id = subnet_id['get_resource']
if isinstance(sub_res_id, six.string_types) \
and updated_res.get(sub_res_id):
allocation_pools = updated_res[sub_res_id].\
properties.get('allocation_pools')
else:
msg = "%s resource not found." % sub_res_id
LOG.error(msg)
raise exception.\
PlanResourcesUpdateError(message=msg)
elif isinstance(subnet_id, six.string_types):
if uuidutils.is_uuid_like(subnet_id):
allocation_pools = _get_pools(subnet_id)
else:
msg = "Subnet id must be uuid."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if not allocation_pools:
msg = "Can not found subnet allocation_pools information."
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate whether ip address in ip range.
ip_valid = False
for pool in allocation_pools:
start = pool.get('start')
end = pool.get('end')
if isinstance(start, six.string_types) \
and isinstance(end, six.string_types) \
and netaddr.IPAddress(ip_address) in \
netaddr.IPRange(start, end):
ip_valid = True
if not ip_valid:
msg = ("Ip address doesn't match allocation_pools %s."
% allocation_pools)
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Begin to update.
ip_index = ips_to_update.index(item)
original_ip_item = original_ips[ip_index]
original_subnet = original_ip_item.get('subnet_id')
# Update ip_address
if ip_address:
original_ips[ip_index]['ip_address'] = ip_address
# If subnets are the same, only update ip_address if provided.
if original_subnet == subnet_id:
pass
# If subnet_id is from other exist resource, replace directly.
elif isinstance(subnet_id, dict) and len(subnet_id) == 1 \
and subnet_id.get('get_resource'):
sub_res_id = subnet_id['get_resource']
if isinstance(sub_res_id, six.string_types) \
and updated_res.get(sub_res_id):
original_ips[ip_index]['subnet_id'] = subnet_id
LOG.debug("Update ip_address property %s.",
original_ips[ip_index])
else:
msg = "%s resource not found." % sub_res_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# If subnet_id is a uuid, get resource by neutron driver.
# If this subnet has been extracted, it won't be extracted again.
elif uuidutils.is_uuid_like(subnet_id):
# Replace the keys by actual_id
LOG.debug("Extract subnet <%s> resource.", subnet_id)
# Extracted subnet resource.
self._resource_id_to_actual_id(updated_res)
nr = networks.NetworkResource(context,
collected_resources=updated_res)
subnet_res = nr.extract_subnets([subnet_id])[0]
# Restore the keys
self._actual_id_to_resource_id(updated_res)
original_ips[ip_index]['subnet_id'] = {'get_resource':
subnet_res.name}
LOG.debug("Update ip_address property %s.",
original_ips[ip_index])
else:
msg = "subnet_id (%s) is invalid." % subnet_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
        # We need to create a new port.
resource_obj.id = None
# Update other fields.
for k, v in properties.items():
updated_res[resource_id].properties[k] = v
def _update_subnet_resource(self, context, updated_res, updated_dep,
resource, resources_list):
LOG.debug("Update subnet %s resource with %s.",
resource['resource_id'], resource)
properties = resource
new_res_id = properties.pop('id', None)
resource_id = properties.pop('resource_id', None)
properties.pop('resource_type', None)
resource_obj = updated_res[resource_id]
if new_res_id and not uuidutils.is_uuid_like(new_res_id):
msg = "Subnet id <%s> must be uuid." % new_res_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if new_res_id:
subnet = self.neutron_api.get_subnet(context, new_res_id)
new_net_id = subnet['network_id']
        org_net_res_id = resource_obj.properties.get(
            'network_id', {}).get('get_resource')
org_net_res = updated_res.get(org_net_res_id)
if not org_net_res:
msg = "Network resource <%s> not found." % org_net_res_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Update network and all corresponding subnets
if org_net_res.id != new_net_id:
res_to_update = {'id': new_net_id,
'resource_id': org_net_res_id}
self._update_network_resource(context, updated_res,
updated_dep,
res_to_update, resource_id)
            # Update the current subnet resource.
self._update_subnet_and_port(context, updated_res, updated_dep,
resource_id, new_res_id)
else:
self._update_org_subnet_info(context, updated_res,
updated_dep, resource_id,
resources_list)
# Update other fields.
for k, v in properties.items():
updated_res[resource_id].properties[k] = v
def _update_subnet_and_port(self, context, updated_res,
updated_dep, resource_id, subnet_id):
resource_obj = updated_res[resource_id]
org_subnet_id = resource_obj.id
if not uuidutils.is_uuid_like(subnet_id):
msg = "Subnet id <%s> must be uuid." % subnet_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if org_subnet_id == subnet_id:
LOG.info("Subnet <%s> is the same as original subnet. "
"Skip updating.", subnet_id)
return
# Extracted subnet resource.
nr = networks.NetworkResource(context)
subnet_res = nr.extract_subnets([subnet_id])[0]
# Update subnet info
subnet_res.name = resource_id
updated_res[resource_id] = subnet_res
        # Remove the fixed ip on all ports corresponding to this subnet
        # and reset the port id so that a new port will be created.
for rid, dep in updated_dep.items():
if dep.type == "OS::Neutron::Port" and \
resource_id in dep.dependencies:
port_res = updated_res.get(rid)
if not port_res:
continue
port_res.id = None
fixed_ips = port_res.properties.get('fixed_ips')
if not fixed_ips:
continue
for fip in fixed_ips:
if fip.get('ip_address') and fip.get('subnet_id') == \
{'get_resource': resource_id}:
del fip['ip_address']
def _update_org_net_info(self, context, updated_res,
updated_dep, resource_id):
        # Reset the ids of related resources that depend on this net resource.
net_update_resource = ["OS::Neutron::Subnet", "OS::Neutron::Port",
"OS::Neutron::FloatingIP",
"OS::Neutron::Router"]
sub_update_resource = ["OS::Neutron::RouterInterface",
"OS::Neutron::Port"]
for rid, dep in updated_dep.items():
if dep.type in net_update_resource\
and resource_id in dep.dependencies:
net_related_res = updated_res.get(rid)
net_related_res.id = None
for res_id, dep_object in updated_dep.items():
if dep_object.type in sub_update_resource\
and rid in dep_object.dependencies:
sub_related_res = updated_res.get(res_id)
sub_related_res.id = None
        # Reset the net resource id.
net_res = updated_res.get(resource_id)
net_res.id = None
def _update_org_subnet_info(self, context, updated_res,
updated_dep, resource_id, resources_list):
res_dependencies_key = updated_dep.get(resource_id).dependencies
for key in res_dependencies_key:
res_obj = updated_res.get(key)
if res_obj.type == "OS::Neutron::Net":
self._update_org_net_info(context, updated_res,
updated_dep, key)
need_pop_seg = True
for res in resources_list:
if key == res.get('resource_id'):
need_pop_seg = False
break
if need_pop_seg:
if updated_res[key].properties.get('value_specs').\
get('provider:segmentation_id'):
updated_res[key].properties.get('value_specs').\
pop('provider:segmentation_id')
def _update_network_resource(self, context, updated_res, updated_dep,
resource, except_subnet=None):
LOG.debug("Update network %s resource with %s.",
resource['resource_id'], resource)
properties = resource
new_res_id = properties.pop('id', None)
resource_id = properties.pop('resource_id', None)
properties.pop('resource_type', None)
org_net = updated_res[resource_id]
org_net_id = org_net.id
if new_res_id and not uuidutils.is_uuid_like(new_res_id):
msg = "Network id <%s> must be uuid." % new_res_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
if new_res_id and new_res_id != org_net_id:
            # Make sure the network has at least one subnet.
net = self.neutron_api.get_network(context, new_res_id)
subnets = net.get('subnets', [])
if not subnets:
msg = "No subnets found in network %s." % new_res_id
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate whether network exists on a server.
self._validate_server_network_duplication(updated_res,
resource_id, new_res_id)
# Extracted network resource.
nr = networks.NetworkResource(context)
net_res = nr.extract_nets([new_res_id])[0]
# Update network resource.
net_res.name = resource_id
updated_res[resource_id] = net_res
# Update corresponding subnet resources.
for rid, dep in updated_dep.items():
if dep.type == "OS::Neutron::Subnet" and resource_id in \
dep.dependencies:
subnet_res = updated_res.get(rid)
if not subnet_res or except_subnet == subnet_res.name:
continue
# Randomly choose a subnet.
random_index = random.randint(0, len(subnets) - 1)
random_sub_id = subnets[random_index]
self._update_subnet_and_port(context, updated_res,
updated_dep, rid,
random_sub_id)
else:
            # The network is unchanged; reset the original network info.
            LOG.info("Network <%s> is the same as original network. "
                     "Updating the org_net info.", org_net_id)
self._update_org_net_info(context, updated_res,
updated_dep, resource_id)
if properties.get('value_specs') and \
not properties.get('value_specs').\
get('provider:segmentation_id'):
if updated_res[resource_id].properties.\
get('value_specs').get('provider:segmentation_id'):
updated_res[resource_id].properties.\
get('value_specs').pop('provider:segmentation_id')
elif not properties.get('value_specs'):
if updated_res[resource_id].properties.\
get('value_specs').get('provider:segmentation_id'):
updated_res[resource_id].properties.\
get('value_specs').pop('provider:segmentation_id')
# Update other fields.
for k, v in properties.items():
updated_res[resource_id].properties[k] = v
def _validate_server_network_duplication(self, updated_res,
net_res_id_to_update, net_id):
LOG.debug("Validate whether network exists on a server.")
for res in updated_res.values():
if res.type != "OS::Nova::Server":
continue
networks = res.properties.get('networks')
if not networks:
continue
exist_nets = []
need_validate = False
def _get_param(res, param_id):
if isinstance(param_id, six.string_types):
return res.parameters.get(param_id, {}).get('default')
def _get_net_id(uuid_or_network):
net = uuid_or_network
if uuidutils.is_uuid_like(net):
exist_nets.append(net)
elif isinstance(net, dict) and len(net) == 1:
if net.get('get_param'):
net_param = _get_param(res, net['get_param'])
if net_param and uuidutils.is_uuid_like(net_param):
exist_nets.append(net_param)
elif net.get('get_resource'):
net_res_id = net['get_resource']
if net_res_id == net_res_id_to_update:
return True
elif isinstance(net_res_id, six.string_types) \
and updated_res.get(net_res_id):
exist_nets.append(updated_res[net_res_id].id)
for net in networks:
port_res_id = net.get('port', {}).get('get_resource')
net_uuid = net.get('uuid', {})
network = net.get('network', {})
if port_res_id:
port_res = updated_res.get(port_res_id)
if not port_res:
continue
network_id = port_res.properties.get('network_id')
if uuidutils.is_uuid_like(network_id):
exist_nets.append(network_id)
elif isinstance(network_id, dict) and \
len(network_id) == 1:
if network_id.get('get_param'):
net_param = _get_param(port_res,
network_id['get_param'])
if uuidutils.is_uuid_like(net_param):
exist_nets.append(net_param)
elif network_id.get('get_resource'):
net_res_id = network_id['get_resource']
if net_res_id == net_res_id_to_update:
need_validate = True
else:
net_res = updated_res.get(net_res_id)
if net_res:
exist_nets.append(net_res.id)
if net_uuid:
if _get_net_id(net_uuid) is True:
need_validate = True
if network:
if _get_net_id(network) is True:
need_validate = True
if need_validate and net_id in exist_nets:
msg = ("Duplicate networks <%s> found on server <%s>."
% (net_id, res.name))
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
def _simple_validate_update_properties(self, args, properties):
"""Simply validate properties to be updated."""
# If properties info not found, return.
if not isinstance(properties, dict) or len(properties) < 1:
return
if not isinstance(args, dict):
msg = "The type of update properties(%s) is incorrect." % args
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
type_map = {"integer": numbers.Number,
"number": numbers.Number,
"boolean": bool,
"string": six.string_types,
"list": list,
"map": dict
}
def _validate_type(value, expected_type):
if isinstance(value, expected_type):
return True
elif expected_type == bool:
return False
elif expected_type not in (list, dict) \
and isinstance(value, dict) and len(value) == 1 \
and (value.keys()[0] in ('get_resource', 'get_param',
'get_attr')):
return True
else:
return False
for key, value in args.items():
# Validate whether property exists.
if key in properties.keys():
pro = properties[key]
elif len(properties) == 1 and properties.keys()[0] == '*':
pro = properties.values()[0]
else:
msg = "Unknown property %s." % args
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate property type.
expected_type = pro.get('type')
if isinstance(expected_type, six.string_types):
expected_type = expected_type.lower()
if expected_type not in type_map.keys():
continue
expected_type = type_map.get(expected_type)
# Transform special type.
if expected_type == six.string_types:
if isinstance(value, numbers.Number):
args[key] = value = str(value)
elif not value:
args[key] = value = ''
# Validate type
if not _validate_type(value, expected_type):
msg = ("The type of property (%s: %s) is incorrect "
"(expect %s type)." % (key, value, expected_type))
LOG.error(msg)
raise exception.PlanResourcesUpdateError(message=msg)
# Validate children properties of dict type
if isinstance(value, dict) and pro.get('schema'):
self._simple_validate_update_properties(value, pro['schema'])
# Validate children properties of list type
if isinstance(value, list) and pro.get('schema') \
and len(pro['schema']) == 1 \
and pro['schema'].keys()[0] == "*":
child_schema = pro['schema'].values()[0]
child_type = child_schema.get('type')
child_type = type_map.get(child_type)
if child_type == dict and child_schema.get('schema'):
for v in value:
self._simple_validate_update_properties(
v,
child_schema['schema'])
elif child_type not in (list, dict):
for v in value:
if not _validate_type(v, child_type):
msg = "%s is not string type." % v
LOG.error(msg)
raise exception.\
PlanResourcesUpdateError(message=msg)
def _actual_id_to_resource_id(self, res_or_dep):
new_res = {}
if isinstance(res_or_dep, dict):
for v in res_or_dep.values():
if isinstance(v, resource.Resource):
new_res[v.name] = v
elif isinstance(v, resource.ResourceDependency):
new_res[v.name_in_template] = v
res_or_dep.clear()
res_or_dep.update(new_res)
return res_or_dep
def _resource_id_to_actual_id(self, res_or_dep):
new_res = {}
if isinstance(res_or_dep, dict):
for v in res_or_dep.values():
new_res[v.id] = v
res_or_dep.clear()
res_or_dep.update(new_res)
return res_or_dep
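# A small standalone sketch (using a stand-in resource class, not the real
# conveyor Resource) of the re-keying done by _resource_id_to_actual_id and
# _actual_id_to_resource_id above: the same objects are indexed either by
# their name in the template or by their actual OpenStack id.
class _StubResource(object):
    def __init__(self, name, actual_id):
        self.name = name
        self.id = actual_id

def _demo_rekey_resources():
    res = {'server_0': _StubResource('server_0', 'uuid-1')}
    by_id = dict((v.id, v) for v in res.values())        # keyed by actual id
    by_name = dict((v.name, v) for v in by_id.values())  # and back again
    assert 'uuid-1' in by_id and 'server_0' in by_name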
import csv
import os
import time
from collections import OrderedDict
from mock import MagicMock
from delivery.models.project import RunfolderProject
from delivery.models.runfolder import Runfolder, RunfolderFile
from delivery.models.sample import SampleFile, Sample
from delivery.services.metadata_service import MetadataService
class MockIOLoop():
def __init__(self):
pass
    def spawn_callback(self, f, **kwargs):
        f(**kwargs)
class TestUtils:
DUMMY_CONFIG = {"monitored_directory": "/foo"}
class DummyConfig:
def __getitem__(self, key):
return TestUtils.DUMMY_CONFIG[key]
fake_directories = ["160930_ST-E00216_0111_BH37CWALXX",
"160930_ST-E00216_0112_BH37CWALXX"]
fake_projects = ["ABC_123", "DEF_456", "GHI_789"]
def mock_file_system_service(directories, projects, fastq_files=None):
mock_file_system_service_instance = MagicMock()
mock_file_system_service_instance.find_runfolder_directories.return_value = directories
mock_file_system_service_instance.find_project_directories.return_value = projects
mock_file_system_service_instance.list_files_recursively.return_value = fastq_files or []
return mock_file_system_service_instance
def mock_metadata_service(checksums=None):
mock_metadata_service_instance = MagicMock(spec=MetadataService)
mock_metadata_service_instance.parse_checksum_file.return_value = checksums or {}
return mock_metadata_service_instance
def _item_generator(prefix=None, suffix=None):
nxt = 1
while True:
yield "".join([prefix or "", str(nxt), suffix or ""])
nxt += 1
def sample_name_generator():
yield from _item_generator(prefix="MockSample_")
def sample_index_generator():
yield from _item_generator(prefix="S")
def lane_generator():
yield from _item_generator()
def report_type_generator():
report_types = ["multiqc", "seqreports", "sisyphus"]
while True:
for rt in report_types:
yield rt
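# A hedged demo (not used by the tests) of the naming schemes the generators
# above produce; each next() call yields the next item in the sequence.
def _generator_demo():
    names = sample_name_generator()
    indexes = sample_index_generator()
    lanes = lane_generator()
    assert next(names) == "MockSample_1"
    assert next(indexes) == "S1"
    assert next(lanes) == "1"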
def project_sample(project, sample_name, sample_index, lane_no, sample_id=None):
sample_files = []
if sample_id:
sample_dir = os.path.join(project.path, sample_id)
else:
sample_dir = project.path
for is_index in [False, True]:
for read_no in [1, 2]:
sample_path = os.path.join(
sample_dir,
"{}_{}_L00{}_{}{}_001.fastq.gz".format(
sample_name,
sample_index,
str(lane_no),
"I" if is_index else "R",
str(read_no)))
sample_files.append(
SampleFile(
sample_path=sample_path,
sample_name=sample_name,
sample_index=sample_index,
lane_no=int(lane_no),
read_no=int(read_no),
is_index=is_index,
checksum="checksum-for-{}".format(sample_path)))
return Sample(
name=sample_name,
project_name=project.name,
sample_id=sample_id,
sample_files=sample_files
)
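# An illustration, with made-up values, of the fastq naming scheme used by
# project_sample above: <name>_<index>_L00<lane>_<R|I><read>_001.fastq.gz
def _fastq_name_demo():
    name = "{}_{}_L00{}_{}{}_001.fastq.gz".format(
        "MockSample_1", "S1", "1", "R", "1")
    assert name == "MockSample_1_S1_L001_R1_001.fastq.gz"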
def runfolder_project(
runfolder,
project_name="ABC_123",
sample_indexes=sample_index_generator(),
lane_numbers=lane_generator(),
project_root="Unaligned",
report_type=report_type_generator()):
project = RunfolderProject(
name=project_name,
path=os.path.join(runfolder.path, project_root, project_name),
runfolder_path=runfolder.path,
runfolder_name=runfolder.name
)
project.project_files = project_report_files(project, next(report_type))
sample_names = sample_name_generator()
    # a straightforward sample with files on one lane
lane_number = next(lane_numbers)
samples = [project_sample(project, next(sample_names), next(sample_indexes), lane_number)]
# a sample with files on two lanes
sample_name = next(sample_names)
sample_index = next(sample_indexes)
sample = project_sample(project, sample_name, sample_index, lane_number)
lane_number = next(lane_numbers)
t_sample = project_sample(project, sample_name, sample_index, lane_number)
sample.sample_files.extend(t_sample.sample_files)
samples.append(sample)
# a sample with two preps on two lanes and sample files in subdirectories
sample_name = next(sample_names)
t_samples = [
project_sample(
project,
sample_name=sample_name,
sample_index=si,
lane_no=l,
sample_id="{}-{}-{}".format(sample_name, si, l))
for si in [next(sample_indexes), next(sample_indexes)] for l in [next(lane_numbers), next(lane_numbers)]]
samples.extend(t_samples)
project.samples = samples
return project
def unorganised_runfolder(name="180124_A00181_0019_BH72M5DMXX", root_path="/foo"):
sample_indexes = sample_index_generator()
lane_numbers = lane_generator()
runfolder = Runfolder(name=name, path=os.path.join(root_path, name))
runfolder.projects = [
runfolder_project(
runfolder,
project_name=p,
sample_indexes=sample_indexes,
lane_numbers=lane_numbers) for p in fake_projects]
# add another project with missing files
project = runfolder_project(
runfolder,
project_name="JKL_123",
sample_indexes=sample_indexes,
lane_numbers=lane_numbers)
project.project_files = []
runfolder.projects.append(project)
checksums = {}
for project in runfolder.projects:
for sample in project.samples:
for sample_file in sample.sample_files:
checksums[os.path.relpath(
sample_file.file_path,
os.path.dirname(runfolder.path))] = sample_file.checksum
for project_file in project.project_files:
checksums[os.path.relpath(
project_file.file_path,
os.path.dirname(runfolder.path))] = project_file.checksum
runfolder.checksums = checksums
return runfolder
def samplesheet_data_for_runfolder(runfolder):
samplesheet_data_headers = [
"Lane",
"Sample_ID",
"Sample_Name",
"Sample_Plate",
"Sample_Well",
"index",
"Sample_Project",
"Description"
]
samplesheet_data = []
for project in runfolder.projects:
for sample in project.samples:
for sample_file in sample.sample_files:
if sample_file.read_no == 1 and not sample_file.is_index:
samplesheet_data.append(
OrderedDict(zip(
samplesheet_data_headers,
[
str(sample_file.lane_no),
sample.sample_id,
sample_file.sample_name,
str(),
str(),
"index_seq_{}".format(sample_file.sample_index),
project.name,
"PROJECT:{};SAMPLE:{};LANE:{};INDEX:{}".format(
project.name,
sample.name,
str(sample_file.lane_no),
sample_file.sample_index)])))
return samplesheet_data
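# A single row built above looks roughly like this (values illustrative):
#   OrderedDict([("Lane", "1"), ("Sample_ID", None), ("Sample_Name", "Sample_A"),
#                ("Sample_Plate", ""), ("Sample_Well", ""), ("index", "index_seq_S1"),
#                ("Sample_Project", "ABC_123"),
#                ("Description", "PROJECT:ABC_123;SAMPLE:Sample_A;LANE:1;INDEX:S1")])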
def samplesheet_file_from_runfolder(runfolder):
header_stuff = """[Header],,,,,,,,
IEMFileVersion,4,,,,,,,
Experiment Name,Hiseq-2500-single-index,,,,,,,
Date,02/26/2019,,,,,,,
Workflow,Resequencing,,,,,,,
Application,Human Genome Resequencing,,,,,,,
Assay,TruSeq LT,,,,,,,
Description,,,,,,,,
Chemistry,Default,,,,,,,
,,,,,,,,
[Reads],,,,,,,,
50,,,,,,,,
50,,,,,,,,
,,,,,,,,
[Settings],,,,,,,,
FlagPCRDuplicates,1,,,,,,,
Adapter,,,,,,,,
AdapterRead2,,,,,,,,
,,,,,,,,
[Data],,,,,,,,
"""
samplesheet_data = samplesheet_data_for_runfolder(runfolder)
samplesheet_file = os.path.join(runfolder.path, "SampleSheet.csv")
with open(samplesheet_file, "w") as fh:
fh.write(header_stuff)
writer = csv.DictWriter(fh, fieldnames=samplesheet_data[0].keys())
writer.writeheader()
writer.writerows(samplesheet_data)
return samplesheet_file, samplesheet_data
def project_report_files(project, report_type):
if "multiqc" in report_type:
report_dir = project.path
report_files = [os.path.join(report_dir, "{}_multiqc_report.html".format(project.name)),
os.path.join(report_dir, "{}_multiqc_report_data.zip".format(project.name))]
elif "seqreports" in report_type:
report_dir = os.path.join(project.runfolder_path, "seqreports", "projects", project.name)
report_files = [os.path.join(report_dir, "{}_{}_multiqc_report.html".format(project.runfolder_name, project.name)),
os.path.join(report_dir, "{}_{}_multiqc_report_data.zip".format(project.runfolder_name, project.name))]
elif "sisyphus" in report_type:
report_dir = os.path.join(project.runfolder_path, "Summary", project.name)
        report_files = [os.path.join(report_dir, "report.{}".format(ext))
                        for ext in ["html", "xml", "xsl"]]
report_files.append(os.path.join(report_dir, "Plots", "file_in_plots.png"))
report_files.append(os.path.join(report_dir, "Plots", "subdir", "file_in_plots_subdir"))
return [
RunfolderFile(report_file, file_checksum="checksum-for-{}".format(report_file))
for report_file in report_files
]
_runfolder1 = Runfolder(name="160930_ST-E00216_0111_BH37CWALXX",
path="/foo/160930_ST-E00216_0111_BH37CWALXX")
_runfolder1.projects = [RunfolderProject(name="ABC_123",
path="/foo/160930_ST-E00216_0111_BH37CWALXX/Projects/ABC_123",
runfolder_path=_runfolder1.path,
runfolder_name="160930_ST-E00216_0111_BH37CWALXX"),
RunfolderProject(name="DEF_456",
path="/foo/160930_ST-E00216_0111_BH37CWALXX/Projects/DEF_456",
runfolder_path=_runfolder1.path,
runfolder_name="160930_ST-E00216_0111_BH37CWALXX"),
RunfolderProject(name="GHI_789",
path="/foo/160930_ST-E00216_0111_BH37CWALXX/Projects/GHI_789",
runfolder_path=_runfolder1.path,
runfolder_name="160930_ST-E00216_0111_BH37CWALXX")]
_runfolder2 = Runfolder(name="160930_ST-E00216_0112_BH37CWALXX",
path="/foo/160930_ST-E00216_0112_BH37CWALXX")
_runfolder2.projects = [RunfolderProject(name="ABC_123",
path="/foo/160930_ST-E00216_0112_BH37CWALXX/Projects/ABC_123",
runfolder_path=_runfolder2.path,
runfolder_name="160930_ST-E00216_0112_BH37CWALXX"),
RunfolderProject(name="DEF_456",
path="/foo/160930_ST-E00216_0112_BH37CWALXX/Projects/DEF_456",
runfolder_path=_runfolder2.path,
runfolder_name="160930_ST-E00216_0112_BH37CWALXX"),
RunfolderProject(name="GHI_789",
path="/foo/160930_ST-E00216_0112_BH37CWALXX/Projects/GHI_789",
                                         runfolder_path=_runfolder2.path,
runfolder_name="160930_ST-E00216_0112_BH37CWALXX")]
FAKE_RUNFOLDERS = [_runfolder1, _runfolder2]
UNORGANISED_RUNFOLDER = unorganised_runfolder()
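# Helper meant to be called with (or bound onto) a unittest.TestCase instance,
# hence the explicit `self` argument: re-evaluate f() until it equals `expected`
# or until `timeout` seconds have elapsed. Illustrative usage from inside a test:
#   assert_eventually_equals(self, timeout=5, f=lambda: queue.qsize(), expected=0)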
def assert_eventually_equals(self, timeout, f, expected, delay=0.1):
start_time = time.time()
while True:
try:
value = f()
self.assertEqual(value, expected)
break
except AssertionError:
if time.time() - start_time <= timeout:
time.sleep(delay)
continue
else:
raise
| |
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from entree.models import *
from entree.forms import RegisterForm, EditUserForm
from entree_project.settings import APP_VERSION, NUM_RESULTS
from datetime import timedelta
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
import requests
import simplejson
import html
FLICKR_REST_ROOT_URL = 'https://api.flickr.com/services/rest/?method='
# Global site context
context_dict = {
'version': 'v' + APP_VERSION
}
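# NOTE: because context_dict is a module-level dict, it is shared (and mutated)
# across every request served by this process, so per-request values such as
# form instances and error flags can leak between users; building a fresh dict
# inside each view would be the safer pattern.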
def bad_request(request):
context_dict['status_code'] = 400
context_dict['status_name'] = 'Bad Request'
context_dict['status_description'] = "Sorry, your request could not be understood by the server."
response = render(request, 'error.html', context_dict)
response.status_code = 400
return response
def permission_denied(request):
context_dict['status_code'] = 403
context_dict['status_name'] = 'Permission Denied'
context_dict['status_description'] = "Sorry, you do not have permission to access this page."
response = render(request, 'error.html', context_dict)
response.status_code = 403
return response
def page_not_found(request):
context_dict['status_code'] = 404
context_dict['status_name'] = 'Page Not Found'
context_dict['status_description'] = "Sorry, the page you were trying to view does not exist."
response = render(request, 'error.html', context_dict)
response.status_code = 404
return response
def server_error(request):
context_dict['status_code'] = 500
context_dict['status_name'] = 'Server Error'
context_dict['status_description'] = "Sorry, an error occured while processing your request."
response = render(request, 'error.html', context_dict)
response.status_code = 500
return response
def login(request):
login_failed = False
new_login = True
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth_login(request, user)
                return redirect('/entree/')
login_failed = True
new_login = False
else:
# user redirected from registration page
if 'registered' in request.GET:
new_login = False
context_dict['login_failed'] = login_failed
context_dict['new_login'] = new_login
return render(request, 'entree/login.html', context_dict)
@login_required
def logout(request):
auth_logout(request)
    return redirect('/entree/login/')
def register(request):
if request.method == 'POST':
register_form = RegisterForm(request.POST)
if register_form.is_valid():
data = register_form.cleaned_data
user = User(
username=data['username'],
first_name=data['first_name'],
last_name=data['last_name'],
email=data['email']
)
user.set_password(data['password'])
user.save()
profile = UserProfile(
user=user
)
profile.save()
context_dict['new_login'] = False
            return redirect('/entree/login/?registered=true')
else:
register_form = RegisterForm()
context_dict['register_form'] = register_form
return render(request, 'entree/register.html', context_dict)
def about(request):
return render(request, 'entree/about.html', context_dict)
@login_required
def profile(request):
user = request.user
if request.method == 'POST':
user_form = EditUserForm(request.POST)
if user_form.is_valid():
data = user_form.cleaned_data
user.first_name = data['first_name']
user.last_name = data['last_name']
user.email = data['email']
user.save()
            return redirect('/entree/user/profile/')
else:
if 'edit' in request.GET:
editing = True
user_form = EditUserForm(
initial={
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email
}
)
else:
editing = False
user_form = None
context_dict['editing'] = editing
context_dict['user_form'] = user_form
return render(request, 'entree/profile.html', context_dict)
@login_required
def index(request):
if 'invalid_search' in request.GET:
context_dict['search_error'] = True
else:
context_dict['search_error'] = False
return render(request, 'entree/index.html', context_dict)
@login_required
def posts(request):
if 'city' not in request.GET:
        return redirect('/entree/?invalid_search=true')
city = request.GET['city'].lower()
if city == '':
        return redirect('/entree/?invalid_search=true')
if 'page' in request.GET:
page = int(request.GET['page'])
if not page >= 1:
page = 1
else:
page = 1 # start at the first page
context_dict['city'] = city.title()
context_dict['next_page'] = page + 1
posts = __search_flickr_posts(city, page)
for post in posts:
        if not FlickrPost.objects.filter(id=post['id']).exists():
# post not already in database
flickrpost = FlickrPost(
id=post['id'],
secret=post['secret'],
farm=post['farm'],
server=post['server'],
owner=post['owner'],
title=post['title'],
image_url=post['url'],
search_term=city,
page=page
)
flickrpost.save()
context_dict['post_list'] = FlickrPost.objects.filter(search_term=city).filter(page=page).order_by('-date_fetched')
return render(request, 'entree/posts.html', context_dict)
@login_required
def post_detail(request, photo_id):
try:
# get Flickr Post details
post = FlickrPost.objects.get(pk=photo_id)
context_dict['valid_post'] = True
if not post.latitude or not post.longitude:
photo = __get_flickr_post_info(photo_id)
post.latitude = float(photo['location']['latitude'])
post.longitude = float(photo['location']['longitude'])
            post.description = html.unescape(photo['description']['_content'])
post.save()
context_dict['post'] = post
# get Yelp reviews for that location
yelp = YelpClient.objects.get(pk=1)
auth = Oauth1Authenticator(
consumer_key=yelp.consumer_key,
consumer_secret=yelp.consumer_secret,
token=yelp.token,
token_secret=yelp.token_secret
)
client = Client(auth)
# get location that most closely matches the geolocation
best_business = None
alt_businesses = list()
yelp_api_error = False
try:
response = client.search_by_coordinates(post.latitude, post.longitude)
if response.businesses:
best_business = response.businesses[0]
if len(response.businesses) > 5:
alt_businesses = response.businesses[1:6]
elif len(response.businesses) > 1:
alt_businesses = response.businesses[1:]
        except Exception:
            yelp_api_error = True
context_dict['yelp_api_error'] = yelp_api_error
context_dict['best_business'] = best_business
context_dict['alt_businesses'] = alt_businesses
except FlickrPost.DoesNotExist:
context_dict['valid_post'] = False
return render(request, 'entree/post_detail.html', context_dict)
def __get_flickr_post_info(photo_id):
method = 'flickr.photos.getInfo'
params = {
'photo_id': photo_id
}
url = __build_flickr_rest_url(method, params)
response = simplejson.loads(__flickr_json_fix(requests.get(url).text))
return response['photo']
def __search_flickr_posts(city, page):
method = 'flickr.photos.search'
params = {
'tags': city + ',food',
'tag_mode': 'all', # only return photos that include all of the tags
'privacy_filter': 1, # 1 = public
'has_geo': 1, # only return images that have geolocation information
'per_page': NUM_RESULTS,
'page': page
}
url = __build_flickr_rest_url(method, params)
response = simplejson.loads(__flickr_json_fix(requests.get(url).text))
photos = response['photos']['photo']
image_url = 'https://farm{0}.staticflickr.com/{1}/{2}_{3}.jpg'
post_list = list()
    for post in photos:
post['url'] = image_url.format(post['farm'], post['server'], post['id'], post['secret'])
post_list.append(post)
return post_list
def __build_flickr_rest_url(method, params):
flickr = FlickrClient.objects.get(pk=1)
url = FLICKR_REST_ROOT_URL
url += method
params['api_key'] = flickr.api_key
params['format'] = 'json'
    for key, value in params.items():
        url += '&' + str(key) + '=' + str(value)
return url
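# NOTE: parameter values are interpolated without percent-encoding; for values
# that may contain reserved characters, urllib's urlencode() (or requests'
# params= argument) would be the safer way to build this query string.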
def __flickr_json_fix(json_string):
    # Removes the "jsonFlickrApi( ... )" wrapper around the response's JSON,
    # stripping only the wrapper itself so parentheses inside the payload survive
    if json_string.startswith('jsonFlickrApi(') and json_string.endswith(')'):
        return json_string[len('jsonFlickrApi('):-1]
    return json_string
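# Illustrative behaviour of the unwrapping above, showing why only the outer
# wrapper is stripped:
#   'jsonFlickrApi({"stat": "ok", "title": "a (test)"})'
#       -> '{"stat": "ok", "title": "a (test)"}'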
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import timeutils
import webob
from manila.api.v1 import share_types as types
from manila.api.views import types as views_types
from manila.common import constants
from manila import exception
from manila.share import share_types
from manila import test
from manila.tests.api import fakes
def stub_share_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5",
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true",
}
return dict(
id=id,
name='share_type_%s' % str(id),
extra_specs=specs,
required_extra_specs={
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true",
}
)
def return_share_types_get_all_types(context, search_opts=None):
return dict(
share_type_1=stub_share_type(1),
share_type_2=stub_share_type(2),
share_type_3=stub_share_type(3)
)
def return_empty_share_types_get_all_types(context, search_opts=None):
return {}
def return_share_types_get_share_type(context, id=1):
if id == "777":
raise exception.ShareTypeNotFound(share_type_id=id)
return stub_share_type(int(id))
def return_share_types_get_by_name(context, name):
if name == "777":
raise exception.ShareTypeNotFoundByName(share_type_name=name)
return stub_share_type(int(name.split("_")[2]))
@ddt.ddt
class ShareTypesApiTest(test.TestCase):
def setUp(self):
super(ShareTypesApiTest, self).setUp()
self.controller = types.ShareTypesController()
def test_share_types_index(self):
self.mock_object(share_types, 'get_all_types',
return_share_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['share_types']))
expected_names = ['share_type_1', 'share_type_2', 'share_type_3']
actual_names = map(lambda e: e['name'], res_dict['share_types'])
self.assertEqual(set(actual_names), set(expected_names))
for entry in res_dict['share_types']:
self.assertEqual('value1', entry['extra_specs']['key1'])
            self.assertIn('required_extra_specs', entry)
required_extra_spec = entry['required_extra_specs'].get(
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS, '')
self.assertEqual('true', required_extra_spec)
def test_share_types_index_no_data(self):
self.mock_object(share_types, 'get_all_types',
return_empty_share_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['share_types']))
def test_share_types_show(self):
self.mock_object(share_types, 'get_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(2, len(res_dict))
self.assertEqual('1', res_dict['share_type']['id'])
self.assertEqual('share_type_1', res_dict['share_type']['name'])
def test_share_types_show_not_found(self):
self.mock_object(share_types, 'get_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
def test_share_types_default(self):
self.mock_object(share_types, 'get_default_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
res_dict = self.controller.default(req)
self.assertEqual(2, len(res_dict))
self.assertEqual('1', res_dict['share_type']['id'])
self.assertEqual('share_type_1', res_dict['share_type']['name'])
def test_share_types_default_not_found(self):
self.mock_object(share_types, 'get_default_share_type',
mock.Mock(side_effect=exception.ShareTypeNotFound(
share_type_id="fake")))
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.default, req)
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_share_type = dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
required_extra_specs={},
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_share_type)
self.assertIn('share_type', output)
expected_share_type = dict(
name='new_type',
extra_specs={},
required_extra_specs={},
id=42,
)
self.assertDictMatch(output['share_type'], expected_share_type)
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_share_types = []
for i in range(0, 10):
raw_share_types.append(
dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
required_extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_share_types)
self.assertIn('share_types', output)
for i in range(0, 10):
expected_share_type = dict(
name='new_type',
extra_specs={},
required_extra_specs={},
id=42 + i
)
self.assertDictMatch(output['share_types'][i],
expected_share_type)
@ddt.data(None, True, 'true', 'false', 'all')
def test_parse_is_public_valid(self, value):
result = self.controller._parse_is_public(value)
        self.assertIn(result, (True, False, None))
def test_parse_is_public_invalid(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._parse_is_public,
'fakefakefake')
| |
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
The GPO Reference Aggregate Manager, showing how to implement
the GENI AM API. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
from __future__ import absolute_import
import base64
import datetime
import dateutil.parser
import dateutil.tz
import logging
import os
import xml.dom.minidom as minidom
import xmlrpclib
import zlib
from .SecureXMLRPCServer import SecureXMLRPCServer
from .util.cred_util import CredentialVerifier
from .util.tz_util import tzd
from .util.urn_util import publicid_to_urn
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = 'renewsliver'
CREATESLIVERPRIV = 'createsliver'
DELETESLIVERPRIV = 'deleteslice'
SLIVERSTATUSPRIV = 'getsliceresources'
SHUTDOWNSLIVERPRIV = 'shutdown'
# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = 'geni//gpo//gcf'
REFAM_MAXLEASE_DAYS = 365
class AggregateManager(object):
"""The public API for a GENI Aggregate Manager. This class provides the
XMLRPC interface and invokes a delegate for all the operations.
"""
def __init__(self, delegate):
self._delegate = delegate
def GetVersion(self):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
return self._delegate.GetVersion()
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
return self._delegate.ListResources(credentials, options)
def CreateSliver(self, slice_urn, credentials, rspec, users):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
return self._delegate.CreateSliver(slice_urn, credentials, rspec, users)
def DeleteSliver(self, slice_urn, credentials):
"""Delete the given sliver. Return true on success."""
return self._delegate.DeleteSliver(slice_urn, credentials)
def SliverStatus(self, slice_urn, credentials):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.'''
return self._delegate.SliverStatus(slice_urn, credentials)
def RenewSliver(self, slice_urn, credentials, expiration_time):
"""Extend the life of the given sliver until the given
expiration time. Return False on error."""
return self._delegate.RenewSliver(slice_urn, credentials,
expiration_time)
def Shutdown(self, slice_urn, credentials):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
return self._delegate.Shutdown(slice_urn, credentials)
class PrintingAggregateManager(object):
"""A dummy AM that prints the called methods."""
def GetVersion(self):
print 'GetVersion()'
return 1
def ListResources(self, credentials, options):
compressed = False
if options and 'geni_compressed' in options:
compressed = options['geni_compressed']
print 'ListResources(compressed=%r)' % (compressed)
# return an empty rspec
result = '<rspec type="GCF"/>'
if compressed:
result = xmlrpclib.Binary(zlib.compress(result))
return result
def CreateSliver(self, slice_urn, credentials, rspec, users):
print 'CreateSliver(%r)' % (slice_urn)
return '<rspec type="GCF"/>'
def DeleteSliver(self, slice_urn, credentials):
print 'DeleteSliver(%r)' % (slice_urn)
return False
def SliverStatus(self, slice_urn, credentials):
print 'SliverStatus(%r)' % (slice_urn)
raise Exception('No such slice.')
def RenewSliver(self, slice_urn, credentials, expiration_time):
        print 'RenewSliver(%r, %r)' % (slice_urn, expiration_time)
return False
def Shutdown(self, slice_urn, credentials):
print 'Shutdown(%r)' % (slice_urn)
return False
class AggregateManagerServer(object):
"""An XMLRPC Aggregate Manager Server. Delegates calls to given delegate,
or the default printing AM."""
def __init__(self, addr, delegate=None, keyfile=None, certfile=None,
ca_certs=None, base_name=None):
# ca_certs arg here must be a file of concatenated certs
if ca_certs is None:
raise Exception('Missing CA Certs')
elif not os.path.isfile(os.path.expanduser(ca_certs)):
raise Exception('CA Certs must be an existing file of accepted root certs: %s' % ca_certs)
# FIXME: set logRequests=true if --debug
self._server = SecureXMLRPCServer(addr, keyfile=keyfile,
certfile=certfile, ca_certs=ca_certs)
if delegate is None:
delegate = PrintingAggregateManager()
self._server.register_instance(AggregateManager(delegate))
# Set the server on the delegate so it can access the
# client certificate.
delegate._server = self._server
        if base_name is not None:
global RESOURCE_NAMESPACE
RESOURCE_NAMESPACE = base_name
def serve_forever(self):
self._server.serve_forever()
def register_instance(self, instance):
# Pass the AM instance to the generic XMLRPC server,
# which lets it know what XMLRPC methods to expose
self._server.register_instance(instance)
class Resource(object):
"""A Resource has an id, a type, and a boolean indicating availability."""
STATUS_CONFIGURING = 'configuring'
STATUS_READY = 'ready'
STATUS_FAILED = 'failed'
STATUS_UNKNOWN = 'unknown'
STATUS_SHUTDOWN = 'shutdown'
def __str__(self):
return ("ID: %d, Type: %s, Available: %s, Status: %s" %
(self._id, self._type, self.available, self.status))
def __init__(self, id, type):
self._id = id
self._type = type
self.available = True
self.status = Resource.STATUS_UNKNOWN
def urn(self):
        # Used in SliverStatus
publicid = 'IDN %s//resource//%s_%s' % (RESOURCE_NAMESPACE, self._type, str(self._id))
return publicid_to_urn(publicid)
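    # Illustrative: Resource(1, 'Nothing') under the default namespace yields the
    # public identifier 'IDN geni//gpo//gcf//resource//Nothing_1', which
    # publicid_to_urn transcribes into a 'urn:publicid:...' URN per RFC 3151.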
def toxml(self):
template = ('<resource><urn>%s</urn><type>%s</type><id>%s</id>'
+ '<available>%r</available></resource>')
return template % (self.urn(), self._type, self._id, self.available)
def __eq__(self, other):
return self._id == other._id
    def __ne__(self, other):
        return self._id != other._id
@classmethod
def fromdom(cls, element):
"""Create a Resource instance from a DOM representation."""
type = element.getElementsByTagName('type')[0].firstChild.data
id = int(element.getElementsByTagName('id')[0].firstChild.data)
return Resource(id, type)
class Sliver(object):
"""A sliver has a URN, a list of resources, and an expiration time in UTC."""
    def __init__(self, urn, expiration=None):
        self.urn = urn.replace("+slice+", "+sliver+")
        self.resources = list()
        # avoid evaluating utcnow() as a default argument: that would fix the
        # expiration at class-definition time rather than at instantiation
        if expiration is None:
            expiration = datetime.datetime.utcnow()
        self.expiration = expiration
def status(self):
"""Determine the status of the sliver by examining the status
of each resource in the sliver.
"""
# If any resource is 'shutdown', the sliver is 'shutdown'
# Else if any resource is 'failed', the sliver is 'failed'
# Else if any resource is 'configuring', the sliver is 'configuring'
# Else if all resources are 'ready', the sliver is 'ready'
# Else the sliver is 'unknown'
rstat = [res.status for res in self.resources]
if Resource.STATUS_SHUTDOWN in rstat:
return Resource.STATUS_SHUTDOWN
elif Resource.STATUS_FAILED in rstat:
return Resource.STATUS_FAILED
elif Resource.STATUS_CONFIGURING in rstat:
return Resource.STATUS_CONFIGURING
        elif all(stat == Resource.STATUS_READY for stat in rstat):
# All resources report status of ready
return Resource.STATUS_READY
else:
return Resource.STATUS_UNKNOWN
class ReferenceAggregateManager(object):
'''A reference Aggregate Manager that manages fake resources.'''
# root_cert is a single cert or dir of multiple certs
# that are trusted to sign credentials
def __init__(self, root_cert):
self._slivers = dict()
self._resources = [Resource(x, 'Nothing') for x in range(10)]
self._cred_verifier = CredentialVerifier(root_cert)
self.max_lease = datetime.timedelta(days=REFAM_MAXLEASE_DAYS)
self.logger = logging.getLogger('gcf-am.reference')
def GetVersion(self):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
self.logger.info("Called GetVersion")
# FIXME: Fill in correct values for others
# url
# urn
# hostname
# code_tag
# hrn
default_ad = dict(type="GCF", version="0.1")
# FIXME: Those schema/namespace values are bogus. But the spec also says they are optional.
gcf_req = dict(type="GCF",
version="0.1",
schema="http://www.geni.net/resources/rspec/0.1/gcf-request.xsd",
namespace="http://www.geni.net/resources/rspec/0.1",
extensions=[])
gcf_ad = dict(type="GCF",
version="0.1",
schema="http://www.geni.net/resources/rspec/0.1/gcf-ad.xsd",
namespace="http://www.geni.net/resources/rspec/0.1",
extensions=[])
pgv2_req = dict(type="ProtoGENI",
version="2",
schema="http://www.protogeni.net/resources/rspec/2/request.xsd",
namespace="http://www.protogeni.net/resources/rspec/2",
extensions=[])
pgv2_ad = dict(type="ProtoGENI",
version="2",
schema="http://www.protogeni.net/resources/rspec/2/ad.xsd",
namespace="http://www.protogeni.net/resources/rspec/2",
extensions=[])
request_versions = [gcf_req, pgv2_req]
ad_versions = [gcf_ad, pgv2_ad]
versions = dict(default_ad_rspec=default_ad,
geni_api=1,
request_rspec_versions=request_versions,
ad_rspec_versions=ad_versions)
return versions
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
self.logger.info('ListResources(%r)' % (options))
slice_urn = None
if options and 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
# could require list or listnodes?
privileges = ()
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if not options:
options = dict()
# Look to see what RSpec version the client requested
if 'rspec_version' in options:
# we only have one, so nothing to do here
# But an AM with multiple formats supported
# would use this to decide how to format the return.
# Can also error-check that the input value is supported.
rspec_type = options['rspec_version']['type']
if isinstance(rspec_type, str):
rspec_type = rspec_type.lower().strip()
rspec_version = options['rspec_version']['version']
            if rspec_type != 'gcf':  # rspec_type was lower()ed above
self.logger.warn("Returning GCF rspec even though request said %s", rspec_type)
self.logger.info("ListResources requested rspec %s (%s)", rspec_type, rspec_version)
if 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
if slice_urn in self._slivers:
sliver = self._slivers[slice_urn]
result = ('<rspec type="GCF">'
+ ''.join([x.toxml() for x in sliver.resources])
+ '</rspec>')
else:
# return an empty rspec
result = '<rspec type="GCF"/>'
elif 'geni_available' in options and options['geni_available']:
# only include available items
result = ('<rspec type="GCF">' + ''.join([x.toxml() for x in self._resources])
+ '</rspec>')
# To make this AM return a fixed RSpec do:
# rspecfile = open('/tmp/sample-of-ad-rspec.xml')
# result = ''
# for line in rspecfile:
# result += line
# rspecfile.close()
else:
all_resources = list()
all_resources.extend(self._resources)
for sliver in self._slivers:
all_resources.extend(self._slivers[sliver].resources)
result = ('<rspec type="GCF">' + ''.join([x.toxml() for x in all_resources])
+ '</rspec>')
# self.logger.debug('Returning resource list %s', result)
# To make this AM return a fixed RSpec do:
# rspecfile = open('/tmp/sample-of-ad-rspec.xml')
# result = ''
# for line in rspecfile:
# result += line
# rspecfile.close()
# Optionally compress the result
if 'geni_compressed' in options and options['geni_compressed']:
try:
result = base64.b64encode(zlib.compress(result))
except Exception, exc:
import traceback
self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
raise Exception("Server error compressing resource list", exc)
return result
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def CreateSliver(self, slice_urn, credentials, rspec, users):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
self.logger.info('CreateSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (CREATESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slivers:
self.logger.error('Sliver %s already exists.' % slice_urn)
raise Exception('Sliver %s already exists.' % slice_urn)
rspec_dom = None
try:
rspec_dom = minidom.parseString(rspec)
except Exception, exc:
self.logger.error("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
raise Exception("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
# Look at the version of the input request RSpec
# Make sure it is supported
# Then make sure that you return an RSpec in the same format
# EG if both V1 and V2 are supported, and the user gives V2 request,
# then you must return a V2 request and not V1
resources = list()
for elem in rspec_dom.documentElement.getElementsByTagName('resource'):
resource = None
try:
resource = Resource.fromdom(elem)
except Exception, exc:
import traceback
self.logger.warning("Failed to parse resource from RSpec dom: %s", traceback.format_exc())
raise Exception("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
if resource not in self._resources:
self.logger.info("Requested resource %d not available" % resource._id)
raise Exception('Resource %d not available' % resource._id)
resources.append(resource)
# determine max expiration time from credentials
# do not create a sliver that will outlive the slice!
expiration = datetime.datetime.utcnow() + self.max_lease
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
if credexp < expiration:
expiration = credexp
sliver = Sliver(slice_urn, expiration)
# remove resources from available list
for resource in resources:
sliver.resources.append(resource)
self._resources.remove(resource)
resource.available = False
resource.status = Resource.STATUS_READY
self._slivers[slice_urn] = sliver
self.logger.info("Created new sliver for slice %s" % slice_urn)
return ('<rspec type="GCF">' + ''.join([x.toxml() for x in sliver.resources])
+ '</rspec>')
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def DeleteSliver(self, slice_urn, credentials):
'''Stop and completely delete the named sliver, and return True.'''
self.logger.info('DeleteSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (DELETESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slivers:
sliver = self._slivers[slice_urn]
if sliver.status() == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not deleted because it is shutdown",
slice_urn)
return False
# return the resources to the pool
self._resources.extend(sliver.resources)
for resource in sliver.resources:
resource.available = True
resource.status = Resource.STATUS_UNKNOWN
del self._slivers[slice_urn]
self.logger.info("Sliver %r deleted" % slice_urn)
return True
else:
self._no_such_slice(slice_urn)
def SliverStatus(self, slice_urn, credentials):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.
Return a dict of sliver urn, status, and a list of dicts resource
statuses.'''
# Loop over the resources in a sliver gathering status.
self.logger.info('SliverStatus(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (SLIVERSTATUSPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slivers:
sliver = self._slivers[slice_urn]
# Now calculate the status of the sliver
res_status = list()
for res in sliver.resources:
# Gather the status of all the resources
# in the sliver. This could be actually
# communicating with the resources, or simply
# reporting the state of initialized, started, stopped, ...
res_status.append(dict(geni_urn=res.urn(),
geni_status=res.status,
geni_error=''))
self.logger.info("Calculated and returning sliver %r status" % slice_urn)
return dict(geni_urn=sliver.urn,
geni_status=sliver.status(),
geni_resources=res_status)
else:
self._no_such_slice(slice_urn)
def RenewSliver(self, slice_urn, credentials, expiration_time):
'''Renew the local sliver that is part of the named Slice
until the given expiration time (in UTC with a TZ per RFC3339).
Requires at least one credential that is valid until then.
Return False on any error, True on success.'''
self.logger.info('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
privileges = (RENEWSLIVERPRIV,)
try:
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# All the credentials we just got are valid
if slice_urn in self._slivers:
# If any credential will still be valid at the newly
# requested time, then we can do this.
sliver = self._slivers.get(slice_urn)
if sliver.status() == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not renewed because it is shutdown",
slice_urn)
return False
requested = dateutil.parser.parse(str(expiration_time), tzinfos=tzd)
# Per the AM API, the input time should be TZ-aware
# But since the slice cred may not (per ISO8601), convert
# it to naiveUTC for comparison
requested = self._naiveUTC(requested)
lastexp = 0
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
lastexp = credexp
if credexp >= requested:
sliver.expiration = requested
self.logger.info("Sliver %r now expires on %r", slice_urn, expiration_time)
return True
else:
self.logger.debug("Valid cred %r expires at %r before %r", cred, credexp, requested)
# Fell through then no credential expires at or after
# newly requested expiration time
self.logger.info("Can't renew sliver %r until %r because none of %d credential(s) valid until then (last expires at %r)", slice_urn, expiration_time, len(creds), str(lastexp))
# FIXME: raise an exception so the client knows what
# really went wrong?
return False
else:
self._no_such_slice(slice_urn)
def Shutdown(self, slice_urn, credentials):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
self.logger.info('Shutdown(%r)' % (slice_urn))
privileges = (SHUTDOWNSLIVERPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slivers:
sliver = self._slivers[slice_urn]
for resource in sliver.resources:
resource.status = Resource.STATUS_SHUTDOWN
self.logger.info("Sliver %r shut down" % slice_urn)
return True
else:
self.logger.info("Shutdown: No such slice: %s.", slice_urn)
self._no_such_slice(slice_urn)
def _no_such_slice(self, slice_urn):
"""Raise a no such slice exception."""
fault_code = 'No such slice.'
fault_string = 'The slice named by %s does not exist' % (slice_urn)
self.logger.warning(fault_string)
raise xmlrpclib.Fault(fault_code, fault_string)
def _naiveUTC(self, dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
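# Illustrative: given a timezone-aware 2011-07-01T12:00:00-04:00, _naiveUTC()
# returns the naive datetime 2011-07-01 16:00:00; an already-naive input is
# returned unchanged.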
| |
from __future__ import absolute_import
from __future__ import unicode_literals
from urlparse import urlparse
from uuid import uuid4
from webfriend import exceptions
from webfriend import rpc, utils
from webfriend.scripting.commands.base import CommandProxy
from webfriend.scripting.execute import execute_script
from webfriend.scripting.environment import Environment
from webfriend.scripting.scope import Scope
from webfriend.scripting.parser.exceptions import UserError
from webfriend.utils import autotype
from webfriend.utils.docs import document_commands
import json
import logging
import os
import re
import time
import urlnorm
class CoreProxy(CommandProxy):
"""
These represent very common tasks that one is likely to perform in a browser, such as
navigating to URLs, filling in form fields, and performing input with the mouse and keyboard.
"""
default_referrer_prefix = 'https://github.com/ghetzel/webfriend'
@classmethod
def qualify(cls, name):
return name
def configure(
self,
events=None,
demo=None,
user_agent=None,
extra_headers=None,
cache=None,
console=None,
referrer_prefix=None
):
"""
Configures various features of the Remote Debugging protocol and provides environment
setup.
#### Arguments
- **events** (`list`, optional):
A list of strings specifying what kinds of events Chrome should send to this
client. Valid values are: `console`, `dom`, `network`, `page`.
- **demo** (`dict`, optional):
A section describing various runtime options useful for demonstrations and
walkthroughs.
- **delay** (`int`, optional):
If specified, scripts will sleep for this amount of time (in milliseconds) between
each command that is processed. This is useful for slowing down the visual
effects of commands to a rate that is easier to see.
- **user_agent** (`str`, optional):
If specified, this will be the User-Agent header value that is sent with all HTTP(S)
requests initiated from here on.
- **extra_headers** (`dict`, optional):
If specified, these headers will be included in all HTTP(S) requests initiated from
here on. An empty dict will clear previously set headers.
- **cache** (`bool`, optional):
Whether caching is enabled or not for this session.
- **console** (`bool`, optional):
Whether console messages emitted from pages are logged to standard error.
- **referrer_prefix** (`str`, optional):
The domain portion of the "Referer" header to send.
"""
if events and hasattr(events, 'values') and isinstance(events.values, list):
for domain in events.values:
domain = str(domain).lower()
if hasattr(self.tab, domain):
r = getattr(self.tab, domain)
if isinstance(r, rpc.Base):
logging.info('Enabling events for domain "{}"'.format(domain))
r.enable()
if isinstance(demo, dict):
if 'delay' in demo:
self.environment.set_execution_option(
'demo.post_command_delay',
float(demo['delay'])
)
if isinstance(user_agent, basestring):
self.tab.network.set_user_agent(user_agent)
if isinstance(extra_headers, dict):
self.tab.network.set_headers(extra_headers)
if cache is True:
self.tab.network.enable_cache()
elif cache is False:
self.tab.network.disable_cache()
if console is True:
self.tab.enable_console_messages()
else:
self.tab.disable_console_messages()
if referrer_prefix:
self._referrer_prefix = referrer_prefix
else:
self._referrer_prefix = self.default_referrer_prefix
def go(
self,
uri,
referrer='random',
wait_for_load=True,
timeout=30000,
clear_requests=True,
continue_on_error=False,
continue_on_timeout=True,
load_event_name='Page.loadEventFired'
):
"""
        Navigate to a URL.
#### Arguments
- **referrer** (`str`, optional):
If a URL is specified, it will be used as the HTTP Referer [sic] header field when
going to the given page. If the URL of the currently-loaded page and the referrer
            are the same, the page will not change.
For this reason, you may specify the special value 'random', which will generate a
URL with a randomly generated path component to ensure that it is always different
from the current page. Specifying None will omit the field from the request.
- **wait_for_load** (`bool`):
Whether to block until the page has finished loading.
- **timeout** (`int`):
            The amount of time, in milliseconds, to wait for the page to load.
- **clear_requests** (`bool`, optional):
Whether the resources stack that is queried in [page::resources](#pageresources) and
[page::resource](#pageresource) is cleared before navigating. Set this to _false_ to
preserve the ability to retrieve data that was loaded on previous pages.
- **continue_on_error** (`bool`, optional):
Whether to continue execution if an error is encountered during page load (e.g.: HTTP
4xx/5xx, SSL, TCP connection errors).
- **continue_on_timeout** (`bool`, optional):
Whether to continue execution if **load_event_name** is not seen before **timeout**
elapses.
- **load_event_name** (`str`, optional):
The RPC event to wait for before proceeding to the next command.
#### Returns
The URL that was loaded (`str`)
"""
        if referrer == 'random':
referrer = '{}/{}'.format(
self._referrer_prefix.rstrip('/'),
uuid4()
)
if clear_requests:
            # since we're explicitly navigating, clear the network requests
self.tab.dom.clear_requests()
uri_p = urlparse(uri)
if not len(uri_p.scheme):
uri = 'https://{}'.format(uri)
elif uri_p.scheme != 'file':
uri = urlnorm.norm(uri)
reply = self.tab.page.navigate(uri, referrer=referrer)
if wait_for_load and load_event_name:
try:
self.tab.wait_for(load_event_name, timeout=timeout)
except exceptions.TimeoutError:
if continue_on_timeout:
logging.error('Timed out waiting for {} event after {}ms.'.format(
load_event_name,
timeout
))
else:
raise
except exceptions.WebfriendError as e:
if continue_on_error:
logging.error('Got exception while navigating, but proceeding: {}'.format(e))
else:
raise
return reply
def reload(self):
self.tab.page.reload()
def stop(self):
self.tab.page.stop()
def wait(self, milliseconds):
"""
Pauses execution of the current script for the given number of milliseconds.
#### Arguments
- **milliseconds** (`int`):
The number of milliseconds to sleep for; can be fractional.
#### Returns
The number of milliseconds.
"""
time.sleep(milliseconds / 1e3)
return milliseconds
def resize(
self,
width=0,
height=0,
scale=0,
mobile=False,
fit_window=False,
orientation=None,
angle=0
):
"""
Resizes the active viewport of the current page using the Chrome Device Emulation API. This
does not resize the window itself, but rather the area the current page interprets the
window to be.
This is useful for setting the size of the area that will be rendered for screenshots and
screencasts.
#### Arguments
- **width** (`int`, optional):
The desired width of the viewport.
- **height** (`int`, optional):
The desired height of the viewport.
- **scale** (`float`, optional):
The scaling factor of the content.
- **mobile** (`bool`, dict, optional):
Whether to emulate a mobile device or not. If a dict is provided, mobile emulation
will be enabled and configured using the following keys:
- *width* (`int`, optional):
The width of the mobile screen to emulate.
- *height* (`int`, optional):
The height of the mobile screen to emulate.
- *x* (`int`, optional):
The horizontal position of the currently viewable portion of the mobile screen.
- *y* (`int`, optional):
The vertical position of the currently viewable portion of the mobile screen.
- **fit_window** (`bool`, optional):
Whether to fit the viewport contents to the available area or not.
- **orientation** (`str`, optional):
Which screen orientation to emulate, if any. Can be one of: `portraitPrimary`,
`portraitSecondary`, `landscapePrimary`, `landscapeSecondary`.
- **angle** (`int`, optional):
The angle of the screen to emulate (in degrees; 0-360).
#### Returns
A `dict` containing the resulting *width* and *height* as keys.
"""
if hasattr(mobile, 'as_dict'):
_mobile = True
cfg = mobile.as_dict()
mobile_sw = cfg.get('width', width)
mobile_sh = cfg.get('height', height)
mobile_x = cfg.get('x', 0)
mobile_y = cfg.get('y', 0)
else:
_mobile = False
mobile_sw = 0
mobile_sh = 0
mobile_x = 0
mobile_y = 0
self.tab.emulation.set_device_metrics_override(
width=width,
height=height,
device_scale_factor=scale,
mobile=_mobile,
fit_window=fit_window,
screen_width=mobile_sw,
screen_height=mobile_sh,
position_x=mobile_x,
position_y=mobile_y,
screen_orientation_type=orientation,
screen_orientation_angle=angle,
)
return {
'width': width,
'height': height,
}
def put(self, *args, **kwargs):
"""
Store a value in the current scope. Strings will be automatically converted into the
appropriate data types (float, int, bool) if possible.
#### Arguments
If a single argument is given, automatic type detection will be applied to it and the
resulting value will be returned.
If options are provided, they will be interpreted as an object, each of whose values will
have automatic type detection applied. The resulting object will be returned.
#### Returns
The given value with automatic type detection applied.
"""
if len(args) == 1:
return utils.autotype(args[0])
elif len(args) > 1:
return [utils.autotype(a) for a in args]
elif len(kwargs):
return dict([
(k, utils.autotype(v)) for k, v in kwargs.items()
])
return None
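    # Illustrative results, assuming autotype() coerces numeric and boolean
    # strings (behaviour of webfriend.utils.autotype):
    #   put('42')          -> 42
    #   put('1.5', 'true') -> [1.5, True]
    #   put(a='1', b='x')  -> {'a': 1, 'b': 'x'}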
def log(self, line=None, level='info', indent=4, **kwargs):
"""
Outputs a line to the log.
#### Arguments
- **line** (`str`):
A line of text that will have all current variables, as well as any given
kwargs, interpolated into it using the Python format() function.
- **level** (`str`):
            The logging severity level to log at. Can be one of: 'debug', 'info', 'warning',
            or 'error'.
- **indent** (`int`):
If 'line' is a dictionary, list, or tuple, it will be printed as a JSON document
with an indentation of this many spaces per level. The special value -1 will
disable JSON serialization for these types.
- **kwargs**:
All remaining arguments will be passed along to format() when interpolating 'line'.
#### Returns
None
#### Raises
`AttributeError` if the specified log level is not known.
"""
# handle the case where we want to log a data structure without options or a format string
if line is None:
line = kwargs
if hasattr(self.environment.log, level):
if isinstance(line, (dict, list, tuple)) and indent >= 0:
                try:
                    line = json.dumps(line, indent=indent)
                except (TypeError, ValueError):
                    # fall back to logging the raw object if it isn't JSON-serializable
                    pass
# actually log the line
getattr(self.environment.log, level)(line)
return None
else:
raise AttributeError("Unknown log level '{}'".format(level))
def fail(self, message):
"""
Immediately exit the script in an error-like fashion with a specific message.
#### Arguments
- **message** (`str`):
The message to display whilst exiting immediately.
#### Raises
- `webfriend.exceptions.UserError`
"""
self.environment.log.error(message)
raise UserError(message)
def rpc(self, method, **kwargs):
"""
Directly call an RPC method with the given parameters.
#### Arguments
- **method** (`str`):
The name of the backend RPC method to call.
- **kwargs**:
Zero of more arguments to pass in the 'params' section of the RPC call.
#### Returns
A `dict` representation of the `webfriend.rpc.Reply` class.
"""
return self.tab.rpc(method, **kwargs).as_dict()
def wait_for(self, event_name, timeout=30000, match=None):
"""
Block until a specific event is received, or until **timeout** elapses (whichever comes
first).
#### Arguments
- **event_name** (`str`):
The name of the event to wait for.
- **timeout** (`int`):
The timeout, in milliseconds, before raising a `webfriend.exceptions.TimeoutError`.
- **match** (`dict`, optional):
If specified, all keys in the given object must correspond to keys in the received
event payload, and the values must match. Regular expressions must match the
corresponding payload value, and all other types must match exactly.
#### Returns
`webfriend.rpc.Event`
#### Raises
`webfriend.exceptions.TimeoutError`
"""
if isinstance(match, dict):
started_at = time.time()
eventstream = self.tab.wait_for_caller_response(event_name, timeout=timeout)
for event in eventstream:
if event.matches_criteria(match):
try:
eventstream.send(True)
except StopIteration:
pass
return {
'sequence': [event],
'duration': (time.time() - started_at),
}
else:
return self.tab.wait_for(event_name, timeout=timeout)
def wait_for_idle(self, idle, events=[], timeout=30000, poll_interval=250):
"""
Blocks for a specified amount of time _after_ an event has been received, or until
**timeout** elapses (whichever comes first).
This is useful for waiting for events to occur after performing an action, then giving some
amount of time for those events to "settle" (e.g.: allowing the page time to react to those
events without knowing ahead of time what, if any, listeners will be responding.) A common
use case for this would be to wait a few seconds _after_ a resize has occurred for anything
that just loaded to finish doing so.
#### Arguments
- **idle** (`int`):
The amount of time, in milliseconds, that the event stream should be idle before
returning.
- **events** (`list`, optional):
If not empty, the **idle** time will be interpreted as the amount of time since _any
of these specific events_ have occurred. The default is to wait for the browser to be
idle with respect to _any_ events.
- **timeout** (`int`):
The maximum amount of time to wait before raising a
`webfriend.exceptions.TimeoutError`.
- **poll_interval** (`int`):
How often to check the event timings to see if the idle time has elapsed.
#### Returns
An `int` representing the number of milliseconds we waited for.
#### Raises
`webfriend.exceptions.TimeoutError`
"""
if hasattr(events, 'values'):
events = events.values
return self.tab.wait_for_idle(
idle,
events=events,
timeout=timeout,
poll_interval=poll_interval
)
def wait_for_load(self, timeout=30000, idle_time=500):
"""
Blocks until the "Page.loadEventFired" event has fired, or until timeout elapses (whichever
comes first).
#### Arguments
        - **timeout** (`int`):
            The timeout, in milliseconds, before raising a `webfriend.exceptions.TimeoutError`.
        - **idle_time** (`int`, optional):
            If non-zero, wait until the event stream has been idle for this many milliseconds
            after the load event fires, rather than returning as soon as the event is seen.
#### Returns
`webfriend.rpc.Event`
#### Raises
`webfriend.exceptions.TimeoutError`
"""
if idle_time:
return self.tab.wait_for_idle(idle_time, events=[
'Page.loadEventFired',
], timeout=timeout)
else:
return self.tab.wait_for('Page.loadEventFired', timeout=timeout)
def type(self, text, **kwargs):
"""
See: `webfriend.rpc.Input.type_text`
"""
return self.tab.input.type_text(text, **kwargs)
def focus(self, selector):
"""
Focuses the given HTML element described by **selector**. One and only one element may
match the selector.
#### Arguments
- **selector** (`str`):
The page element to focus, given as a CSS-style selector, an ID (e.g. "#myid"), or an
XPath query (e.g.: "xpath://body/p").
#### Returns
The matching `webfriend.rpc.DOMElement` that was given focus.
#### Raises
- `webfriend.exceptions.EmptyResult` if zero elements were matched, or
- `webfriend.exceptions.TooManyResults` if more than one element was matched.
"""
elements = self.tab.dom.query_all(selector)
self.tab.dom.ensure_unique_element(selector, elements)
element = elements['nodes'][0]
self.tab.dom.focus(element.id)
return element
def click(self, selector=None, x=None, y=None, unique_match=True, **kwargs):
"""
Click on HTML element(s) or on a specific part of the page. More complex click operations
are supported (e.g.: double clicking, drag and drop) by supplying **x**/**y** coordinates
directly.
#### Arguments
- **selector** (`str`, optional):
The page element to click on, given as a CSS-style selector, an ID (e.g. "#myid"), or an
XPath query (e.g.: "xpath://body/p").
- **x** (`int`, optional):
If **selector** is not specified, this is the X-coordinate component of the location
to click at.
- **y** (`int`, optional):
If **selector** is not specified, this is the Y-coordinate component of the location
to click at.
- **unique_match** (`bool`):
For **selector** matches, whether there must be one and only one matched element to
click on. If false, every matched element will be clicked on in the order in which
they were matched.
- **kwargs**:
Only applies to **x**/**y** click events, see: `webfriend.rpc.Input.click_at`.
#### Returns
A `list` of elements that were clicked on.
#### Raises
For **selector**-based events:
- `webfriend.exceptions.EmptyResult` if zero elements were matched, or
- `webfriend.exceptions.TooManyResults` if more than one element was matched.
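#### Example
Two illustrative calls (`client` stands in for this command proxy; the selector is an
assumed example):
    # click exactly one matched element
    clicked = client.click('a.next-page')
    # or click at page coordinates, passing options through to click_at()
    clicked = client.click(x=100, y=240)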
"""
if selector:
elements = self.tab.dom.select_nodes(selector)
results = []
if unique_match:
self.tab.dom.ensure_unique_element(selector, elements)
for element in elements['nodes']:
results.append(element.click())
return results
elif x is None or y is None:
raise ValueError("Either 'selector' or 'x' and 'y' must be specified")
return [
self.tab.input.click_at(x, y, **kwargs)
]
def field(self, selector, value, autoclear=True):
"""
Locate and enter data into a form input field.
#### Arguments
- **selector** (`str`):
The page element to enter data into, given as a CSS-style selector, an ID
(e.g. "#myid"), or an XPath query (e.g.: "xpath://body/p").
- **value** (`str`):
The text value to type into the located field.
- **autoclear** (`bool`, optional):
Whether to clear the existing contents of the field before entering new data.
#### Returns
The text that was entered, as a string.
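#### Example
A minimal sketch (illustrative only; the selector and value are assumed examples):
    client.field('#username', value='admin', autoclear=True)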
"""
if not isinstance(value, basestring):
    raise ValueError("'value' must be a string")
elements = self.tab.dom.select_nodes(selector)
field = self.tab.dom.ensure_unique_element(selector, elements)
if autoclear:
field['value'] = ''
field.focus()
return self.type(value)
def scroll_to(self, selector=None, x=None, y=None):
"""
Scroll the viewport to the given location, either that of the named element or, if
provided, the specific (X,Y) coordinates relative to the top-left of the current page.
#### Arguments
- **selector** (`str`, optional):
The page element to scroll to, given as a CSS-style selector, an ID (e.g. "#myid"), or
an XPath query (e.g.: "xpath://body/p").
- **x**, **y** (`int`, optional):
If both **x** and **y** are provided, these are the coordinates to scroll to.
#### Returns
The result of the scroll operation.
"""
if selector:
elements = self.tab.dom.select_nodes(selector)
self.tab.dom.ensure_unique_element(selector, elements)
return elements['nodes'][0].scroll_to()
elif x is None or y is None:
raise ValueError("Either 'selector' or 'x' and 'y' must be specified")
return self.tab.dom.root.scroll_to(x, y)
def select(self, *args, **kwargs):
"""
See: `webfriend.rpc.DOM.select_nodes`
"""
return self.tab.dom.select_nodes(*args, **kwargs)
def xpath(self, *args, **kwargs):
"""
See: `webfriend.rpc.DOM.xpath`
"""
return self.tab.dom.xpath(*args, **kwargs)
def switch_root(self, selector=None):
"""
Change the current selector scope to be rooted at the given element.
"""
if selector is None:
return self.tab.dom.root_to_top()
else:
return self.tab.dom.root_to(selector)
def tabs(self, sync=True):
"""
Return a description of all of the currently-open tabs.
#### Arguments
- **sync** (`bool`):
Whether to perform a preemptive sync with the browser before returning the tab
descriptions.
#### Returns
A `list` of `dicts` describing all browser tabs currently open. Each `dict` will at least
contain the keys:
- *id* (`str`):
The tab ID that can be used with other tab management commands.
- *url* (`str`):
The URL of the tab being described.
- *webSocketDebuggerUrl* (`str`):
The URL of the inspection Websocket used to issue and receive RPC traffic.
- *target* (`bool`):
Whether the tab being described is the active tab that other commands will be issued
against.
"""
if sync:
self.browser.sync()
return [
t.as_dict() for t in self.browser.tabs.values()
]
def new_tab(self, url, width=None, height=None, autoswitch=True):
"""
Open a new tab and navigate to the given URL.
#### Arguments
- **url** (`str`):
The URL that the new tab will be navigated to.
- **width**, **height** (`int`, optional):
If provided, these represent the width and height (in pixels) that the new tab should
be created with.
- **autoswitch** (`bool`, optional):
Whether to automatically switch to the newly-created tab as the active tab for
subsequent commands.
#### Returns
A `str` representing the ID of the newly-created tab.
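#### Example
A minimal sketch (illustrative only; the URL and dimensions are assumed examples):
    tab_id = client.new_tab('https://example.com', width=1280, height=800)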
"""
tab_id = self.browser.create_tab(url, width=width, height=height)
if autoswitch:
self.browser.switch_to_tab(tab_id)
return tab_id
def switch_tab(self, tab_id):
"""
See: `webfriend.browser.Chrome.switch_to_tab`
"""
self.browser.switch_to_tab(tab_id)
return self.tabs(sync=False)
def close_tab(self, tab_id=None):
"""
Close the tab identified by the given ID.
#### Arguments
- **tab_id** (`str`):
The ID of the tab to close.
#### Returns
A `bool` value representing whether the tab was closed successfully or not.
"""
if not tab_id:
tab_id = self.browser.default_tab
return self.browser.close_tab(tab_id)
def javascript(self, body=None, file=None, expose_variables=True):
"""
Inject Javascript into the current page, evaluate it, and return the results. The script
is wrapped in an anonymous function whose return value will be returned from this command
as a native data type.
By default, scripts will have access to all local variables in the calling script that are
defined at the time of invocation. They are available to injected scripts as a plain
object accessible using the `this` variable.
#### Arguments
- **body** (`str`, optional):
A string value that represents the script to be injected and executed.
- **file** (`str`, optional):
A filename whose contents will be loaded and injected into the browser.
- **expose_variables** (`bool`):
Whether to expose all local variables to the injected script or not.
#### Returns
Whatever data was returned from the injected script using a `return` statement,
automatically parsed into native data types. Objects, arrays, and all scalar types are
supported as return values.
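#### Example
A minimal sketch (illustrative only; assumes a local variable `name` is defined in the
calling scope, which the injected script reads via `this`):
    title = client.javascript(body='return document.title + " / " + this.name')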
"""
if not body and not file:
raise ValueError("Must specify either body or file")
if file:
    with open(file, 'r') as f:
        body = f.read()
if expose_variables:
data = self.scope.as_dict()
else:
data = {}
return self.tab.evaluate(body, data=data, calling_context=self.environment)
def env(self, name, fallback=None, ignore_empty=True, detect_type=True, joiner=None):
"""
Retrieves a system environment variable and returns the value of it, or a fallback value if
the variable does not exist or (optionally) is empty.
#### Arguments
- **name** (`str`):
The name of the environment variable. Matches are case-insensitive, and the first
matching variable found is the value that will be returned.
- **fallback** (any):
The value to return if the environment variable does not exist, or (optionally) is
empty.
- **ignore_empty** (`bool`):
Whether empty values should be ignored or not.
- **detect_type** (`bool`):
Whether automatic type detection should be performed or not.
- **joiner** (`str`, optional):
If specified, this string will be used to split matching values into a list of values.
This is useful for environment variables that contain multiple values joined by a
separator (e.g.: the `PATH` variable).
#### Returns
The value of the environment variable **name**, or a list of values if **joiner** was
specified. If **name** is non-existent or was empty, **fallback** will be returned instead.
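#### Example
Two illustrative calls (`MY_APP_DEBUG` is an assumed variable name):
    # split the ':'-joined PATH variable into a list of directories
    paths = client.env('PATH', joiner=':')
    # fall back to False when the variable is unset or empty
    debug = client.env('MY_APP_DEBUG', fallback=False)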
"""
value = None
# perform case-insensitive search of all environment variables
for k, v in os.environ.items():
if k.upper() == name.upper():
value = v
break
if value is None:
return fallback
# trim whitespace
value = value.strip()
if isinstance(joiner, basestring):
value = value.split(joiner)
# handle empty values AND empty lists post-split
if ignore_empty and not len(value):
return fallback
# perform type detection (if specified)
if detect_type:
if isinstance(value, list):
value = [autotype(v) for v in value]
else:
value = autotype(value)
return value
def require(self, plugin_name, package_format='webfriend.scripting.commands.{}'):
"""
Loads a named plugin into the current environment.
#### Arguments
- **plugin_name** (`str`):
The name of the plugin to load. This corresponds to the name of a Python module that
contains subclasses of `webfriend.scripting.commands.base.CommandProxy`.
- **package_format** (`str`):
Specifies which Python package contains the module named in **plugin_name**. The
default is to assume plugins are built as namespaced modules that overlay the core
import tree at `webfriend.scripting.commands.<plugin_name>`.
#### Returns
The value of **plugin_name** if the load was successful.
"""
self.environment.register_by_module_name(plugin_name)
return plugin_name
def run(
self,
script_name,
data=None,
isolated=True,
preserve_state=True,
merge_scopes=False,
result_key=Environment.default_result_key
):
"""
Evaluates a Friendscript loaded from another file.
#### Arguments
- **script_name** (`str`):
The filename or basename of the file to search for in the `WEBFRIEND_PATH` environment
variable to load and evaluate. The `WEBFRIEND_PATH` variable behaves like the
traditional *nix `PATH` variable, wherein multiple paths can be specified as a
colon-separated (`:`) list. The current working directory will always be checked
first.
- **data** (`dict`, optional):
If specified, these values will be made available to the evaluated script before it
begins execution.
- **isolated** (`bool`):
Whether the script should have access to the calling script's variables or not.
- **preserve_state** (`bool`):
Whether event handlers created in the evaluated script should remain defined after the
script has completed.
- **merge_scopes** (`bool`):
Whether the scope state at the end of the script's evaluation should be merged into the
current execution scope. Setting this to true allows variables defined inside of the
evaluated script to stay defined after the script has completed. Otherwise, only the
value of the **result_key** variable is returned as the result of this command.
- **result_key** (`str`):
Defines the name of the variable that will be read from the evaluated script's scope
and returned from this command. Defaults to "result", which is the same behavior as
all other commands.
#### Returns
The value of the variable named by **result_key** at the end of the evaluated script's
execution.
#### Raises
Any exception that can be raised from Friendscript.
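#### Example
A minimal sketch (illustrative only; assumes a script "login.fs" exists in the current
directory or on `WEBFRIEND_PATH`; the ".fs" suffix may be omitted):
    result = client.run('login', data={
        'username': 'admin',
    }, isolated=True)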
"""
script = None
final_script_name = None
path_prefixes = ['.']
# makes the ".fs" optional when passing scripts as arguments
script_name = re.sub(r'\.fs$', '', script_name) + '.fs'
# setup scopes
if isolated:
scope = Scope()
else:
scope = Scope(parent=self.scope)
# if data is specified, set these values in the evaluated script's scope
if isinstance(data, dict):
    scope.update(data)
# process WEBFRIEND_PATH envvar
for prefix in os.environ.get('WEBFRIEND_PATH', '').split(':'):
path_prefixes.append(prefix)
# search for file in all prefixes
for prefix in path_prefixes:
s = os.path.join(prefix, script_name)
if os.path.isfile(s):
final_script_name = s
with open(final_script_name, 'r') as f:
    script = f.read()
break
# validate the script was read
if script is None:
raise exceptions.NotFound('Unable to locate script "{}" in any path'.format(
script_name
))
elif isinstance(script, str):
script = script.decode('UTF-8')
# evaluate the script
logging.debug('Evaluating script {}'.format(final_script_name))
scope = execute_script(
self.browser,
script,
scope=scope,
preserve_state=preserve_state
)
# if not using an isolated scope, then the top-level keys that were modified in this script
# are set in our current scope (as if the included code ran inline)
if merge_scopes:
logging.debug(
'Updating {} variables in calling scope with result scope (iso={})'.format(
len(scope),
isolated
)
)
self.scope.update(scope)
return scope.get(result_key)
def help(self, command):
print('\n'.join(document_commands(
oneshot=True,
commands=[command],
)).rstrip('\n'))
"""
@package mi.instrument.sunburst.sami2_pco2.pco2b.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2b/test/test_driver.py
@author Kevin Stiemke
@brief Test cases for pco2b driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.driver import ScheduledJob
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wbDev1SampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import Pco2wConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2b.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.sunburst.sami2_pco2.pco2b.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='V7HE4T',
instrument_agent_name='sunburst_sami2_pco2_pco2b',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={
DriverStartupConfigKey.PARAMETERS: {
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.BIT_SWITCHES: 0x01
},
}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In Python, mixin classes are classes that are not designed to stand on     #
# their own; instead they are inherited by other classes, generally via      #
# multiple inheritance.                                                      #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
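#
# A minimal, fully-commented sketch of that pattern (illustrative only; these
# example classes are not part of this driver's tests):
#
#     class _SharedAssertsMixin(object):
#         EXPECTED_ID = 0x54
#
#         def assert_unique_id(self, value):
#             # relies on assertEqual() from the TestCase it is mixed into
#             self.assertEqual(value, self.EXPECTED_ID)
#
#     class ExampleTest(unittest.TestCase, _SharedAssertsMixin):
#         def test_unique_id(self):
#             self.assert_unique_id(0x54)
#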
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.RUN_EXTERNAL_PUMP: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
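# A minimal sketch (illustrative only) of how the leading fields of this hex
# string line up with the values asserted in _configuration_parameters below:
#
#     cfg = 'CEE90B0002C7EA0001E133800A'   # truncated for illustration
#     launch_time = int(cfg[0:8], 16)      # 0xCEE90B00
#     start_offset = int(cfg[8:16], 16)    # 0x02C7EA00
#     recording_time = int(cfg[16:24], 16) # 0x01E13380
#     mode_bits = int(cfg[24:26], 16)      # 0x0A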
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C0100381E' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'0000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
VALID_R1_SAMPLE = '*540711CEE91DE2CE' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0B, VALUE: 0x0B},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0xFF, VALUE: 0xFF},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x54, VALUE: 0x54},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x18, VALUE: 0x18},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1C, VALUE: 0x1C},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01},
Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x38, VALUE: 0x38},
Parameter.EXTERNAL_PUMP_SETTINGS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1E, VALUE: 0x1E},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 3600, VALUE: 3600},
Parameter.EXTERNAL_PUMP_DELAY: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 360, VALUE: 360},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_dev1_sample_parameters = {
# Device 1 (external pump) Type 17 sample
Pco2wbDev1SampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x07, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x11, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91DE2, REQUIRED: True},
Pco2wbDev1SampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0xCE, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2wConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2wConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2wConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2wConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2wConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2wConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2wConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2wConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
Pco2wConfigurationDataParticleKey.EXTERNAL_PUMP_SETTINGS: {TYPE: int, VALUE: 0x1E, REQUIRED: True}
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 4)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
"""
Verify sami_blank_sample particle (Type 5)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_blank_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL)
self.assert_data_particle_parameters(data_particle,
self._sami_blank_sample_parameters,
verify_values)
def assert_particle_dev1_sample(self, data_particle, verify_values=False):
"""
Verify dev1_sample particle
@param data_particle: Pco2wDev1SampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 17, msg="Not a device 1 sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wbDev1SampleDataParticleKey,
self._dev1_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_DEV1_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._dev1_sample_parameters,
verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: Pco2wConfigurationDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wConfigurationDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_B_CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a      #
# single function call, but can exercise more complex code by using Mock     #
# objects. However, if you find yourself mocking too much, the test may be   #
# better suited as an integration test.                                      #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
capabilities_test_dict = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
'DRIVER_EVENT_REAGENT_FLUSH',
'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
'DRIVER_EVENT_REAGENT_FLUSH_100ML',
'DRIVER_EVENT_RUN_EXTERNAL_PUMP'],
ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.RUN_EXTERNAL_PUMP: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
'DRIVER_EVENT_STOP_AUTOSAMPLE',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
'DRIVER_EVENT_STOP_DIRECT'],
ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
'PROTOCOL_EVENT_SUCCESS',
'PROTOCOL_EVENT_TIMEOUT',
'DRIVER_EVENT_ACQUIRE_STATUS'],
}
def test_base_driver_enums(self):
"""
Verify that all the SAMI Instrument driver enumerations have no
duplicate values that might cause confusion. Also do a little
extra validation for the Capabilities.
Extra enumeration tests are done in a specific subclass
"""
# Test Enums defined in the base SAMI driver
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
# Test capabilities for duplicates, then verify that the set of
# capabilities is a subset of the protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_driver_enums(self):
"""
Verify that all driver enumerations have no duplicate values that might
cause confusion.
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
for part in [self.VALID_STATUS_MESSAGE, self.VALID_R0_BLANK_SAMPLE,
self.VALID_R0_DATA_SAMPLE, self.VALID_R1_SAMPLE, self.VALID_CONFIG_STRING]:
self.assert_chunker_sample(chunker, part)
self.assert_chunker_sample_with_noise(chunker, part)
self.assert_chunker_fragmented_sample(chunker, part)
self.assert_chunker_combined_sample(chunker, part)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the
correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
self.assert_particle_regular_status, True)
self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
self.assert_particle_sami_blank_sample, True)
self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
self.assert_particle_sami_data_sample, True)
self.assert_particle_published(driver, self.VALID_R1_SAMPLE,
self.assert_particle_dev1_sample, True)
self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
self.assert_particle_configuration, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities. Iterate through available
capabilities, and verify that they can pass successfully through the
filter. Test silly made up capabilities to verify they are blocked by
filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in
this dict must also be defined in the protocol FSM. Note, the EXIT and
ENTER DRIVER_EVENTS don't need to be listed here.
"""
# capabilities defined in base class test_driver.
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self.capabilities_test_dict)
def test_pump_commands(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_commands(driver)
def test_pump_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_pump_timing(driver)
def test_waiting_discover(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_waiting_discover(driver)
def test_autosample_timing(self):
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction          #
# by making direct calls via zeromq.                                         #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
"""
Integration Tests:
test_startup_params: Verify that driver startup parameters are set properly.
test_set: In command state, test configuration particle generation.
Parameter.PUMP_PULSE
Parameter.PUMP_DURATION
Parameter.SAMPLES_PER_MEASUREMENT
Parameter.CYCLES_BETWEEN_BLANKS
Parameter.NUMBER_REAGENT_CYCLES
Parameter.NUMBER_BLANK_CYCLES
Parameter.FLUSH_PUMP_INTERVAL
Parameter.BIT_SWITCHES
Parameter.NUMBER_EXTRA_PUMP_CYCLES
Parameter.AUTO_SAMPLE_INTERVAL
Negative Set Tests:
START_TIME_FROM_LAUNCH
STOP_TIME_FROM_START
MODE_BITS
SAMI_SAMPLE_INTERVAL
test_commands: In autosample and command states, test particle generation.
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
test_autosample: Test autosample particle generation.
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
test_scheduled_data: In command and autosample states
ACQUIRE_STATUS
"""
def test_startup_params(self):
startup_values = {
Parameter.PUMP_PULSE: 0x10,
Parameter.PUMP_DURATION: 0x20,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
Parameter.NUMBER_REAGENT_CYCLES: 0x18,
Parameter.NUMBER_BLANK_CYCLES: 0x1C,
Parameter.FLUSH_PUMP_INTERVAL: 0x01,
Parameter.BIT_SWITCHES: 0x01,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x1E,
Parameter.EXTERNAL_PUMP_DELAY: 10,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
Parameter.PUMP_100ML_CYCLES: 1
}
new_values = {
Parameter.PUMP_PULSE: 0x11,
Parameter.PUMP_DURATION: 0x21,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
Parameter.NUMBER_REAGENT_CYCLES: 0x19,
Parameter.NUMBER_BLANK_CYCLES: 0x1D,
Parameter.FLUSH_PUMP_INTERVAL: 0x02,
Parameter.BIT_SWITCHES: 0x02,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
Parameter.EXTERNAL_PUMP_SETTINGS: 0x40,
Parameter.EXTERNAL_PUMP_DELAY: 300,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x01,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
self.assert_set(Parameter.PUMP_PULSE, 20)
self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
self.assert_set(Parameter.BIT_SWITCHES, 1)
self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
self.assert_set(Parameter.EXTERNAL_PUMP_SETTINGS, 40)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.CYCLES_BETWEEN_BLANKS: 7,
Parameter.PUMP_PULSE: 20,
Parameter.SAMPLES_PER_MEASUREMENT: 239,
Parameter.NUMBER_REAGENT_CYCLES: 26,
Parameter.NUMBER_BLANK_CYCLES: 30,
Parameter.FLUSH_PUMP_INTERVAL: 2,
Parameter.BIT_SWITCHES: 1,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
Parameter.EXTERNAL_PUMP_SETTINGS: 40,
Parameter.EXTERNAL_PUMP_DELAY: 60,
Parameter.REAGENT_FLUSH_DURATION: 4,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, 7.0)
self.assert_set_exception(Parameter.PUMP_PULSE, 20.0)
self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
self.assert_set_exception(Parameter.BIT_SWITCHES, 1.0)
self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)
self.assert_set_exception(Parameter.EXTERNAL_PUMP_SETTINGS, 40.0)
## EXTERNAL_PUMP_DELAY is set to 10 seconds in the startup_config. It defaults to 360 seconds (6 minutes).
def test_external_pump_delay(self):
"""
Test delay between running of external pump and taking a sample
"""
max_sample_time = 15 # Maximum observed sample time with current configuration.
global dev1_sample
global data_sample
def get_dev1_sample(particle):
"""
Get dev1 sample
:param particle: dev1 sample particle
"""
global dev1_sample
dev1_sample = particle
def get_data_sample(particle):
"""
Get data sample
:param particle: data sample particle
"""
global data_sample
data_sample = particle
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 10) and (time_diff < (10 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
self.assert_set(Parameter.EXTERNAL_PUMP_DELAY, 60)
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, get_dev1_sample, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, get_data_sample, timeout=180)
dev1_dict = self.get_data_particle_values_as_dict(dev1_sample)
sample_dict = self.get_data_particle_values_as_dict(data_sample)
dev1_time = dev1_dict.get(Pco2wbDev1SampleDataParticleKey.RECORD_TIME)
sample_time = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TIME)
time_diff = sample_time - dev1_time
self.assertTrue((time_diff > 60) and (time_diff < (60 + max_sample_time)),
"External pump delay %s is invalid" % time_diff)
def test_acquire_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_acquire_blank_sample(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=180)
def test_auto_sample(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
self.clear_events()
# Now verify that no more particles get generated
failed = False
try:
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=240)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE,
self.assert_particle_dev1_sample)
failed = True
except AssertionError:
pass
self.assertFalse(failed)
# Restart autosample
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=4, timeout=400)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=4)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
def test_polled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=180)
def test_scheduled_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_blank_sample_state(self):
self.assert_initialize_driver()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
timeout=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
timeout=180)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_scheduled_device_status_auto_sample(self):
"""
Verify the device status command can be triggered and run in autosample
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=180)
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration,
timeout=300)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_queued_command(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
## Queue status
self.clear_events()
self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
"""
Verify status is queued while samples are being taken
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
## Queue status
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
particle_count=1, timeout=220)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
particle_count=1, timeout=60)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=180)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
self.assert_initialize_driver()
self.clear_events()
self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, delay=120)
self.clear_events()
self.assert_async_particle_generation(DataParticleType.PCO2W_B_CONFIGURATION,
self.assert_particle_configuration,
timeout=180)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_BATTERY_VOLTAGE,
self.assert_particle_battery_voltage)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE,
self.assert_particle_thermistor_voltage)
self.assert_current_state(ProtocolState.COMMAND)
def test_run_external_pump(self):
"""
Test running external pump and queueing status
"""
self.assert_initialize_driver()
self.clear_events()
self.assert_driver_command(ProtocolEvent.RUN_EXTERNAL_PUMP)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_DEV1_SAMPLE, self.assert_particle_dev1_sample,
timeout=20.0)
self.assert_async_particle_generation(DataParticleType.PCO2W_B_REGULAR_STATUS,
self.assert_particle_regular_status,
timeout=20.0)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should#
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
@unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
def test_overnight(self):
"""
Verify autosample at default rate
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
self.assert_set_parameter(Parameter.EXTERNAL_PUMP_DELAY, 360)
request_sample = time.time()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=120)
receive_dev1_sample = time.time()
dev1_sample_time = receive_dev1_sample - request_sample
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE, timeout=800)
receive_sample = time.time()
sample_time = receive_sample - request_sample
log.debug("dev1_sample_time = %s", dev1_sample_time)
log.debug("sample_time = %s", sample_time)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE,
timeout=14400)
def test_direct_access_telnet_mode(self):
"""
@brief This test manually tests that the Instrument Driver properly
supports direct access to the physical instrument. (telnet mode)
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
configuration_string = 'CF87945A02C7EA0001E133800A000E100402000E10010B0000000000000000000000000000000' + \
'71020FFA8181C0100383C00000000000000000000000000000000000000000000000000000000' + \
'00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
'0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
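        # The configuration string above is written to the instrument with the
        # "L5A" (load configuration) command below and then read back with "L"
        # to verify that the write took effect.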
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
# Erase memory
self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
time.sleep(1)
# Load a new configuration string changing X to X
self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
time.sleep(1)
self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
time.sleep(1)
# Check that configuration was changed
self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
return_value = self.tcp_client.expect(configuration_string)
self.assertTrue(return_value)
###
# Add instrument specific code here.
###
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
def test_command_poll(self):
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_B_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_B_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_B_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.RUN_EXTERNAL_PUMP, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample_poll(self):
self.assert_enter_command_mode()
self.assert_start_autosample(timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_dev1_sample,
DataParticleType.PCO2W_B_DEV1_SAMPLE, sample_count=1, timeout=200)
self.assert_sample_async(self.assert_particle_sami_blank_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE_CAL, timeout=200)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
DataParticleType.PCO2W_B_REGULAR_STATUS, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
DataParticleType.PCO2W_B_CONFIGURATION, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
DataParticleType.PCO2W_B_BATTERY_VOLTAGE, sample_count=1, timeout=10)
self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
DataParticleType.PCO2W_B_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
self.assert_stop_autosample()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 80)
self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_B_SAMI_SAMPLE)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
ProtocolEvent.DEIONIZED_WATER_FLUSH,
ProtocolEvent.REAGENT_FLUSH,
ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
ProtocolEvent.REAGENT_FLUSH_100ML,
ProtocolEvent.RUN_EXTERNAL_PUMP
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.ACQUIRE_BLANK_SAMPLE
]
self.assert_start_autosample(timeout=200)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
        # We should be back in command mode after stopping autosample.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Stepper CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.debug import stepper
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
# Regex pattern for a node line in the stepper CLI output.
NODE_LINE_PATTERN = re.compile(r".*\(.*\).*\[.*\].*")
def _parse_sorted_nodes_list(lines):
"""Parsed a list of lines to extract the node list.
Args:
lines: (list of str) Lines from which the node list and associated
information will be extracted.
Returns:
(list of str) The list of node names.
(list of str) The list of status labels.
    (int) 0-based index among the nodes for the node pointed to by the next-node
pointer. If no such node exists, -1.
"""
node_names = []
status_labels = []
node_pointer = -1
node_line_counter = 0
for line in lines:
if NODE_LINE_PATTERN.match(line):
node_names.append(line.split(" ")[-1])
idx_left_bracket = line.index("[")
idx_right_bracket = line.index("]")
status_labels.append(line[idx_left_bracket + 1:idx_right_bracket])
if line.strip().startswith(
stepper_cli.NodeStepperCLI.NEXT_NODE_POINTER_STR):
node_pointer = node_line_counter
node_line_counter += 1
return node_names, status_labels, node_pointer
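# Illustrative self-check (not part of the original test file): the parser
# above keys off three features of a node line -- a parenthesized index, a
# bracketed status field, and the node name as the last whitespace-separated
# token. The sample lines are hypothetical; real ones come from
# NodeStepperCLI.list_sorted_nodes().
def _demo_parse_sorted_nodes_list():
  lines = [
      "(1 / 2)  [ ]  a",
      "(2 / 2)  [C]  a/read",
  ]
  node_names, status_labels, node_pointer = _parse_sorted_nodes_list(lines)
  assert node_names == ["a", "a/read"]
  assert status_labels == [" ", "C"]
  # Neither sample line begins with the next-node pointer marker.
  assert node_pointer == -1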
def _parsed_used_feeds(lines):
feed_types = {}
begin_line = -1
for i, line in enumerate(lines):
if line.startswith("Stepper used feeds:"):
begin_line = i + 1
break
if begin_line == -1:
return feed_types
  for line in lines[begin_line:]:
    line = line.strip()
    if not line:
      break
    feed_name = line.split(" : ")[0].strip()
    feed_type = line.split(" : ")[1].strip()
    feed_types[feed_name] = feed_type
  # Return the parsed feeds even if the section runs to the end of the output
  # without a trailing blank line.
  return feed_types
class NodeStepperSimpleGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self.a = variables.Variable(10.0, name="a")
self.b = variables.Variable(20.0, name="b")
self.c = math_ops.add(self.a, self.b, name="c") # Should be 30.0.
self.d = math_ops.sub(self.a, self.c, name="d") # Should be -20.0.
self.e = math_ops.mul(self.c, self.d, name="e") # Should be -600.0.
self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
self.f = math_ops.mul(self.e, self.ph, name="f")
self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
self.e, name="opt")
self.sess = session.Session()
self.sess.run(self.a.initializer)
self.sess.run(self.b.initializer)
def tearDown(self):
ops.reset_default_graph()
def _assert_nodes_topologically_sorted_with_target_e(self, node_names):
"""Check the topologically sorted order of the node names."""
self.assertGreaterEqual(len(node_names), 7)
self.assertLess(node_names.index("a"), node_names.index("a/read"))
self.assertLess(node_names.index("b"), node_names.index("b/read"))
self.assertLess(node_names.index("a/read"), node_names.index("c"))
self.assertLess(node_names.index("b/read"), node_names.index("c"))
self.assertLess(node_names.index("a/read"), node_names.index("d"))
self.assertLess(node_names.index("c"), node_names.index("d"))
self.assertLess(node_names.index("c"), node_names.index("e"))
self.assertLess(node_names.index("d"), node_names.index("e"))
def _assert_nodes_topologically_sorted_with_target_f(self, node_names):
self._assert_nodes_topologically_sorted_with_target_e(node_names)
self.assertGreaterEqual(len(node_names), 9)
self.assertLess(node_names.index("ph"), node_names.index("f"))
self.assertLess(node_names.index("e"), node_names.index("f"))
  def testListingSortedNodesPresentsTransitiveClosure(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self._assert_nodes_topologically_sorted_with_target_e(node_names)
self.assertEqual(len(node_names), len(stat_labels))
for stat_label in stat_labels:
self.assertEqual(" ", stat_label)
self.assertEqual(0, node_pointer)
def testListingSortedNodesLabelsPlaceholders(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.f))
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self._assert_nodes_topologically_sorted_with_target_f(node_names)
index_ph = node_names.index("ph")
self.assertEqual(len(node_names), len(stat_labels))
for i in xrange(len(stat_labels)):
if index_ph == i:
self.assertIn(stepper_cli.NodeStepperCLI.STATE_IS_PLACEHOLDER,
stat_labels[i])
else:
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_IS_PLACEHOLDER,
stat_labels[i])
self.assertEqual(0, node_pointer)
def testContToNonexistentNodeShouldError(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.f))
output = cli.cont(["foobar"])
self.assertEqual([
"ERROR: foobar is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testContToNodeOutsideTransitiveClosureShouldError(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.cont(["f"])
self.assertEqual([
"ERROR: f is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testContToValidNodeShouldUpdateStatus(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_c = node_names.index("c")
self.assertEqual(" ", stat_labels[index_c])
self.assertEqual(0, node_pointer)
output = cli.cont("c")
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertGreaterEqual(len(node_names), 3)
self.assertIn("c", node_names)
index_c = node_names.index("c")
self.assertEqual(index_c, node_pointer)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_c])
output = cli.cont("d")
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
used_feed_types = _parsed_used_feeds(output.lines)
self.assertEqual({"c:0": "handle"}, used_feed_types)
self.assertGreaterEqual(len(node_names), 3)
self.assertIn("d", node_names)
index_d = node_names.index("d")
self.assertEqual(index_d, node_pointer)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_d])
def testSteppingOneStepAtATimeShouldUpdateStatus(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.list_sorted_nodes([])
orig_node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual(0, node_pointer)
for i in xrange(len(orig_node_names)):
output = cli.step([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
next_node_name = node_names[node_pointer]
self.assertEqual(orig_node_names[i], next_node_name)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[node_pointer])
# The order in which the nodes are listed should not change as the
# stepping happens.
output = cli.list_sorted_nodes([])
node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual(orig_node_names, node_names)
if i < len(orig_node_names) - 1:
self.assertEqual(i + 1, node_pointer)
else:
# Stepped over the limit. Pointer should be at -1.
self.assertEqual(-1, node_pointer)
    # Attempting to step once more after the end has been reached should error out.
output = cli.step([])
self.assertEqual([
"ERROR: Cannot step any further because the end of the sorted "
"transitive closure has been reached."
], output.lines)
def testSteppingMultipleStepsUpdatesStatus(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.list_sorted_nodes([])
orig_node_names, _, _ = _parse_sorted_nodes_list(output.lines)
output = cli.step(["-t", "3"])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertEqual(orig_node_names[2], node_names[node_pointer])
for i in xrange(node_pointer):
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[i])
for i in xrange(node_pointer + 1, len(stat_labels)):
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[i])
def testContToNodeWithoutOutputTensorInClosureShowsNoHandleCached(self):
node_stepper = stepper.NodeStepper(self.sess, self.opt)
sorted_nodes = node_stepper.sorted_nodes()
closure_elements = node_stepper.closure_elements()
# Find a node which is in the list of sorted nodes, but whose output tensor
# is not in the transitive closure.
no_output_node = None
for node in sorted_nodes:
if (node + ":0" not in closure_elements and
node + ":1" not in closure_elements):
no_output_node = node
break
self.assertIsNotNone(no_output_node)
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont([no_output_node])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
self.assertEqual(no_output_node, node_names[node_pointer])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[node_pointer])
def testContToUpdateNodeLeadsToDirtyVariableLabel(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.opt))
output = cli.cont(["opt/update_b/ApplyGradientDescent"])
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
def testContWithRestoreVariablesOptionShouldRestoreVariableValue(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.opt))
output = cli.cont(["opt/update_a/ApplyGradientDescent"])
# After cont() call on .../update_a/..., Variable a should have been marked
# as dirty, whereas b should not have.
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
output = cli.cont(["opt/update_b/ApplyGradientDescent", "-r"])
# After cont() call on .../update_b/... with the -r flag, Variable b should
# have been marked as dirty, whereas Variable a should not be because it
# should have been restored.
output = cli.list_sorted_nodes([])
node_names, stat_labels, _ = _parse_sorted_nodes_list(output.lines)
self.assertIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("b")])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_DIRTY_VARIABLE,
stat_labels[node_names.index("a")])
def testPrintTensorShouldWorkWithTensorName(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
cli.cont("d")
output = cli.print_tensor(["d:0"])
self.assertEqual("Tensor \"d:0\":", output.lines[0])
self.assertEqual("-20.0", output.lines[-1])
def testPrintTensorShouldWorkWithNodeNameWithOutputTensor(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
cli.cont("d")
output = cli.print_tensor(["d"])
self.assertEqual("Tensor \"d:0\":", output.lines[0])
self.assertEqual("-20.0", output.lines[-1])
def testPrintTensorShouldWorkSlicingString(self):
ph_value = np.array([[1.0, 0.0], [0.0, 2.0]])
cli = stepper_cli.NodeStepperCLI(
stepper.NodeStepper(
self.sess, self.f, feed_dict={self.ph: ph_value}))
output = cli.print_tensor(["ph:0[:, 1]"])
self.assertEqual("Tensor \"ph:0[:, 1]\":", output.lines[0])
self.assertEqual(repr(ph_value[:, 1]), output.lines[-1])
output = cli.print_tensor(["ph[:, 1]"])
self.assertEqual("Tensor \"ph:0[:, 1]\":", output.lines[0])
self.assertEqual(repr(ph_value[:, 1]), output.lines[-1])
def testPrintTensorWithNonexistentTensorShouldError(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.print_tensor(["foobar"])
self.assertEqual([
"ERROR: foobar is not in the transitive closure of this stepper "
"instance."
], output.lines)
def testPrintTensorWithNoHandleShouldError(self):
cli = stepper_cli.NodeStepperCLI(stepper.NodeStepper(self.sess, self.e))
output = cli.print_tensor("e")
self.assertEqual([
"This stepper instance does not have access to the value of tensor "
"\"e:0\""
], output.lines)
def testInjectTensorValueByTensorNameShouldBeReflected(self):
node_stepper = stepper.NodeStepper(self.sess, self.e)
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.cont(["d"])
node_names, _, node_pointer = _parse_sorted_nodes_list(output.lines)
self.assertEqual("d", node_names[node_pointer])
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_d = node_names.index("d")
self.assertIn(stepper_cli.NodeStepperCLI.STATE_CONT, stat_labels[index_d])
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_OVERRIDDEN,
stat_labels[index_d])
self.assertAllClose(-20.0, node_stepper.get_tensor_value("d:0"))
output = cli.inject_value(["d:0", "20.0"])
# Verify that the override is available.
self.assertEqual(["d:0"], node_stepper.override_names())
# Verify that the list of sorted nodes reflects the existence of the value
# override (i.e., injection).
output = cli.list_sorted_nodes([])
node_names, stat_labels, node_pointer = _parse_sorted_nodes_list(
output.lines)
index_d = node_names.index("d")
self.assertNotIn(stepper_cli.NodeStepperCLI.STATE_CONT,
stat_labels[index_d])
self.assertIn(stepper_cli.NodeStepperCLI.STATE_OVERRIDDEN,
stat_labels[index_d])
def testInjectTensorValueByNodeNameShouldBeReflected(self):
node_stepper = stepper.NodeStepper(self.sess, self.e)
cli = stepper_cli.NodeStepperCLI(node_stepper)
cli.inject_value(["d", "20.0"])
self.assertEqual(["d:0"], node_stepper.override_names())
def testInjectToNonexistentTensorShouldError(self):
node_stepper = stepper.NodeStepper(self.sess, self.e)
cli = stepper_cli.NodeStepperCLI(node_stepper)
output = cli.inject_value(["foobar:0", "20.0"])
self.assertEqual([
"ERROR: foobar:0 is not in the transitive closure of this stepper "
"instance."
], output.lines)
if __name__ == "__main__":
googletest.main()
from math import ceil
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import connection, models
from django.db.models import Sum
from django.utils.functional import SimpleLazyObject
from django.utils.translation import ugettext_lazy as _
from casepro.cases.models import CaseAction, Partner
from casepro.msgs.models import Label
from casepro.utils import date_range
from casepro.utils.export import BaseExport
def datetime_to_date(dt, org):
"""
Convert a datetime to a date using the given org's timezone
"""
return dt.astimezone(org.timezone).date()
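# Worked example (illustrative; assumes pytz, as used by dash orgs): a
# datetime of 2018-01-01 23:30 UTC for an org in Africa/Kigali (UTC+2) falls
# on the local date 2018-01-02, which is why counts recorded near midnight
# UTC can land on a different day than the UTC date:
#
#   import pytz
#   from datetime import datetime
#   dt = datetime(2018, 1, 1, 23, 30, tzinfo=pytz.UTC)
#   dt.astimezone(pytz.timezone("Africa/Kigali")).date()  # date(2018, 1, 2)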
class BaseCount(models.Model):
"""
Tracks total counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
TYPE_INCOMING = "I"
TYPE_INBOX = "N"
TYPE_ARCHIVED = "A"
TYPE_REPLIES = "R"
TYPE_CASE_OPENED = "C"
TYPE_CASE_CLOSED = "D"
id = models.BigAutoField(auto_created=True, primary_key=True, verbose_name="ID")
squash_sql = """
WITH removed as (
DELETE FROM %(table_name)s WHERE %(delete_cond)s RETURNING "count"
)
INSERT INTO %(table_name)s(%(insert_cols)s, "count")
VALUES (%(insert_vals)s, GREATEST(0, (SELECT SUM("count") FROM removed)));"""
item_type = models.CharField(max_length=1, help_text=_("The thing being counted"))
scope = models.CharField(max_length=32, help_text=_("The scope in which it is being counted"))
count = models.IntegerField()
@staticmethod
def encode_scope(*args):
types = []
for arg in args:
# request.user is actually a SimpleLazyObject proxy
if isinstance(arg, User) and isinstance(arg, SimpleLazyObject):
arg = User(pk=arg.pk)
types.append(type(arg))
if types == [Org]:
return "org:%d" % args[0].pk
elif types == [Partner]:
return "partner:%d" % args[0].pk
elif types == [Org, User]:
return "org:%d:user:%d" % (args[0].pk, args[1].pk)
elif types == [Label]:
return "label:%d" % args[0].pk
else: # pragma: no cover
raise ValueError("Unsupported scope: %s" % ",".join([t.__name__ for t in types]))
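    # Illustrative examples (hypothetical primary keys): encode_scope(org)
    # yields "org:12", encode_scope(org, user) yields "org:12:user:34", and
    # encode_scope(label) yields "label:56". These encoded strings are what
    # the `scope` field stores and what the count queries filter on.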
@classmethod
def squash(cls):
"""
Squashes counts so that there is a single count per item_type + scope combination
"""
last_squash_id = cache.get(cls.last_squash_key, 0)
unsquashed_values = cls.objects.filter(pk__gt=last_squash_id)
unsquashed_values = unsquashed_values.values(*cls.squash_over).distinct(*cls.squash_over)
for unsquashed in unsquashed_values:
with connection.cursor() as cursor:
sql = cls.squash_sql % {
"table_name": cls._meta.db_table,
"delete_cond": " AND ".join(['"%s" = %%s' % f for f in cls.squash_over]),
"insert_cols": ", ".join(['"%s"' % f for f in cls.squash_over]),
"insert_vals": ", ".join(["%s"] * len(cls.squash_over)),
}
params = [unsquashed[f] for f in cls.squash_over]
cursor.execute(sql, params + params)
max_id = cls.objects.order_by("-pk").values_list("pk", flat=True).first()
if max_id:
cache.set(cls.last_squash_key, max_id)
class CountSet(object):
"""
A queryset of counts which can be aggregated in different ways
"""
def __init__(self, counts, scopes):
self.counts = counts
self.scopes = scopes
def total(self):
"""
Calculates the overall total over a set of counts
"""
total = self.counts.aggregate(total=Sum("count"))
return total["total"] if total["total"] is not None else 0
def scope_totals(self):
"""
Calculates per-scope totals over a set of counts
"""
totals = list(self.counts.values_list("scope").annotate(replies=Sum("count")))
total_by_encoded_scope = {t[0]: t[1] for t in totals}
total_by_scope = {}
for encoded_scope, scope in self.scopes.items():
total_by_scope[scope] = total_by_encoded_scope.get(encoded_scope, 0)
return total_by_scope
class Meta:
abstract = True
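# Pure-Python sketch (hypothetical helper, not used by the models in this
# module) of what BaseCount.squash() computes per item_type + scope
# combination: the SQL CTE deletes the existing rows and re-inserts a single
# row whose count is the clamped sum of the deleted counts.
def _squash_counts_sketch(rows):
    """`rows` is an iterable of (item_type, scope, count) tuples."""
    squashed = {}
    for item_type, scope, count in rows:
        key = (item_type, scope)
        squashed[key] = squashed.get(key, 0) + count
    return [(t, s, max(0, c)) for (t, s), c in squashed.items()]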
class BaseSecondTotal(BaseCount):
"""
    Tracks total seconds and counts of different items (e.g. time since assigned) in different scopes (e.g. org, user)
"""
TYPE_TILL_REPLIED = "A"
TYPE_TILL_CLOSED = "C"
squash_sql = """
WITH removed as (
DELETE FROM %(table_name)s WHERE %(delete_cond)s RETURNING "count", "seconds"
)
INSERT INTO %(table_name)s(%(insert_cols)s, "count", "seconds")
VALUES (
%(insert_vals)s,
GREATEST(0, (SELECT SUM("count") FROM removed)),
COALESCE((SELECT SUM("seconds") FROM removed), 0)
);"""
seconds = models.BigIntegerField()
class CountSet(BaseCount.CountSet):
"""
A queryset of counts which can be aggregated in different ways
"""
def average(self):
"""
            Calculates the overall average (total seconds divided by total count) over a set of counts
"""
totals = self.counts.aggregate(total=Sum("count"), seconds=Sum("seconds"))
if totals["seconds"] is None or totals["total"] is None:
return 0
average = float(totals["seconds"]) / totals["total"]
return average
def seconds(self):
"""
Calculates the overall total of seconds over a set of counts
"""
total = self.counts.aggregate(total_seconds=Sum("seconds"))
return total["total_seconds"] if total["total_seconds"] is not None else 0
def scope_averages(self):
"""
Calculates per-scope averages over a set of counts
"""
totals = list(self.counts.values("scope").annotate(cases=Sum("count"), seconds=Sum("seconds")))
total_by_encoded_scope = {t["scope"]: (t["cases"], t["seconds"]) for t in totals}
average_by_scope = {}
for encoded_scope, scope in self.scopes.items():
cases, seconds = total_by_encoded_scope.get(encoded_scope, (1, 0))
average_by_scope[scope] = float(seconds) / cases
return average_by_scope
def day_totals(self):
"""
Calculates per-day totals over a set of counts
"""
return list(
self.counts.values_list("day").annotate(cases=Sum("count"), seconds=Sum("seconds")).order_by("day")
)
def month_totals(self):
"""
Calculates per-month totals over a set of counts
"""
counts = self.counts.extra(select={"month": 'EXTRACT(month FROM "day")'})
return list(
counts.values_list("month").annotate(cases=Sum("count"), seconds=Sum("seconds")).order_by("month")
)
class Meta:
abstract = True
class TotalCount(BaseCount):
"""
Tracks total counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
squash_over = ("item_type", "scope")
last_squash_key = "total_count:last_squash"
@classmethod
def get_by_label(cls, labels, item_type):
return cls._get_count_set(item_type, {cls.encode_scope(l): l for l in labels})
@classmethod
def _get_count_set(cls, item_type, scopes):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
return BaseCount.CountSet(counts, scopes)
class Meta:
index_together = ("item_type", "scope")
class DailyCount(BaseCount):
"""
Tracks per-day counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
day = models.DateField(help_text=_("The day this count is for"))
squash_over = ("day", "item_type", "scope")
last_squash_key = "daily_count:last_squash"
@classmethod
def record_item(cls, day, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=1)
@classmethod
def record_removal(cls, day, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=-1)
@classmethod
def get_by_org(cls, orgs, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(o): o for o in orgs}, since, until)
@classmethod
def get_by_partner(cls, partners, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(p): p for p in partners}, since, until)
@classmethod
def get_by_user(cls, org, users, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(org, u): u for u in users}, since, until)
@classmethod
def get_by_label(cls, labels, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(l): l for l in labels}, since, until)
@classmethod
def _get_count_set(cls, item_type, scopes, since, until):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
if since:
counts = counts.filter(day__gte=since)
if until:
counts = counts.filter(day__lt=until)
return DailyCount.CountSet(counts, scopes)
class CountSet(BaseCount.CountSet):
"""
A queryset of counts which can be aggregated in different ways
"""
def day_totals(self):
"""
Calculates per-day totals over a set of counts
"""
return list(self.counts.values_list("day").annotate(total=Sum("count")).order_by("day"))
def month_totals(self):
"""
Calculates per-month totals over a set of counts
"""
counts = self.counts.extra(select={"month": 'EXTRACT(month FROM "day")'})
return list(counts.values_list("month").annotate(replies=Sum("count")).order_by("month"))
class Meta:
index_together = ("item_type", "scope", "day")
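# Illustrative usage sketch (hypothetical objects, not part of this module):
# each incoming message on an org records a +1 row, and aggregates are read
# back through a count set:
#
#   DailyCount.record_item(day, DailyCount.TYPE_INCOMING, org)
#   DailyCount.get_by_org([org], DailyCount.TYPE_INCOMING).total()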
class DailyCountExport(BaseExport):
"""
Exports based on daily counts. Each row is date and columns are different scopes.
"""
TYPE_LABEL = "L"
TYPE_PARTNER = "P"
TYPE_USER = "U"
type = models.CharField(max_length=1)
since = models.DateField()
until = models.DateField()
directory = "daily_count_export"
download_view = "statistics.dailycountexport_read"
@classmethod
def create(cls, org, user, of_type, since, until):
return cls.objects.create(org=org, created_by=user, type=of_type, since=since, until=until)
def render_book(self, book):
if self.type == self.TYPE_LABEL:
sheet = book.add_sheet(str(_("Incoming Messages")))
labels = list(Label.get_all(self.org).order_by("name"))
# get each label's day counts and organise by label and day
totals_by_label = {}
for label in labels:
totals = DailyCount.get_by_label(
[label], DailyCount.TYPE_INCOMING, self.since, self.until
).day_totals()
totals_by_label[label] = {t[0]: t[1] for t in totals}
self.write_row(sheet, 0, ["Date"] + [l.name for l in labels])
row = 1
for day in date_range(self.since, self.until):
totals = [totals_by_label.get(l, {}).get(day, 0) for l in labels]
self.write_row(sheet, row, [day] + totals)
row += 1
elif self.type == self.TYPE_USER:
replies_sheet = book.add_sheet(str(_("Replies Sent")))
cases_opened_sheet = book.add_sheet(str(_("Cases Opened")))
cases_closed_sheet = book.add_sheet(str(_("Cases Closed")))
users = self.org.get_org_users().order_by("profile__full_name")
replies_totals_by_user = {}
cases_opened_by_user = {}
cases_closed_by_user = {}
for user in users:
replies_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_REPLIES, self.since, self.until
).day_totals()
cases_opened_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_CASE_OPENED, self.since, self.until
).day_totals()
cases_closed_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_CASE_CLOSED, self.since, self.until
).day_totals()
replies_totals_by_user[user] = {t[0]: t[1] for t in replies_totals}
cases_opened_by_user[user] = {t[0]: t[1] for t in cases_opened_totals}
cases_closed_by_user[user] = {t[0]: t[1] for t in cases_closed_totals}
self.write_row(replies_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
self.write_row(cases_opened_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
self.write_row(cases_closed_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
row = 1
for day in date_range(self.since, self.until):
replies_totals = [replies_totals_by_user.get(u, {}).get(day, 0) for u in users]
cases_opened_totals = [cases_opened_by_user.get(u, {}).get(day, 0) for u in users]
cases_closed_totals = [cases_closed_by_user.get(u, {}).get(day, 0) for u in users]
self.write_row(replies_sheet, row, [day] + replies_totals)
self.write_row(cases_opened_sheet, row, [day] + cases_opened_totals)
self.write_row(cases_closed_sheet, row, [day] + cases_closed_totals)
row += 1
elif self.type == self.TYPE_PARTNER:
replies_sheet = book.add_sheet(str(_("Replies Sent")))
ave_sheet = book.add_sheet(str(_("Average Reply Time")))
ave_closed_sheet = book.add_sheet(str(_("Average Closed Time")))
cases_opened_sheet = book.add_sheet(str(_("Cases Opened")))
cases_closed_sheet = book.add_sheet(str(_("Cases Closed")))
partners = list(Partner.get_all(self.org).order_by("name"))
# get each partner's day counts and organise by partner and day
replies_totals_by_partner = {}
cases_opened_by_partner = {}
cases_closed_by_partner = {}
replied_averages_by_partner = {}
closed_averages_by_partner = {}
for partner in partners:
replies_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_REPLIES, self.since, self.until
).day_totals()
cases_opened_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_CASE_OPENED, self.since, self.until
).day_totals()
cases_closed_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_CASE_CLOSED, self.since, self.until
).day_totals()
replies_totals_by_partner[partner] = {t[0]: t[1] for t in replies_totals}
cases_opened_by_partner[partner] = {t[0]: t[1] for t in cases_opened_totals}
cases_closed_by_partner[partner] = {t[0]: t[1] for t in cases_closed_totals}
replied_second_totals = DailySecondTotalCount.get_by_partner(
[partner], DailySecondTotalCount.TYPE_TILL_REPLIED, self.since, self.until
).day_totals()
replied_averages_by_partner[partner] = {t[0]: (float(t[2]) / t[1]) for t in replied_second_totals}
closed_second_totals = DailySecondTotalCount.get_by_partner(
[partner], DailySecondTotalCount.TYPE_TILL_CLOSED, self.since, self.until
).day_totals()
closed_averages_by_partner[partner] = {t[0]: (float(t[2]) / t[1]) for t in closed_second_totals}
self.write_row(replies_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(cases_opened_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(cases_closed_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(ave_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(ave_closed_sheet, 0, ["Date"] + [p.name for p in partners])
row = 1
for day in date_range(self.since, self.until):
replies_totals = [replies_totals_by_partner.get(l, {}).get(day, 0) for l in partners]
cases_opened_totals = [cases_opened_by_partner.get(l, {}).get(day, 0) for l in partners]
cases_closed_totals = [cases_closed_by_partner.get(l, {}).get(day, 0) for l in partners]
replied_averages = [replied_averages_by_partner.get(l, {}).get(day, 0) for l in partners]
closed_averages = [closed_averages_by_partner.get(l, {}).get(day, 0) for l in partners]
self.write_row(replies_sheet, row, [day] + replies_totals)
self.write_row(cases_opened_sheet, row, [day] + cases_opened_totals)
self.write_row(cases_closed_sheet, row, [day] + cases_closed_totals)
self.write_row(ave_sheet, row, [day] + replied_averages)
self.write_row(ave_closed_sheet, row, [day] + closed_averages)
row += 1
class DailySecondTotalCount(BaseSecondTotal):
"""
Tracks total seconds and count of different items in different scopes (e.g. org, user)
"""
day = models.DateField(help_text=_("The day this count is for"))
squash_over = ("day", "item_type", "scope")
last_squash_key = "daily_second_total_count:last_squash"
@classmethod
def record_item(cls, day, seconds, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=1, seconds=seconds)
@classmethod
def get_by_org(cls, orgs, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(o): o for o in orgs}, since, until)
@classmethod
def get_by_partner(cls, partners, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(p): p for p in partners}, since, until)
@classmethod
def get_by_user(cls, org, users, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(org, u): u for u in users}, since, until)
@classmethod
def _get_count_set(cls, item_type, scopes, since, until):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
if since:
counts = counts.filter(day__gte=since)
if until:
counts = counts.filter(day__lt=until)
return DailySecondTotalCount.CountSet(counts, scopes)
def record_case_closed_time(close_action):
org = close_action.case.org
user = close_action.created_by
partner = close_action.case.assignee
case = close_action.case
day = datetime_to_date(close_action.created_on, close_action.case.org)
# count the time to close on an org level
td = close_action.created_on - case.opened_on
seconds_since_open = ceil(td.total_seconds())
DailySecondTotalCount.record_item(day, seconds_since_open, DailySecondTotalCount.TYPE_TILL_CLOSED, org)
# count the time since case was last assigned to this partner till it was closed
if user.partners.filter(id=partner.id).exists():
# count the time since this case was (re)assigned to this partner
try:
action = case.actions.filter(action=CaseAction.REASSIGN, assignee=partner).latest("created_on")
start_date = action.created_on
except CaseAction.DoesNotExist:
start_date = case.opened_on
td = close_action.created_on - start_date
seconds_since_open = ceil(td.total_seconds())
DailySecondTotalCount.record_item(day, seconds_since_open, DailySecondTotalCount.TYPE_TILL_CLOSED, partner)
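# Worked example (hypothetical times): a case opened at 10:00:00 UTC and
# closed at 12:30:00 UTC the same day contributes ceil(9000.0) == 9000
# seconds to the org-level TYPE_TILL_CLOSED total for the org-local day of
# the close action.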
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: copy.py
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copy_reg import dispatch_table
class Error(Exception):
pass
error = Error
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ['Error', 'copy', 'deepcopy']
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
else:
copier = getattr(cls, '__copy__', None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, '__reduce_ex__', None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, '__reduce__', None)
if reductor:
rv = reductor()
else:
raise Error('un(shallow)copyable object of type %s' % cls)
return _reconstruct(x, rv, 0)
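# Illustrative note (not part of the decompiled module): the lookup order in
# copy() is (1) the _copy_dispatch type table, (2) a __copy__ method on the
# class, (3) the copy_reg dispatch_table, then (4) __reduce_ex__/__reduce__.
# For example, copy([1, 2]) hits the type table and returns list([1, 2]), a
# new list holding the same element objects.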
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, long, float, bool, str, tuple,
frozenset, type, xrange, types.ClassType,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
for name in ('ComplexType', 'UnicodeType', 'CodeType'):
t = getattr(types, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
def _copy_inst(x):
if hasattr(x, '__copy__'):
return x.__copy__()
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _copy_inst
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
else:
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError:
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, '__deepcopy__', None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, '__reduce_ex__', None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, '__reduce__', None)
if reductor:
rv = reductor()
else:
raise Error('un(deep)copyable object of type %s' % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo)
return y
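# Illustrative note (not part of the decompiled module): the memo table keyed
# on id(x) is what lets deepcopy terminate on self-referential structures.
# For example, a list that contains itself deep-copies to a new list that
# contains the new list:
#
#   l = []
#   l.append(l)
#   m = deepcopy(l)
#   assert m is not l and m[0] is m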
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[long] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[str] = _deepcopy_atomic
try:
d[unicode] = _deepcopy_atomic
except NameError:
pass
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[xrange] = _deepcopy_atomic
d[types.ClassType] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
d = id(x)
try:
return memo[d]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
memo[d] = y
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.iteritems():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo):
return type(x)(x.im_func, deepcopy(x.im_self, memo), x.im_class)
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
memo[id(memo)] = [
x]
def _deepcopy_inst(x, memo):
if hasattr(x, '__deepcopy__'):
return x.__deepcopy__(memo)
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
args = deepcopy(args, memo)
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
memo[id(x)] = y
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _deepcopy_inst
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
else:
if memo is None:
memo = {}
n = len(info)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.iteritems():
setattr(y, key, value)
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
return y
del d
del types
class _EmptyClass:
pass
def _test():
l = [
None, 1, 2, 3.14, 'xyzzy', (1, 2), [3.14, 'abc'], {'abc': 'ABC'}, (), [], {}]
l1 = copy(l)
print l1 == l
l1 = map(copy, l)
print l1 == l
l1 = deepcopy(l)
print l1 == l
class C:
def __init__(self, arg=None):
self.a = 1
self.arg = arg
if __name__ == '__main__':
import sys
file = sys.argv[0]
else:
file = __file__
self.fp = open(file)
self.fp.close()
def __getstate__(self):
return {'a': self.a,'arg': self.arg}
def __setstate__(self, state):
for key, value in state.iteritems():
setattr(self, key, value)
def __deepcopy__(self, memo=None):
new = self.__class__(deepcopy(self.arg, memo))
new.a = self.a
return new
c = C('argument sketch')
l.append(c)
l2 = copy(l)
print l == l2
print l
print l2
l2 = deepcopy(l)
print l == l2
print l
print l2
l.append({l[1]: l,'xyz': l[2]})
l3 = copy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
l3 = deepcopy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
class odict(dict):
def __init__(self, d={}):
self.a = 99
dict.__init__(self, d)
def __setitem__(self, k, i):
dict.__setitem__(self, k, i)
self.a
o = odict({'A': 'B'})
x = deepcopy(o)
print (o, x)
return
if __name__ == '__main__':
_test()
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import copy
import warnings
import numpy
from pyscf import lib
from pyscf.fci import cistring
from pyscf import symm
from pyscf import __config__
LARGE_CI_TOL = getattr(__config__, 'fci_addons_large_ci_tol', 0.1)
RETURN_STRS = getattr(__config__, 'fci_addons_large_ci_return_strs', True)
PENALTY = getattr(__config__, 'fci_addons_fix_spin_shift', 0.2)
def large_ci(ci, norb, nelec, tol=LARGE_CI_TOL, return_strs=RETURN_STRS):
'''Search for the largest CI coefficients
'''
neleca, nelecb = _unpack_nelec(nelec)
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
assert(ci.shape == (na, nb))
addra, addrb = numpy.where(abs(ci) > tol)
if addra.size == 0:
# No large CI coefficient > tol, search for the largest coefficient
addra, addrb = numpy.unravel_index(numpy.argmax(abs(ci)), ci.shape)
addra = numpy.asarray([addra])
addrb = numpy.asarray([addrb])
strsa = cistring.addrs2str(norb, neleca, addra)
strsb = cistring.addrs2str(norb, nelecb, addrb)
if return_strs:
strsa = [bin(x) for x in strsa]
strsb = [bin(x) for x in strsb]
return list(zip(ci[addra,addrb], strsa, strsb))
else:
occslsta = cistring._strs2occslst(strsa, norb)
occslstb = cistring._strs2occslst(strsb, norb)
return list(zip(ci[addra,addrb], occslsta, occslstb))
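# Illustrative usage (hypothetical numbers): for a CI vector over 4 orbitals
# with (2, 2) electrons that is dominated by the Hartree-Fock determinant,
# large_ci(ci, 4, (2, 2)) might return [(0.98, '0b11', '0b11')] -- the
# coefficient followed by the alpha and beta occupation strings.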
def initguess_triplet(norb, nelec, binstring):
'''Generate a triplet initial guess for FCI solver
'''
neleca, nelecb = _unpack_nelec(nelec)
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
addr = cistring.str2addr(norb, neleca, int(binstring,2))
ci0 = numpy.zeros((na,nb))
ci0[addr,0] = numpy.sqrt(.5)
    ci0[0,addr] = -numpy.sqrt(.5)
return ci0
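# Note (illustrative): the CI matrix built above is antisymmetric under
# transpose, i.e. it changes sign when the alpha and beta strings are
# exchanged; for neleca == nelecb that antisymmetry selects the odd-spin
# component, which is what makes this a pure triplet (Sz = 0) starting guess.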
def symm_initguess(norb, nelec, orbsym, wfnsym=0, irrep_nelec=None):
'''Generate CI wavefunction initial guess which has the given symmetry.
Args:
norb : int
Number of orbitals.
nelec : int or 2-item list
Number of electrons, or 2-item list for (alpha, beta) electrons
orbsym : list of int
The irrep ID for each orbital.
    Kwargs:
wfnsym : int
The irrep ID of target symmetry
irrep_nelec : dict
Freeze occupancy for certain irreps
Returns:
CI coefficients 2D array which has the target symmetry.
'''
neleca, nelecb = _unpack_nelec(nelec)
orbsym = numpy.asarray(orbsym)
if not isinstance(orbsym[0], numpy.number):
raise RuntimeError('TODO: convert irrep symbol to irrep id')
na = cistring.num_strings(norb, neleca)
nb = cistring.num_strings(norb, nelecb)
ci1 = numpy.zeros((na,nb))
########################
# pass 1: The fixed occs
orbleft = numpy.ones(norb, dtype=bool)
stra = numpy.zeros(norb, dtype=bool)
strb = numpy.zeros(norb, dtype=bool)
if irrep_nelec is not None:
for k,n in irrep_nelec.items():
orbleft[orbsym==k] = False
if isinstance(n, (int, numpy.number)):
idx = numpy.where(orbsym==k)[0][:n//2]
stra[idx] = True
strb[idx] = True
else:
na, nb = n
stra[numpy.where(orbsym==k)[0][:na]] = True
strb[numpy.where(orbsym==k)[0][:nb]] = True
if (na-nb)%2:
wfnsym ^= k
orbleft = numpy.where(orbleft)[0]
neleca_left = neleca - stra.sum()
nelecb_left = nelecb - strb.sum()
spin = neleca_left - nelecb_left
assert(neleca_left >= 0)
assert(nelecb_left >= 0)
assert(spin >= 0)
########################
# pass 2: search pattern
def gen_str_iter(orb_list, nelec):
if nelec == 1:
for i in orb_list:
yield [i]
elif nelec >= len(orb_list):
yield orb_list
else:
restorb = orb_list[1:]
#yield from gen_str_iter(restorb, nelec)
for x in gen_str_iter(restorb, nelec):
yield x
for x in gen_str_iter(restorb, nelec-1):
yield [orb_list[0]] + x
    # search for alpha and beta patterns which match the required symmetry
def query(target, nelec_atmost, spin, orbsym):
norb = len(orbsym)
for excite_level in range(1, nelec_atmost+1):
for beta_only in gen_str_iter(list(range(norb)), excite_level):
alpha_allow = [i for i in range(norb) if i not in beta_only]
alpha_orbsym = orbsym[alpha_allow]
alpha_target = target
for i in beta_only:
alpha_target ^= orbsym[i]
alpha_only = symm.route(alpha_target, spin+excite_level, alpha_orbsym)
if alpha_only:
alpha_only = [alpha_allow[i] for i in alpha_only]
return alpha_only, beta_only
raise RuntimeError('No pattern found for wfn irrep %s over orbsym %s'
% (target, orbsym))
if spin == 0:
aonly = bonly = []
if wfnsym != 0:
aonly, bonly = query(wfnsym, neleca_left, spin, orbsym[orbleft])
else:
# 1. assume "nelecb_left" doubly occupied orbitals
        # search for an alpha pattern which matches the required symmetry
aonly, bonly = orbleft[symm.route(wfnsym, spin, orbsym[orbleft])], []
        # decompose doubly occupied orbitals, search for alpha and beta patterns
if len(aonly) != spin:
aonly, bonly = query(wfnsym, neleca_left, spin, orbsym[orbleft])
ndocc = neleca_left - len(aonly) # == nelecb_left - len(bonly)
docc_allow = numpy.ones(len(orbleft), dtype=bool)
docc_allow[aonly] = False
docc_allow[bonly] = False
docclst = orbleft[numpy.where(docc_allow)[0]][:ndocc]
stra[docclst] = True
strb[docclst] = True
def find_addr_(stra, aonly, nelec):
stra[orbleft[aonly]] = True
return cistring.str2addr(norb, nelec, ('%i'*norb)%tuple(stra)[::-1])
if bonly:
if spin > 0:
aonly, socc_only = aonly[:-spin], aonly[-spin:]
stra[orbleft[socc_only]] = True
stra1 = stra.copy()
strb1 = strb.copy()
addra = find_addr_(stra, aonly, neleca)
addrb = find_addr_(strb, bonly, nelecb)
addra1 = find_addr_(stra1, bonly, neleca)
addrb1 = find_addr_(strb1, aonly, nelecb)
ci1[addra,addrb] = ci1[addra1,addrb1] = numpy.sqrt(.5)
else:
addra = find_addr_(stra, aonly, neleca)
addrb = find_addr_(strb, bonly, nelecb)
ci1[addra,addrb] = 1
return ci1
def cylindrical_init_guess(mol, norb, nelec, orbsym, wfnsym=0, singlet=True,
nroots=1):
'''
FCI initial guess for a system of cylindrical symmetry.
(In testing)
Examples:
>>> mol = gto.M(atom='O; O 1 1.2', spin=2, symmetry=True)
>>> orbsym = [6,7,2,3]
>>> ci0 = fci.addons.cylindrical_init_guess(mol, 4, (3,3), orbsym, wfnsym=10)[0]
>>> print(ci0.reshape(4,4))
>>> ci0 = fci.addons.cylindrical_init_guess(mol, 4, (3,3), orbsym, wfnsym=10, singlet=False)[0]
>>> print(ci0.reshape(4,4))
'''
warnings.warn('Initial guess for cylindrical symmetry is under testing')
neleca, nelecb = _unpack_nelec(nelec)
if isinstance(orbsym[0], str):
orbsym = [symm.irrep_name2id(mol.groupname, x) for x in orbsym]
orbsym = numpy.asarray(orbsym)
if isinstance(wfnsym, str):
wfnsym = symm.irrep_name2id(mol.groupname, wfnsym)
if mol.groupname in ('SO3', 'Dooh', 'Coov'):
def irrep_id2lz(irrep_id):
# See also symm.basis.DOOH_IRREP_ID_TABLE
level = irrep_id // 10
if mol.groupname == 'SO3':
level = level % 10 # See SO3 irreps in pyscf.symm.basis
d2h_id = irrep_id % 10
# irrep_id 0,1,4,5 corresponds to lz = 0,2,4,...
# irrep_id 2,3,6,7 corresponds to lz = 1,3,5,...
lz = level * 2 + ((d2h_id==2) | (d2h_id==3) | (d2h_id==6) | (d2h_id==7))
if isinstance(irrep_id, (int, numpy.number)):
# irrep_id 1,3,4,6 corresponds to E_y (E_{(-)})
# irrep_id 0,2,5,7 corresponds to E_x (E_{(+)})
if (d2h_id==1) | (d2h_id==3) | (d2h_id==4) | (d2h_id==6):
lz = -lz
else:
lz[(d2h_id==1) | (d2h_id==3) | (d2h_id==4) | (d2h_id==6)] *= -1
return lz
orb_lz = irrep_id2lz(orbsym)
wfn_lz = irrep_id2lz(wfnsym)
d2h_wfnsym_id = wfnsym % 10
else:
raise NotImplementedError
occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)
if neleca != nelecb:
occslstb = cistring._gen_occslst(range(norb), nelecb)
na = len(occslsta)
nb = len(occslstb)
gx_mask = orbsym == 2
gy_mask = orbsym == 3
ux_mask = orbsym == 7
uy_mask = orbsym == 6
all_lz = set(abs(orb_lz))
def search_open_shell_det(occ_lst):
occ_mask = numpy.zeros(norb, dtype=bool)
occ_mask[occ_lst] = True
# First, search for the Lz of the open-shell orbital
for lz_open in all_lz:
if numpy.count_nonzero(orb_lz == lz_open) % 2 == 1:
break
n_gx = numpy.count_nonzero(gx_mask & occ_mask & (orb_lz == lz_open))
n_gy = numpy.count_nonzero(gy_mask & occ_mask & (orb_lz ==-lz_open))
n_ux = numpy.count_nonzero(ux_mask & occ_mask & (orb_lz == lz_open))
n_uy = numpy.count_nonzero(uy_mask & occ_mask & (orb_lz ==-lz_open))
if n_gx > n_gy:
idx = numpy.where(occ_mask & (orb_lz == lz_open) & gx_mask)[0][0]
idy = numpy.where((~occ_mask) & (orb_lz ==-lz_open) & gy_mask)[0][0]
elif n_gx < n_gy:
idx = numpy.where((~occ_mask) & (orb_lz == lz_open) & gx_mask)[0][0]
idy = numpy.where(occ_mask & (orb_lz ==-lz_open) & gy_mask)[0][0]
elif n_ux > n_uy:
idx = numpy.where(occ_mask & (orb_lz == lz_open) & ux_mask)[0][0]
idy = numpy.where((~occ_mask) & (orb_lz ==-lz_open) & uy_mask)[0][0]
elif n_ux < n_uy:
idx = numpy.where((~occ_mask) & (orb_lz == lz_open) & ux_mask)[0][0]
idy = numpy.where(occ_mask & (orb_lz ==-lz_open) & uy_mask)[0][0]
else:
raise RuntimeError
nelec = len(occ_lst)
det_x = occ_mask.copy()
det_x[idx] = True
det_x[idy] = False
str_x = ''.join(['1' if i else '0' for i in det_x[::-1]])
addr_x = cistring.str2addr(norb, nelec, str_x)
det_y = occ_mask.copy()
det_y[idx] = False
det_y[idy] = True
str_y = ''.join(['1' if i else '0' for i in det_y[::-1]])
addr_y = cistring.str2addr(norb, nelec, str_y)
return addr_x, addr_y
ci0 = []
iroot = 0
for addr in range(na*nb):
ci_1 = numpy.zeros((na,nb))
addra = addr // nb
addrb = addr % nb
occa = occslsta[addra]
occb = occslstb[addrb]
tot_sym = 0
for i in occa:
tot_sym ^= orbsym[i]
for i in occb:
tot_sym ^= orbsym[i]
if tot_sym == d2h_wfnsym_id:
n_Ex_a = (gx_mask[occa]).sum() + (ux_mask[occa]).sum()
n_Ey_a = (gy_mask[occa]).sum() + (uy_mask[occa]).sum()
n_Ex_b = (gx_mask[occb]).sum() + (ux_mask[occb]).sum()
n_Ey_b = (gy_mask[occb]).sum() + (uy_mask[occb]).sum()
if abs(n_Ex_a - n_Ey_a) == 1 and abs(n_Ex_b - n_Ey_b) == 1:
# open-shell in both the alpha and the beta determinant, e.g. the
# valence part of the O2 molecule
addr_x_a, addr_y_a = search_open_shell_det(occa)
addr_x_b, addr_y_b = search_open_shell_det(occb)
if singlet:
if wfn_lz == 0:
ci_1[addr_x_a,addr_x_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_y_b] = numpy.sqrt(.5)
else:
ci_1[addr_x_a,addr_x_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_y_b] =-numpy.sqrt(.5)
else:
ci_1[addr_x_a,addr_y_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_x_b] =-numpy.sqrt(.5)
else:
# TODO: Other direct-product to direct-sum transformations
# which involve CG coefficients.
ci_1[addra,addrb] = 1
ci0.append(ci_1.ravel())
iroot += 1
if iroot >= nroots:
break
return ci0
def _symmetrize_wfn(ci, strsa, strsb, orbsym, wfnsym=0):
ci = ci.reshape(strsa.size,strsb.size)
airreps = numpy.zeros(strsa.size, dtype=numpy.int32)
birreps = numpy.zeros(strsb.size, dtype=numpy.int32)
orbsym_in_d2h = numpy.asarray(orbsym) % 10
wfnsym_in_d2h = wfnsym % 10
for i, ir in enumerate(orbsym_in_d2h):
airreps[numpy.bitwise_and(strsa, 1 << i) > 0] ^= ir
birreps[numpy.bitwise_and(strsb, 1 << i) > 0] ^= ir
mask = (airreps.reshape(-1,1) ^ birreps) == wfnsym_in_d2h
ci1 = numpy.zeros_like(ci)
ci1[mask] = ci[mask]
ci1 *= 1/numpy.linalg.norm(ci1)
return ci1
def symmetrize_wfn(ci, norb, nelec, orbsym, wfnsym=0):
'''Symmetrize the CI wavefunction by zeroing out the determinants which
do not have the right symmetry.
Args:
ci : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
nelec : int or 2-item list
Number of electrons, or 2-item list for (alpha, beta) electrons
orbsym : list of int
The irrep ID for each orbital.
Kwargs:
wfnsym : int
The irrep ID of target symmetry
Returns:
2D array which is the symmetrized CI coefficients
'''
neleca, nelecb = _unpack_nelec(nelec)
strsa = numpy.asarray(cistring.make_strings(range(norb), neleca))
strsb = numpy.asarray(cistring.make_strings(range(norb), nelecb))
return _symmetrize_wfn(ci, strsa, strsb, orbsym, wfnsym)
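# Illustrative sketch (not part of the original module): project a random
# trial vector onto the totally symmetric irrep.  The orbsym values are
# hypothetical D2h irrep IDs chosen only for demonstration.
def _example_symmetrize_wfn():
    norb, nelec = 4, (2, 2)
    orbsym = [0, 5, 2, 3]             # hypothetical orbital irrep IDs
    na = cistring.num_strings(norb, 2)
    ci = numpy.random.rand(na, na)    # unsymmetrized trial CI vector
    # zero out all determinants whose total irrep differs from wfnsym=0
    return symmetrize_wfn(ci, norb, nelec, orbsym, wfnsym=0)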
def _guess_wfnsym(ci, strsa, strsb, orbsym):
nb = len(strsb)
idx = abs(ci).argmax()
stra = strsa[idx // nb]
strb = strsb[idx % nb ]
orbsym_in_d2h = numpy.asarray(orbsym) % 10 # convert to D2h irreps
airrep = 0
birrep = 0
for i, ir in enumerate(orbsym_in_d2h):
if (stra & (1 << i)):
airrep ^= ir
if (strb & (1 << i)):
birrep ^= ir
return airrep ^ birrep
def guess_wfnsym(ci, norb, nelec, orbsym):
'''Guess the wavefunction symmetry based on the non-zero elements in the
given CI coefficients.
Args:
ci : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
nelec : int or 2-item list
Number of electrons, or 2-item list for (alpha, beta) electrons
orbsym : list of int
The irrep ID for each orbital.
Returns:
Irrep ID
'''
neleca, nelecb = _unpack_nelec(nelec)
strsa = numpy.asarray(cistring.make_strings(range(norb), neleca))
strsb = numpy.asarray(cistring.make_strings(range(norb), nelecb))
if isinstance(ci, numpy.ndarray) and ci.ndim <= 2:
wfnsym = _guess_wfnsym(ci, strsa, strsb, orbsym)
else:
wfnsym = [_guess_wfnsym(c, strsa, strsb, orbsym) for c in ci]
if any(wfnsym[0] != x for x in wfnsym):
warnings.warn('Different wfnsym %s found in different CI vectors' % wfnsym)
wfnsym = wfnsym[0]
return wfnsym
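# Illustrative sketch (not part of the original module): guess_wfnsym should
# recover the irrep that symmetrize_wfn was asked to enforce.  The orbsym
# values are hypothetical.
def _example_guess_wfnsym():
    norb, nelec = 4, (2, 2)
    orbsym = [0, 5, 2, 3]
    na = cistring.num_strings(norb, 2)
    ci = symmetrize_wfn(numpy.random.rand(na, na), norb, nelec, orbsym,
                        wfnsym=2)
    return guess_wfnsym(ci, norb, nelec, orbsym)  # expected: 2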
def des_a(ci0, norb, neleca_nelecb, ap_id):
r'''Construct (N-1)-electron wavefunction by removing an alpha electron from
the N-electron wavefunction.
.. math::
|N-1\rangle = \hat{a}_p |N\rangle
Args:
ci0 : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
(neleca,nelecb) : (int,int)
Number of (alpha, beta) electrons of the input CI function
ap_id : int
Orbital index (0-based), for the annihilation operator
Returns:
2D array, row for alpha strings and column for beta strings. Note it
has a different number of rows than the input CI coefficients.
'''
neleca, nelecb = neleca_nelecb
if neleca <= 0:
return numpy.zeros_like(ci0)
if ci0.ndim == 1:
ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
cistring.num_strings(norb, nelecb))
des_index = cistring.gen_des_str_index(range(norb), neleca)
na_ci1 = cistring.num_strings(norb, neleca-1)
ci1 = numpy.zeros((na_ci1, ci0.shape[1]))
entry_has_ap = (des_index[:,:,1] == ap_id)
addr_ci0 = numpy.any(entry_has_ap, axis=1)
addr_ci1 = des_index[entry_has_ap,2]
sign = des_index[entry_has_ap,3]
ci1[addr_ci1] = sign.reshape(-1,1) * ci0[addr_ci0]
return ci1
def des_b(ci0, norb, neleca_nelecb, ap_id):
r'''Construct (N-1)-electron wavefunction by removing a beta electron from
the N-electron wavefunction.
Args:
ci0 : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
(neleca,nelecb) : (int,int)
Number of (alpha, beta) electrons of the input CI function
ap_id : int
Orbital index (0-based), for the annihilation operator
Returns:
2D array, row for alpha strings and column for beta strings. Note it
has a different number of columns than the input CI coefficients.
'''
neleca, nelecb = neleca_nelecb
if nelecb <= 0:
return numpy.zeros_like(ci0)
if ci0.ndim == 1:
ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
cistring.num_strings(norb, nelecb))
des_index = cistring.gen_des_str_index(range(norb), nelecb)
nb_ci1 = cistring.num_strings(norb, nelecb-1)
ci1 = numpy.zeros((ci0.shape[0], nb_ci1))
entry_has_ap = (des_index[:,:,1] == ap_id)
addr_ci0 = numpy.any(entry_has_ap, axis=1)
addr_ci1 = des_index[entry_has_ap,2]
sign = des_index[entry_has_ap,3]
# This sign prefactor accounts for interchange of operators with alpha and beta spins
if neleca % 2 == 1:
sign *= -1
ci1[:,addr_ci1] = ci0[:,addr_ci0] * sign
return ci1
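# Illustrative sketch (not part of the original module): annihilate one alpha
# and then one beta electron from a (2,2)-electron vector in 3 orbitals.
def _example_des():
    norb = 3
    na = cistring.num_strings(norb, 2)    # 3 strings for 2 electrons
    ci = numpy.random.rand(na, na)
    ci /= numpy.linalg.norm(ci)
    ci_a = des_a(ci, norb, (2, 2), 0)     # a_{0 alpha}|N>, now (1,2) electrons
    ci_ab = des_b(ci_a, norb, (1, 2), 0)  # a_{0 beta} a_{0 alpha}|N>
    return ci_ab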
def cre_a(ci0, norb, neleca_nelecb, ap_id):
r'''Construct (N+1)-electron wavefunction by adding an alpha electron to
the N-electron wavefunction.
.. math::
|N+1\rangle = \hat{a}^+_p |N\rangle
Args:
ci0 : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
(neleca,nelecb) : (int,int)
Number of (alpha, beta) electrons of the input CI function
ap_id : int
Orbital index (0-based), for the creation operator
Returns:
2D array, row for alpha strings and column for beta strings. Note it
has a different number of rows than the input CI coefficients.
'''
neleca, nelecb = neleca_nelecb
if neleca >= norb:
return numpy.zeros_like(ci0)
if ci0.ndim == 1:
ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
cistring.num_strings(norb, nelecb))
cre_index = cistring.gen_cre_str_index(range(norb), neleca)
na_ci1 = cistring.num_strings(norb, neleca+1)
ci1 = numpy.zeros((na_ci1, ci0.shape[1]))
entry_has_ap = (cre_index[:,:,0] == ap_id)
addr_ci0 = numpy.any(entry_has_ap, axis=1)
addr_ci1 = cre_index[entry_has_ap,2]
sign = cre_index[entry_has_ap,3]
ci1[addr_ci1] = sign.reshape(-1,1) * ci0[addr_ci0]
return ci1
# construct (N+1)-electron wavefunction by adding a beta electron to
# N-electron wavefunction:
def cre_b(ci0, norb, neleca_nelecb, ap_id):
r'''Construct (N+1)-electron wavefunction by adding a beta electron to
the N-electron wavefunction.
Args:
ci0 : 2D array
CI coefficients, row for alpha strings and column for beta strings.
norb : int
Number of orbitals.
(neleca,nelecb) : (int,int)
Number of (alpha, beta) electrons of the input CI function
ap_id : int
Orbital index (0-based), for the creation operator
Returns:
2D array, row for alpha strings and column for beta strings. Note it
has a different number of columns than the input CI coefficients.
'''
neleca, nelecb = neleca_nelecb
if nelecb >= norb:
return numpy.zeros_like(ci0)
if ci0.ndim == 1:
ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
cistring.num_strings(norb, nelecb))
cre_index = cistring.gen_cre_str_index(range(norb), nelecb)
nb_ci1 = cistring.num_strings(norb, nelecb+1)
ci1 = numpy.zeros((ci0.shape[0], nb_ci1))
entry_has_ap = (cre_index[:,:,0] == ap_id)
addr_ci0 = numpy.any(entry_has_ap, axis=1)
addr_ci1 = cre_index[entry_has_ap,2]
sign = cre_index[entry_has_ap,3]
# This sign prefactor accounts for interchange of operators with alpha and beta spins
if neleca % 2 == 1:
sign *= -1
ci1[:,addr_ci1] = ci0[:,addr_ci0] * sign
return ci1
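# Illustrative sketch (not part of the original module): cre_a/cre_b are the
# adjoints of des_a/des_b; attach one alpha and one beta electron to a
# (1,1)-electron vector.
def _example_cre():
    norb = 3
    n1 = cistring.num_strings(norb, 1)
    ci = numpy.random.rand(n1, n1)        # a (1,1)-electron CI vector
    ci_a = cre_a(ci, norb, (1, 1), 2)     # a^+_{2 alpha}|N>, now (2,1) electrons
    ci_ab = cre_b(ci_a, norb, (2, 1), 2)  # a^+_{2 beta} a^+_{2 alpha}|N>
    return ci_ab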
def det_overlap(string1, string2, norb, s=None):
'''Determinants overlap on non-orthogonal one-particle basis'''
if s is None: # orthogonal basis with s_ij = delta_ij
return float(string1 == string2)
else:
if isinstance(string1, str):
nelec = string1.count('1')
string1 = int(string1, 2)
else:
nelec = bin(string1).count('1')
if isinstance(string2, str):
assert(string2.count('1') == nelec)
string2 = int(string2, 2)
else:
assert(bin(string2).count('1') == nelec)
idx1 = [i for i in range(norb) if (1 << i & string1)]
idx2 = [i for i in range(norb) if (1 << i & string2)]
s1 = lib.take_2d(s, idx1, idx2)
return numpy.linalg.det(s1)
def overlap(bra, ket, norb, nelec, s=None):
'''Overlap between two CI wavefunctions
Args:
s : 2D array or a list of 2D array
The overlap matrix of non-orthogonal one-particle basis
'''
if s is not None:
bra = transform_ci_for_orbital_rotation(bra, norb, nelec, s)
return numpy.dot(bra.ravel().conj(), ket.ravel())
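# Illustrative sketch (not part of the original module): in an orthogonal
# one-particle basis (s=None) the overlap of a normalized CI vector with
# itself is exactly 1.
def _example_overlap():
    norb, nelec = 4, (2, 2)
    na = cistring.num_strings(norb, 2)
    ci = numpy.random.rand(na, na)
    ci /= numpy.linalg.norm(ci)
    return overlap(ci, ci, norb, nelec)  # expected: 1.0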
def fix_spin_(fciobj, shift=PENALTY, ss=None, **kwargs):
r'''If the FCI solver cannot stay on a spin eigenfunction, this function
adds a shift to the states which have the wrong spin.
.. math::
(H + shift*S^2) |\Psi\rangle = E |\Psi\rangle
Args:
fciobj : An instance of :class:`FCISolver`
Kwargs:
shift : float
Level shift for states which have different spin
ss : number
S^2 expectation value == s*(s+1)
Returns:
A modified FCI object based on fciobj.
'''
import types
if 'ss_value' in kwargs:
sys.stderr.write('fix_spin_: kwarg "ss_value" will be removed in future release. '
'It was replaced by "ss"\n')
ss_value = kwargs['ss_value']
else:
ss_value = ss
if (not isinstance(fciobj, types.ModuleType)
and 'contract_2e' in getattr(fciobj, '__dict__', {})):
del fciobj.contract_2e # To avoid initializing it twice
old_contract_2e = fciobj.contract_2e
def contract_2e(eri, fcivec, norb, nelec, link_index=None, **kwargs):
if isinstance(nelec, (int, numpy.number)):
sz = (nelec % 2) * .5
else:
sz = abs(nelec[0]-nelec[1]) * .5
if ss_value is None:
ss = sz*(sz+1)
else:
ss = ss_value
if ss < sz*(sz+1)+.1:
# (S^2-ss)|Psi> to shift states other than the lowest state
ci1 = fciobj.contract_ss(fcivec, norb, nelec).reshape(fcivec.shape)
ci1 -= ss * fcivec
else:
# (S^2-ss)^2|Psi> to shift states other than those of the given spin.
# It still relies on the quality of the initial guess
tmp = fciobj.contract_ss(fcivec, norb, nelec).reshape(fcivec.shape)
tmp -= ss * fcivec
ci1 = -ss * tmp
ci1 += fciobj.contract_ss(tmp, norb, nelec).reshape(fcivec.shape)
tmp = None
ci1 *= shift
ci0 = old_contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)
ci1 += ci0.reshape(fcivec.shape)
return ci1
fciobj.davidson_only = True
fciobj.contract_2e = contract_2e
return fciobj
def fix_spin(fciobj, shift=.1, ss=None):
return fix_spin_(copy.copy(fciobj), shift, ss)
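# Illustrative sketch (not part of the original module): how fix_spin_ is
# typically attached to an FCI solver.  The integrals h1 and eri are
# placeholders; any FCISolver exposing contract_ss works.
#
#     from pyscf import fci
#     solver = fci.direct_spin1.FCI()
#     solver = fix_spin_(solver, shift=0.2, ss=0)  # penalize S^2 != 0
#     e, civec = solver.kernel(h1, eri, norb, nelec)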
def transform_ci_for_orbital_rotation(ci, norb, nelec, u):
'''
Transform CI coefficients (dimension conserved) to the representation in
new one-particle basis. Solving CI problem for Hamiltonian h1, h2 defined
in old basis,
CI_old = fci.kernel(h1, h2, ...)
Given orbital rotation u, the CI problem can be either solved by
transforming the Hamiltonian, or transforming the coefficients.
CI_new = fci.kernel(u^T*h1*u, ...) = transform_ci_for_orbital_rotation(CI_old, u)
Args:
u : a square 2D array or a list of 2D arrays
the orbital rotation to transform the old one-particle basis into the
new one-particle basis
'''
if isinstance(u, numpy.ndarray) and u.ndim == 2:
assert u.shape == (norb, norb)
else:
assert u[0].shape == (norb, norb) and u[1].shape == (norb, norb)
return transform_ci(ci, nelec, u)
def transform_ci(ci, nelec, u):
'''Transform CI coefficients to the representation in new one-particle basis.
Solving CI problem for Hamiltonian h1, h2 defined in old basis,
CI_old = fci.kernel(h1, h2, ...)
Given orbital rotation u, the CI problem can be either solved by
transforming the Hamiltonian, or transforming the coefficients.
CI_new = fci.kernel(u^T*h1*u, ...) = transform_ci_for_orbital_rotation(CI_old, u)
Args:
u : 2D array or a list of 2D arrays
the orbital rotation to transform the old one-particle basis into the
new one-particle basis. If u is not a square matrix, the resultant CI
coefficients array may have a different shape than the input CI
coefficients.
'''
neleca, nelecb = _unpack_nelec(nelec)
if isinstance(u, numpy.ndarray) and u.ndim == 2:
ua = ub = u
assert ua.shape == ub.shape
else:
ua, ub = u
norb_old, norb_new = ua.shape
na_old = cistring.num_strings(norb_old, neleca)
nb_old = cistring.num_strings(norb_old, nelecb)
na_new = cistring.num_strings(norb_new, neleca)
nb_new = cistring.num_strings(norb_new, nelecb)
ci = ci.reshape(na_old, nb_old)
one_particle_strs_old = numpy.asarray([1 << i for i in range(norb_old)])
one_particle_strs_new = numpy.asarray([1 << i for i in range(norb_new)])
if neleca == 0:
trans_ci_a = numpy.ones((1, 1))
else:
trans_ci_a = numpy.zeros((na_old, na_new))
strs_old = numpy.asarray(cistring.make_strings(range(norb_old), neleca))
# The unitary transformation array trans_ci is the overlap between the two sets of CI basis states.
occ_masks_old = (strs_old[:,None] & one_particle_strs_old) != 0
if norb_old == norb_new:
occ_masks_new = occ_masks_old
else:
strs_new = numpy.asarray(cistring.make_strings(range(norb_new), neleca))
occ_masks_new = (strs_new[:,None] & one_particle_strs_new) != 0
# Perform the equivalent of the following loop:
#for i in range(na_old): # old basis
# for j in range(na_new): # new basis
# uij = u[occ_masks_old[i]][:,occ_masks_new[j]]
# trans_ci_a[i,j] = numpy.linalg.det(uij)
occ_idx_all_strs = numpy.where(occ_masks_new)[1].reshape(na_new,neleca)
for i in range(na_old):
ui = ua[occ_masks_old[i]].T.copy()
minors = ui[occ_idx_all_strs]
trans_ci_a[i,:] = numpy.linalg.det(minors)
if neleca == nelecb and numpy.allclose(ua, ub):
trans_ci_b = trans_ci_a
elif nelecb == 0:
trans_ci_b = numpy.ones((1, 1))
else:
trans_ci_b = numpy.zeros((nb_old, nb_new))
strs_old = numpy.asarray(cistring.make_strings(range(norb_old), nelecb))
occ_masks_old = (strs_old[:,None] & one_particle_strs_old) != 0
if norb_old == norb_new:
occ_masks_new = occ_masks_old
else:
strs_new = numpy.asarray(cistring.make_strings(range(norb_new), nelecb))
occ_masks_new = (strs_new[:,None] & one_particle_strs_new) != 0
occ_idx_all_strs = numpy.where(occ_masks_new)[1].reshape(nb_new,nelecb)
for i in range(nb_old):
ui = ub[occ_masks_old[i]].T.copy()
minors = ui[occ_idx_all_strs]
trans_ci_b[i,:] = numpy.linalg.det(minors)
# Transform old basis to new basis for all alpha-electron excitations
ci = lib.dot(trans_ci_a.T, ci)
# Transform old basis to new basis for all beta-electron excitations
ci = lib.dot(ci, trans_ci_b)
return ci
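# Illustrative sketch (not part of the original module): an orthogonal
# rotation u can be undone by its transpose, so transforming twice returns
# the original CI vector.
def _example_transform_ci():
    norb, nelec = 4, (2, 2)
    na = cistring.num_strings(norb, 2)
    ci = numpy.random.rand(na, na)
    ci /= numpy.linalg.norm(ci)
    u = numpy.linalg.qr(numpy.random.rand(norb, norb))[0]  # orthogonal matrix
    ci1 = transform_ci(ci, nelec, u)
    ci2 = transform_ci(ci1, nelec, u.T)
    return abs(ci2 - ci).max()  # expected: ~0 up to round-off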
def _unpack_nelec(nelec, spin=None):
if spin is None:
spin = 0
else:
nelec = int(numpy.sum(nelec))
if isinstance(nelec, (int, numpy.number)):
nelecb = (nelec-spin)//2
neleca = nelec - nelecb
nelec = neleca, nelecb
return nelec
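# Illustrative examples (not part of the original module):
#   _unpack_nelec(6)              -> (3, 3)
#   _unpack_nelec(7)              -> (4, 3)
#   _unpack_nelec((4, 2))         -> (4, 2)   # passed through unchanged
#   _unpack_nelec((3, 3), spin=2) -> (4, 2)   # re-split 6 electrons with 2S=2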
del(LARGE_CI_TOL, RETURN_STRS, PENALTY)
"""Compressed Block Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats, spmatrix
from .sputils import (isshape, getdtype, to_native, upcast, get_index_dtype,
check_shape)
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
bsr_matmat_pass2, bsr_transpose, bsr_sort_indices,
bsr_tocsr)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
submatrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
format = 'bsr'
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if isspmatrix_bsr(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.tobsr(blocksize=blocksize)
self._set_self(arg1)
elif isinstance(arg1,tuple):
if isshape(arg1):
# it's a tuple of matrix dimensions (M,N)
self._shape = check_shape(arg1)
M,N = self.shape
# process blocksize
if blocksize is None:
blocksize = (1,1)
else:
if not isshape(blocksize):
raise ValueError('invalid blocksize=%s' % blocksize)
blocksize = tuple(blocksize)
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
raise ValueError('shape must be multiple of blocksize')
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
self.indices = np.zeros(0, dtype=idx_dtype)
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
elif len(arg1) == 2:
# (data,(row,col)) format
from .coo import coo_matrix
self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))
elif len(arg1) == 3:
# (data,indices,indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = 1
if shape is not None:
maxval = max(shape)
if blocksize is not None:
maxval = max(maxval, max(blocksize))
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError('unrecognized bsr_matrix constructor usage')
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except Exception:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
self._set_self(arg1)
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
M = len(self.indptr) - 1
N = self.indices.max() + 1
except Exception:
raise ValueError('unable to infer matrix dimensions')
else:
R,C = self.blocksize
self._shape = check_shape((M*R,N*C))
if self.shape is None:
if shape is None:
# TODO infer shape here
raise ValueError('need to infer shape')
else:
self._shape = check_shape(shape)
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
Parameters
----------
full_check : bool, optional
If True (default), run a rigorous check, O(N) operations.
If False, run a basic check, O(1) operations.
"""
M,N = self.shape
R,C = self.blocksize
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indices, self.indptr))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError("indices, and indptr should be 1-D")
if self.data.ndim != 3:
raise ValueError("data should be 3-D")
# check index pointer
if (len(self.indptr) != M//R + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), M//R + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= N//C:
raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
if self.indices.min() < 0:
raise ValueError("column index values must be >= 0")
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices(check_first=False)
def _get_blocksize(self):
return self.data.shape[1:]
blocksize = property(fget=_get_blocksize)
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for BSR format")
R,C = self.blocksize
return int(self.indptr[-1] * R * C)
getnnz.__doc__ = spmatrix.getnnz.__doc__
def __repr__(self):
format = _formats[self.getformat()][1]
return ("<%dx%d sparse matrix of type '%s'\n"
"\twith %d stored elements (blocksize = %dx%d) in %s format>" %
(self.shape + (self.dtype.type, self.nnz) + self.blocksize +
(format,)))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
R, C = self.blocksize
y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
_sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
self.indptr, self.indices,
np.ravel(self.data), y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
##########################
# NotImplemented methods #
##########################
def __getitem__(self,key):
raise NotImplementedError
def __setitem__(self,key,val):
raise NotImplementedError
######################
# Arithmetic methods #
######################
@np.deprecate(message="BSR matvec is deprecated in scipy 0.19.0. "
"Use * operator instead.")
def matvec(self, other):
"""Multiply matrix by vector."""
return self * other
@np.deprecate(message="BSR matmat is deprecated in scipy 0.19.0. "
"Use * operator instead.")
def matmat(self, other):
"""Multiply this sparse matrix by other matrix."""
return self * other
def _add_dense(self, other):
return self.tocoo(copy=False)._add_dense(other)
def _mul_vector(self, other):
M,N = self.shape
R,C = self.blocksize
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
bsr_matvec(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
other, result)
return result
def _mul_multivector(self,other):
R,C = self.blocksize
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
bsr_matvecs(M//R, N//C, n_vecs, R, C,
self.indptr, self.indices, self.data.ravel(),
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
R,n = self.blocksize
# convert to this format
if isspmatrix_bsr(other):
C = other.blocksize[1]
else:
C = 1
from .csr import isspmatrix_csr
if isspmatrix_csr(other) and n == 1:
other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion
else:
other = other.tobsr(blocksize=(n,C))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=(M//R)*(N//C))
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
csr_matmat_pass1(M//R, N//C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
indptr)
bnnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=bnnz)
indptr = indptr.astype(idx_dtype)
indices = np.empty(bnnz, dtype=idx_dtype)
data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
bsr_matmat_pass2(M//R, N//C, R, C, n,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
np.ravel(self.data),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
data = data.reshape(-1,R,C)
# TODO eliminate zeros
return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix into Block Sparse Row Format.
With copy=False, the data/indices may be shared between this
matrix and the resultant bsr_matrix.
If blocksize=(R, C) is provided, it will be used to determine the
block size of the bsr_matrix.
"""
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
def tocsr(self, copy=False):
M, N = self.shape
R, C = self.blocksize
nnz = self.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype))
bsr_tocsr(M // R, # n_brow
N // C, # n_bcol
R, C,
self.indptr.astype(idx_dtype, copy=False),
self.indices.astype(idx_dtype, copy=False),
self.data,
indptr,
indices,
data)
from .csr import csr_matrix
return csr_matrix((data, indices, indptr), shape=self.shape)
tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
return self.tocsr(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=True):
"""Convert this matrix to COOrdinate format.
When copy=False the data array will be shared between
this matrix and the resultant coo_matrix.
"""
M,N = self.shape
R,C = self.blocksize
indptr_diff = np.diff(self.indptr)
if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
# Check for potential overflow
indptr_diff_limited = indptr_diff.astype(np.intp)
if np.any(indptr_diff_limited != indptr_diff):
raise ValueError("Matrix too big to convert")
indptr_diff = indptr_diff_limited
row = (R * np.arange(M//R)).repeat(indptr_diff)
row = row.repeat(R*C).reshape(-1,R,C)
row += np.tile(np.arange(R).reshape(-1,1), (1,C))
row = row.reshape(-1)
col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
col += np.tile(np.arange(C), (R,1))
col = col.reshape(-1)
data = self.data.reshape(-1)
if copy:
data = data.copy()
from .coo import coo_matrix
return coo_matrix((data,(row,col)), shape=self.shape)
def toarray(self, order=None, out=None):
return self.tocoo(copy=False).toarray(order=order, out=out)
toarray.__doc__ = spmatrix.toarray.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
R, C = self.blocksize
M, N = self.shape
NBLK = self.nnz//(R*C)
if self.nnz == 0:
return bsr_matrix((N, M), blocksize=(C, R),
dtype=self.dtype, copy=copy)
indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
indices = np.empty(NBLK, dtype=self.indices.dtype)
data = np.empty((NBLK, C, R), dtype=self.data.dtype)
bsr_transpose(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
indptr, indices, data.ravel())
return bsr_matrix((data, indices, indptr),
shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero elements in-place."""
R,C = self.blocksize
M,N = self.shape
mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks
nonzero_blocks = mask.nonzero()[0]
if len(nonzero_blocks) == 0:
return # nothing to do
self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
# modifies self.indptr and self.indices *in place*
_sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
self.indices, mask)
self.prune()
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
R, C = self.blocksize
M, N = self.shape
# port of _sparsetools.csr_sum_duplicates
n_row = M // R
nnz = 0
row_end = 0
for i in range(n_row):
jj = row_end
row_end = self.indptr[i+1]
while jj < row_end:
j = self.indices[jj]
x = self.data[jj]
jj += 1
while jj < row_end and self.indices[jj] == j:
x += self.data[jj]
jj += 1
self.indices[nnz] = j
self.data[nnz] = x
nnz += 1
self.indptr[i+1] = nnz
self.prune() # nnz may have changed
self.has_canonical_format = True
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if self.has_sorted_indices:
return
R,C = self.blocksize
M,N = self.shape
bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
def _binopt(self, other, op, in_shape=None, out_shape=None):
"""Apply the binary operation fn to two sparse matrices."""
# Ideally we'd take the GCDs of the blocksize dimensions
# and explode self and other to match.
other = self.__class__(other, blocksize=self.blocksize)
# e.g. bsr_plus_bsr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
R,C = self.blocksize
max_bnnz = len(self.data) + len(other.data)
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=max_bnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(max_bnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(R*C*max_bnnz, dtype=np.bool_)
else:
data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
fn(self.shape[0]//R, self.shape[1]//C, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
actual_bnnz = indptr[-1]
indices = indices[:actual_bnnz]
data = data[:R*C*actual_bnnz]
if actual_bnnz < max_bnnz/2:
indices = indices.copy()
data = data.copy()
data = data.reshape(-1,R,C)
return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
# # to remove redundancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
"""Is x of a bsr_matrix type?
Parameters
----------
x
object to check for being a bsr matrix
Returns
-------
bool
True if x is a bsr matrix, False otherwise
Examples
--------
>>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
>>> isspmatrix_bsr(bsr_matrix([[5]]))
True
>>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
>>> isspmatrix_bsr(csr_matrix([[5]]))
False
"""
return isinstance(x, bsr_matrix)
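# Illustrative sketch (not part of the original module): round-trip a matrix
# with an exact 2x2 block structure through BSR.  The blocksize must divide
# the matrix shape evenly.
def _example_bsr_roundtrip():
    A = np.kron(np.array([[1, 0], [2, 3]]), np.ones((2, 2)))  # 4x4, 2x2 blocks
    B = bsr_matrix(A, blocksize=(2, 2))
    assert B.blocksize == (2, 2)
    return (B.toarray() == A).all()  # expected: True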
import sys, py
from rpython.tool.sourcetools import func_with_new_name
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.annlowlevel import (llhelper, MixLevelHelperAnnotator,
cast_base_ptr_to_instance, hlstr, cast_instance_to_gcref)
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.annotator import model as annmodel
from rpython.rtyper.llinterp import LLException
from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache
from rpython.flowspace.model import SpaceOperation, Variable, Constant
from rpython.flowspace.model import checkgraph, Link, copygraph
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.debug import fatalerror
from rpython.rlib.rstackovf import StackOverflow
from rpython.translator.backendopt import removenoops
from rpython.translator.unsimplify import call_final_function
from rpython.jit.metainterp import history, pyjitpl, gc, memmgr, jitexc
from rpython.jit.metainterp.pyjitpl import MetaInterpStaticData
from rpython.jit.metainterp.jitprof import Profiler, EmptyProfiler
from rpython.jit.metainterp.jitdriver import JitDriverStaticData
from rpython.jit.codewriter import support, codewriter
from rpython.jit.codewriter.policy import JitPolicy
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES
from rpython.rlib.entrypoint import all_jit_entrypoints,\
annotated_jit_entrypoints
# ____________________________________________________________
# Bootstrapping
def apply_jit(translator, backend_name="auto", inline=False,
vec=False, enable_opts=ALL_OPTS_NAMES, **kwds):
if 'CPUClass' not in kwds:
from rpython.jit.backend.detect_cpu import getcpuclass
kwds['CPUClass'] = getcpuclass(backend_name)
ProfilerClass = Profiler
# Always use Profiler here, which should have a very low impact.
# Otherwise you can try with ProfilerClass = EmptyProfiler.
warmrunnerdesc = WarmRunnerDesc(translator,
translate_support_code=True,
listops=True,
no_stats=True,
ProfilerClass=ProfilerClass,
**kwds)
for jd in warmrunnerdesc.jitdrivers_sd:
jd.warmstate.set_param_inlining(inline)
jd.warmstate.set_param_vec(vec)
jd.warmstate.set_param_enable_opts(enable_opts)
warmrunnerdesc.finish()
translator.warmrunnerdesc = warmrunnerdesc # for later debugging
def ll_meta_interp(function, args, backendopt=False,
listcomp=False, translationoptions={}, **kwds):
if listcomp:
extraconfigopts = {'translation.list_comprehension_operations': True}
else:
extraconfigopts = {}
for key, value in translationoptions.items():
extraconfigopts['translation.' + key] = value
interp, graph = get_interpreter(function, args,
backendopt=False, # will be done below
**extraconfigopts)
clear_tcache()
return jittify_and_run(interp, graph, args, backendopt=backendopt, **kwds)
def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False,
backendopt=False, trace_limit=sys.maxint, inline=False,
loop_longevity=0, retrace_limit=5, function_threshold=4,
disable_unrolling=sys.maxint,
enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
max_unroll_recursion=7, vec=1, vec_all=0, vec_cost=0,
vec_length=60, vec_ratio=2, vec_guard_ratio=3, **kwds):
from rpython.config.config import ConfigError
translator = interp.typer.annotator.translator
try:
translator.config.translation.gc = "boehm"
except (ConfigError, TypeError):
pass
try:
translator.config.translation.list_comprehension_operations = True
except ConfigError:
pass
warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds)
for jd in warmrunnerdesc.jitdrivers_sd:
jd.warmstate.set_param_threshold(3) # for tests
jd.warmstate.set_param_function_threshold(function_threshold)
jd.warmstate.set_param_trace_eagerness(2) # for tests
jd.warmstate.set_param_trace_limit(trace_limit)
jd.warmstate.set_param_inlining(inline)
jd.warmstate.set_param_loop_longevity(loop_longevity)
jd.warmstate.set_param_retrace_limit(retrace_limit)
jd.warmstate.set_param_max_retrace_guards(max_retrace_guards)
jd.warmstate.set_param_enable_opts(enable_opts)
jd.warmstate.set_param_max_unroll_recursion(max_unroll_recursion)
jd.warmstate.set_param_disable_unrolling(disable_unrolling)
jd.warmstate.set_param_vec(vec)
jd.warmstate.set_param_vec_all(vec_all)
jd.warmstate.set_param_vec_cost(vec_cost)
jd.warmstate.set_param_vec_length(vec_length)
jd.warmstate.set_param_vec_ratio(vec_ratio)
jd.warmstate.set_param_vec_guard_ratio(vec_guard_ratio)
warmrunnerdesc.finish()
if graph_and_interp_only:
return interp, graph
res = interp.eval_graph(graph, args)
if not kwds.get('translate_support_code', False):
warmrunnerdesc.metainterp_sd.profiler.finish()
warmrunnerdesc.metainterp_sd.cpu.finish_once()
print '~~~ return value:', repr(res)
while repeat > 1:
print '~' * 79
res1 = interp.eval_graph(graph, args)
if isinstance(res, int):
assert res1 == res
repeat -= 1
return res
def rpython_ll_meta_interp(function, args, backendopt=True, **kwds):
return ll_meta_interp(function, args, backendopt=backendopt,
translate_support_code=True, **kwds)
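# Illustrative sketch (not part of the original module): the usual test-suite
# pattern for pushing a small loop through the meta-interpreter.  The
# JitDriver and the function f below are hypothetical.
#
#     from rpython.rlib.jit import JitDriver
#     driver = JitDriver(greens=[], reds=['i', 'total'])
#     def f(n):
#         i = total = 0
#         while i < n:
#             driver.jit_merge_point(i=i, total=total)
#             total += i
#             i += 1
#         return total
#     res = ll_meta_interp(f, [10], listops=True)  # traces and runs f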
def _find_jit_marker(graphs, marker_name, check_driver=True):
results = []
for graph in graphs:
for block in graph.iterblocks():
for i in range(len(block.operations)):
op = block.operations[i]
if (op.opname == 'jit_marker' and
op.args[0].value == marker_name and
(not check_driver or op.args[1].value is None or
op.args[1].value.active)): # the jitdriver
results.append((graph, block, i))
return results
def _find_jit_markers(graphs, marker_names):
results = []
for graph in graphs:
for block in graph.iterblocks():
for i in range(len(block.operations)):
op = block.operations[i]
if (op.opname == 'jit_marker' and
op.args[0].value in marker_names):
results.append((graph, block, i))
return results
def find_can_enter_jit(graphs):
return _find_jit_marker(graphs, 'can_enter_jit')
def find_loop_headers(graphs):
return _find_jit_marker(graphs, 'loop_header')
def find_jit_merge_points(graphs):
results = _find_jit_marker(graphs, 'jit_merge_point')
if not results:
raise Exception("no jit_merge_point found!")
seen = set([graph for graph, block, pos in results])
assert len(seen) == len(results), (
"found several jit_merge_points in the same graph")
return results
def find_access_helpers(graphs):
return _find_jit_marker(graphs, 'access_helper', False)
def locate_jit_merge_point(graph):
[(graph, block, pos)] = find_jit_merge_points([graph])
return block, pos, block.operations[pos]
def find_set_param(graphs):
return _find_jit_marker(graphs, 'set_param')
def find_force_quasi_immutable(graphs):
results = []
for graph in graphs:
for block in graph.iterblocks():
for i in range(len(block.operations)):
op = block.operations[i]
if op.opname == 'jit_force_quasi_immutable':
results.append((graph, block, i))
return results
def get_stats():
return pyjitpl._warmrunnerdesc.stats
def reset_stats():
pyjitpl._warmrunnerdesc.stats.clear()
def reset_jit():
"""Helper for some tests (see micronumpy/test/test_zjit.py)"""
reset_stats()
pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear()
pyjitpl._warmrunnerdesc.jitcounter._clear_all()
def get_translator():
return pyjitpl._warmrunnerdesc.translator
def debug_checks():
stats = get_stats()
stats.maybe_view()
stats.check_consistency()
# ____________________________________________________________
class WarmRunnerDesc(object):
def __init__(self, translator, policy=None, backendopt=True, CPUClass=None,
ProfilerClass=EmptyProfiler, **kwds):
pyjitpl._warmrunnerdesc = self # this is a global for debugging only!
self.set_translator(translator)
self.memory_manager = memmgr.MemoryManager()
self.build_cpu(CPUClass, **kwds)
self.inline_inlineable_portals()
self.find_portals()
self.codewriter = codewriter.CodeWriter(self.cpu, self.jitdrivers_sd)
if policy is None:
policy = JitPolicy()
policy.set_supports_floats(self.cpu.supports_floats)
policy.set_supports_longlong(self.cpu.supports_longlong)
policy.set_supports_singlefloats(self.cpu.supports_singlefloats)
graphs = self.codewriter.find_all_graphs(policy)
policy.dump_unsafe_loops()
self.check_access_directly_sanity(graphs)
if backendopt:
self.prejit_optimizations(policy, graphs)
elif self.opt.listops:
self.prejit_optimizations_minimal_inline(policy, graphs)
self.build_meta_interp(ProfilerClass)
self.make_args_specifications()
#
from rpython.jit.metainterp.virtualref import VirtualRefInfo
vrefinfo = VirtualRefInfo(self)
self.codewriter.setup_vrefinfo(vrefinfo)
#
from rpython.jit.metainterp import counter
if self.cpu.translate_support_code:
self.jitcounter = counter.JitCounter(translator=translator)
else:
self.jitcounter = counter.DeterministicJitCounter()
#
self.hooks = policy.jithookiface
self.make_virtualizable_infos()
self.make_driverhook_graphs()
self.make_enter_functions()
self.rewrite_jit_merge_points(policy)
verbose = False # not self.cpu.translate_support_code
self.rewrite_access_helpers()
self.create_jit_entry_points()
self.codewriter.make_jitcodes(verbose=verbose)
self.rewrite_can_enter_jits()
self.rewrite_set_param_and_get_stats()
self.rewrite_force_virtual(vrefinfo)
self.rewrite_jitcell_accesses()
self.rewrite_force_quasi_immutable()
self.add_finish()
self.metainterp_sd.finish_setup(self.codewriter)
def finish(self):
vinfos = set([jd.virtualizable_info for jd in self.jitdrivers_sd])
for vinfo in vinfos:
if vinfo is not None:
vinfo.finish()
if self.cpu.translate_support_code:
self.annhelper.finish()
def _freeze_(self):
return True
def set_translator(self, translator):
self.translator = translator
self.rtyper = translator.rtyper
self.gcdescr = gc.get_description(translator.config)
def inline_inlineable_portals(self):
"""
Find all the graphs which have been decorated with @jitdriver.inline
and inline them in the callers, making them JIT portals. Then, create
a fresh copy of the jitdriver for each of those new portals, because
they cannot share the same one. See
test_ajit::test_inline_jit_merge_point
"""
from rpython.translator.backendopt.inline import (
inlinable_static_callers, auto_inlining)
jmp_calls = {}
def get_jmp_call(graph, _inline_jit_merge_point_):
# there might be multiple calls to the @inlined function: the
# first time we see it, we remove the call to the jit_merge_point
# and we remember the corresponding op. Then, we create a new call
# to it every time we need a new one (i.e., for each callsite
# which becomes a new portal)
try:
op, jmp_graph = jmp_calls[graph]
except KeyError:
op, jmp_graph = fish_jmp_call(graph, _inline_jit_merge_point_)
jmp_calls[graph] = op, jmp_graph
#
# clone the op
newargs = op.args[:]
newresult = Variable()
newresult.concretetype = op.result.concretetype
op = SpaceOperation(op.opname, newargs, newresult)
return op, jmp_graph
def fish_jmp_call(graph, _inline_jit_merge_point_):
# graph is a function which has been decorated with
# @jitdriver.inline, so its very first op is a call to the
# function which contains the actual jit_merge_point: fish it!
jmp_block, op_jmp_call = next(graph.iterblockops())
msg = ("The first operation of an _inline_jit_merge_point_ graph must be "
"a direct_call to the function passed to @jitdriver.inline()")
assert op_jmp_call.opname == 'direct_call', msg
jmp_funcobj = op_jmp_call.args[0].value._obj
assert jmp_funcobj._callable is _inline_jit_merge_point_, msg
jmp_block.operations.remove(op_jmp_call)
return op_jmp_call, jmp_funcobj.graph
# find all the graphs which call an @inline_in_portal function
callgraph = inlinable_static_callers(self.translator.graphs, store_calls=True)
new_callgraph = []
new_portals = set()
inlined_jit_merge_points = set()
for caller, block, op_call, callee in callgraph:
func = getattr(callee, 'func', None)
_inline_jit_merge_point_ = getattr(func, '_inline_jit_merge_point_', None)
if _inline_jit_merge_point_:
_inline_jit_merge_point_._always_inline_ = True
inlined_jit_merge_points.add(_inline_jit_merge_point_)
op_jmp_call, jmp_graph = get_jmp_call(callee, _inline_jit_merge_point_)
#
# now we move the op_jmp_call from callee to caller, just
# before op_call. We assume that the args passed to
# op_jmp_call are the very same which are received by callee
# (i.e., the one passed to op_call)
assert len(op_call.args) == len(op_jmp_call.args)
op_jmp_call.args[1:] = op_call.args[1:]
idx = block.operations.index(op_call)
block.operations.insert(idx, op_jmp_call)
#
# finally, we signal that we want to inline op_jmp_call into
# caller, so that finally the actual call to
# driver.jit_merge_point will be seen there
new_callgraph.append((caller, jmp_graph))
new_portals.add(caller)
# inline them!
inline_threshold = 0.1 # we rely on the _always_inline_ set above
auto_inlining(self.translator, inline_threshold, new_callgraph)
# clean up _always_inline_ = True, it can explode later
for item in inlined_jit_merge_points:
del item._always_inline_
# make a fresh copy of the JitDriver in all newly created
# jit_merge_points
self.clone_inlined_jit_merge_points(new_portals)
def clone_inlined_jit_merge_points(self, graphs):
"""
Find all the jit_merge_points in the given graphs, and replace the
original JitDriver with a fresh clone.
"""
if not graphs:
return
for graph, block, pos in find_jit_merge_points(graphs):
op = block.operations[pos]
v_driver = op.args[1]
driver = v_driver.value
if not driver.inline_jit_merge_point:
continue
new_driver = driver.clone()
c_new_driver = Constant(new_driver, v_driver.concretetype)
op.args[1] = c_new_driver
def find_portals(self):
self.jitdrivers_sd = []
graphs = self.translator.graphs
for graph, block, pos in find_jit_merge_points(graphs):
support.autodetect_jit_markers_redvars(graph)
self.split_graph_and_record_jitdriver(graph, block, pos)
#
assert (len(set([jd.jitdriver for jd in self.jitdrivers_sd])) ==
len(self.jitdrivers_sd)), \
"there are multiple jit_merge_points with the same jitdriver"
def split_graph_and_record_jitdriver(self, graph, block, pos):
op = block.operations[pos]
jd = JitDriverStaticData()
jd._jit_merge_point_in = graph
args = op.args[2:]
s_binding = self.translator.annotator.binding
jd._portal_args_s = [s_binding(v) for v in args]
graph = copygraph(graph)
[jmpp] = find_jit_merge_points([graph])
graph.startblock = support.split_before_jit_merge_point(*jmpp)
# XXX this is incredibly obscure, but this is sometimes necessary
# so we don't explode in checkgraph. For reasons unknown this
# is not contained within simplify_graph
removenoops.remove_same_as(graph)
# a crash in the following checkgraph() means that you forgot
# to list some variable in greens=[] or reds=[] in JitDriver,
# or that a jit_merge_point() takes a constant as an argument.
checkgraph(graph)
for v in graph.getargs():
assert isinstance(v, Variable)
assert len(dict.fromkeys(graph.getargs())) == len(graph.getargs())
self.translator.graphs.append(graph)
jd.portal_graph = graph
# it's a bit unbelievable to have a portal without func
assert hasattr(graph, "func")
graph.func._dont_inline_ = True
graph.func._jit_unroll_safe_ = True
jd.jitdriver = block.operations[pos].args[1].value
jd.vec = jd.jitdriver.vec
jd.portal_runner_ptr = "<not set so far>"
jd.result_type = history.getkind(jd.portal_graph.getreturnvar()
.concretetype)[0]
self.jitdrivers_sd.append(jd)
def check_access_directly_sanity(self, graphs):
from rpython.translator.backendopt.inline import collect_called_graphs
jit_graphs = set(graphs)
for graph in collect_called_graphs(self.translator.entry_point_graph,
self.translator):
if graph in jit_graphs:
continue
assert not getattr(graph, 'access_directly', False)
def prejit_optimizations(self, policy, graphs):
from rpython.translator.backendopt.all import backend_optimizations
backend_optimizations(self.translator,
graphs=graphs,
merge_if_blocks=True,
constfold=True,
raisingop2direct_call=False,
remove_asserts=True,
really_remove_asserts=True)
def prejit_optimizations_minimal_inline(self, policy, graphs):
from rpython.translator.backendopt.inline import auto_inline_graphs
auto_inline_graphs(self.translator, graphs, 0.01)
def build_cpu(self, CPUClass, translate_support_code=False,
no_stats=False, supports_floats=True,
supports_longlong=True, supports_singlefloats=True,
**kwds):
assert CPUClass is not None
self.opt = history.Options(**kwds)
if no_stats:
stats = history.NoStats()
else:
stats = history.Stats()
self.stats = stats
if translate_support_code:
self.annhelper = MixLevelHelperAnnotator(self.translator.rtyper)
cpu = CPUClass(self.translator.rtyper, self.stats, self.opt,
translate_support_code, gcdescr=self.gcdescr)
if not supports_floats:
cpu.supports_floats = False
if not supports_longlong:
cpu.supports_longlong = False
if not supports_singlefloats:
cpu.supports_singlefloats = False
self.cpu = cpu
def build_meta_interp(self, ProfilerClass):
self.metainterp_sd = MetaInterpStaticData(self.cpu,
self.opt,
ProfilerClass=ProfilerClass,
warmrunnerdesc=self)
def make_virtualizable_infos(self):
vinfos = {}
for jd in self.jitdrivers_sd:
#
jd.greenfield_info = None
for name in jd.jitdriver.greens:
if '.' in name:
from rpython.jit.metainterp.greenfield import GreenFieldInfo
jd.greenfield_info = GreenFieldInfo(self.cpu, jd)
break
#
if not jd.jitdriver.virtualizables:
jd.virtualizable_info = None
jd.index_of_virtualizable = -1
continue
else:
assert jd.greenfield_info is None, "XXX not supported yet"
#
jitdriver = jd.jitdriver
assert len(jitdriver.virtualizables) == 1 # for now
[vname] = jitdriver.virtualizables
# XXX skip the Voids here too
jd.index_of_virtualizable = jitdriver.reds.index(vname)
#
index = jd.num_green_args + jd.index_of_virtualizable
VTYPEPTR = jd._JIT_ENTER_FUNCTYPE.ARGS[index]
if VTYPEPTR not in vinfos:
from rpython.jit.metainterp.virtualizable import VirtualizableInfo
vinfos[VTYPEPTR] = VirtualizableInfo(self, VTYPEPTR)
jd.virtualizable_info = vinfos[VTYPEPTR]
def make_enter_functions(self):
for jd in self.jitdrivers_sd:
self.make_enter_function(jd)
def make_enter_function(self, jd):
from rpython.jit.metainterp.warmstate import WarmEnterState
state = WarmEnterState(self, jd)
maybe_compile_and_run = state.make_entry_point()
jd.warmstate = state
def crash_in_jit(e):
tb = not we_are_translated() and sys.exc_info()[2]
try:
raise e
except jitexc.JitException:
raise # go through
except MemoryError:
raise # go through
except StackOverflow:
raise # go through
except Exception, e:
if not we_are_translated():
print "~~~ Crash in JIT!"
print '~~~ %s: %s' % (e.__class__, e)
if sys.stdout == sys.__stdout__:
import pdb; pdb.post_mortem(tb)
raise e.__class__, e, tb
fatalerror('~~~ Crash in JIT! %s' % (e,))
crash_in_jit._dont_inline_ = True
def maybe_enter_jit(*args):
try:
maybe_compile_and_run(state.increment_threshold, *args)
except Exception as e:
crash_in_jit(e)
maybe_enter_jit._always_inline_ = True
jd._maybe_enter_jit_fn = maybe_enter_jit
jd._maybe_compile_and_run_fn = maybe_compile_and_run
def make_driverhook_graphs(self):
s_Str = annmodel.SomeString()
#
annhelper = MixLevelHelperAnnotator(self.translator.rtyper)
for jd in self.jitdrivers_sd:
jd._get_printable_location_ptr = self._make_hook_graph(jd,
annhelper, jd.jitdriver.get_printable_location, s_Str)
jd._get_unique_id_ptr = self._make_hook_graph(jd,
annhelper, jd.jitdriver.get_unique_id, annmodel.SomeInteger())
jd._confirm_enter_jit_ptr = self._make_hook_graph(jd,
annhelper, jd.jitdriver.confirm_enter_jit, annmodel.s_Bool,
onlygreens=False)
jd._can_never_inline_ptr = self._make_hook_graph(jd,
annhelper, jd.jitdriver.can_never_inline, annmodel.s_Bool)
jd._should_unroll_one_iteration_ptr = self._make_hook_graph(jd,
annhelper, jd.jitdriver.should_unroll_one_iteration,
annmodel.s_Bool)
annhelper.finish()
def _make_hook_graph(self, jitdriver_sd, annhelper, func,
s_result, s_first_arg=None, onlygreens=True):
if func is None:
return None
#
if not onlygreens:
assert not jitdriver_sd.jitdriver.autoreds, (
"reds='auto' is not compatible with JitDriver hooks such as "
"confirm_enter_jit")
extra_args_s = []
if s_first_arg is not None:
extra_args_s.append(s_first_arg)
#
args_s = jitdriver_sd._portal_args_s
if onlygreens:
args_s = args_s[:len(jitdriver_sd._green_args_spec)]
graph = annhelper.getgraph(func, extra_args_s + args_s, s_result)
funcptr = annhelper.graph2delayed(graph)
return funcptr
def make_args_specifications(self):
for jd in self.jitdrivers_sd:
self.make_args_specification(jd)
def make_args_specification(self, jd):
graph = jd._jit_merge_point_in
_, _, op = locate_jit_merge_point(graph)
greens_v, reds_v = support.decode_hp_hint_args(op)
ALLARGS = [v.concretetype for v in (greens_v + reds_v)]
jd._green_args_spec = [v.concretetype for v in greens_v]
jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v]
jd.num_green_args = len(jd._green_args_spec)
jd.num_red_args = len(jd.red_args_types)
RESTYPE = graph.getreturnvar().concretetype
(jd._JIT_ENTER_FUNCTYPE,
jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void)
(jd._PORTAL_FUNCTYPE,
jd._PTR_PORTAL_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, RESTYPE)
#
if jd.result_type == 'v':
ASMRESTYPE = lltype.Void
elif jd.result_type == history.INT:
ASMRESTYPE = lltype.Signed
elif jd.result_type == history.REF:
ASMRESTYPE = llmemory.GCREF
elif jd.result_type == history.FLOAT:
ASMRESTYPE = lltype.Float
else:
assert False
(_, jd._PTR_ASSEMBLER_HELPER_FUNCTYPE) = self.cpu.ts.get_FuncType(
[llmemory.GCREF, llmemory.GCREF], ASMRESTYPE)
def rewrite_jitcell_accesses(self):
jitdrivers_by_name = {}
for jd in self.jitdrivers_sd:
name = jd.jitdriver.name
if name != 'jitdriver':
jitdrivers_by_name[name] = jd
m = _find_jit_markers(self.translator.graphs,
('get_jitcell_at_key', 'trace_next_iteration',
'dont_trace_here', 'trace_next_iteration_hash'))
accessors = {}
def get_accessor(name, jitdriver_name, function, ARGS, green_arg_spec):
a = accessors.get((name, jitdriver_name))
if a:
return a
d = {'function': function,
'cast_instance_to_gcref': cast_instance_to_gcref,
'lltype': lltype}
arg_spec = ", ".join([("arg%d" % i) for i in range(len(ARGS))])
arg_converters = []
for i, spec in enumerate(green_arg_spec):
if isinstance(spec, lltype.Ptr):
arg_converters.append("arg%d = lltype.cast_opaque_ptr(type%d, arg%d)" % (i, i, i))
d['type%d' % i] = spec
convert = ";".join(arg_converters)
if name == 'get_jitcell_at_key':
exec py.code.Source("""
def accessor(%s):
%s
return cast_instance_to_gcref(function(%s))
""" % (arg_spec, convert, arg_spec)).compile() in d
FUNC = lltype.Ptr(lltype.FuncType(ARGS, llmemory.GCREF))
elif name == "trace_next_iteration_hash":
exec py.code.Source("""
def accessor(arg0):
function(arg0)
""").compile() in d
FUNC = lltype.Ptr(lltype.FuncType([lltype.Unsigned],
lltype.Void))
else:
exec py.code.Source("""
def accessor(%s):
%s
function(%s)
""" % (arg_spec, convert, arg_spec)).compile() in d
FUNC = lltype.Ptr(lltype.FuncType(ARGS, lltype.Void))
func = d['accessor']
ll_ptr = self.helper_func(FUNC, func)
accessors[(name, jitdriver_name)] = ll_ptr
return ll_ptr
for graph, block, index in m:
op = block.operations[index]
jitdriver_name = op.args[1].value
JitCell = jitdrivers_by_name[jitdriver_name].warmstate.JitCell
ARGS = [x.concretetype for x in op.args[2:]]
if op.args[0].value == 'get_jitcell_at_key':
func = JitCell.get_jitcell
elif op.args[0].value == 'dont_trace_here':
func = JitCell.dont_trace_here
elif op.args[0].value == 'trace_next_iteration_hash':
func = JitCell.trace_next_iteration_hash
else:
func = JitCell._trace_next_iteration
argspec = jitdrivers_by_name[jitdriver_name]._green_args_spec
accessor = get_accessor(op.args[0].value,
jitdriver_name, func,
ARGS, argspec)
v_result = op.result
c_accessor = Constant(accessor, concretetype=lltype.Void)
newop = SpaceOperation('direct_call', [c_accessor] + op.args[2:],
v_result)
block.operations[index] = newop
def rewrite_can_enter_jits(self):
sublists = {}
for jd in self.jitdrivers_sd:
sublists[jd.jitdriver] = jd, []
jd.no_loop_header = True
#
loop_headers = find_loop_headers(self.translator.graphs)
for graph, block, index in loop_headers:
op = block.operations[index]
jitdriver = op.args[1].value
assert jitdriver in sublists, \
"loop_header with no matching jit_merge_point"
jd, sublist = sublists[jitdriver]
jd.no_loop_header = False
#
can_enter_jits = find_can_enter_jit(self.translator.graphs)
for graph, block, index in can_enter_jits:
op = block.operations[index]
jitdriver = op.args[1].value
assert jitdriver in sublists, \
"can_enter_jit with no matching jit_merge_point"
assert not jitdriver.autoreds, (
"can_enter_jit not supported with a jitdriver that "
"has reds='auto'")
jd, sublist = sublists[jitdriver]
origportalgraph = jd._jit_merge_point_in
if graph is not origportalgraph:
sublist.append((graph, block, index))
jd.no_loop_header = False
else:
pass # a 'can_enter_jit' before the 'jit_merge_point', but
# originally in the same function: we ignore it here
# see e.g. test_jitdriver.test_simple
for jd in self.jitdrivers_sd:
_, sublist = sublists[jd.jitdriver]
self.rewrite_can_enter_jit(jd, sublist)
def rewrite_can_enter_jit(self, jd, can_enter_jits):
FUNCPTR = jd._PTR_JIT_ENTER_FUNCTYPE
jit_enter_fnptr = self.helper_func(FUNCPTR, jd._maybe_enter_jit_fn)
if len(can_enter_jits) == 0:
# see test_warmspot.test_no_loop_at_all
operations = jd.portal_graph.startblock.operations
op1 = operations[0]
assert (op1.opname == 'jit_marker' and
op1.args[0].value == 'jit_merge_point')
op0 = SpaceOperation(
'jit_marker',
[Constant('can_enter_jit', lltype.Void)] + op1.args[1:],
None)
operations.insert(0, op0)
can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)]
for graph, block, index in can_enter_jits:
if graph is jd._jit_merge_point_in:
continue
op = block.operations[index]
greens_v, reds_v = support.decode_hp_hint_args(op)
args_v = greens_v + reds_v
vlist = [Constant(jit_enter_fnptr, FUNCPTR)] + args_v
v_result = Variable()
v_result.concretetype = lltype.Void
newop = SpaceOperation('direct_call', vlist, v_result)
block.operations[index] = newop
def helper_func(self, FUNCPTR, func):
if not self.cpu.translate_support_code:
return llhelper(FUNCPTR, func)
FUNC = FUNCPTR.TO
args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS]
s_result = lltype_to_annotation(FUNC.RESULT)
graph = self.annhelper.getgraph(func, args_s, s_result)
return self.annhelper.graph2delayed(graph, FUNC)
def rewrite_access_helpers(self):
ah = find_access_helpers(self.translator.graphs)
for graph, block, index in ah:
op = block.operations[index]
self.rewrite_access_helper(op)
def create_jit_entry_points(self):
for func, args, result in all_jit_entrypoints:
self.helper_func(lltype.Ptr(lltype.FuncType(args, result)), func)
annotated_jit_entrypoints.append((func, None))
def rewrite_access_helper(self, op):
# make sure we make a copy of function so it no longer belongs
# to extregistry
func = op.args[1].value
if func.func_name.startswith('stats_'):
# gets special treatment since we rewrite it to a call that accepts
# the jit driver
assert len(op.args) >= 3, ("%r must have a first argument "
"(which is None)" % (func,))
func = func_with_new_name(func, func.func_name + '_compiled')
def new_func(ignored, *args):
return func(self, *args)
ARGS = [lltype.Void] + [arg.concretetype for arg in op.args[3:]]
else:
ARGS = [arg.concretetype for arg in op.args[2:]]
new_func = func_with_new_name(func, func.func_name + '_compiled')
RESULT = op.result.concretetype
FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT))
ptr = self.helper_func(FUNCPTR, new_func)
op.opname = 'direct_call'
op.args = [Constant(ptr, FUNCPTR)] + op.args[2:]
def rewrite_jit_merge_points(self, policy):
for jd in self.jitdrivers_sd:
self.rewrite_jit_merge_point(jd, policy)
def rewrite_jit_merge_point(self, jd, policy):
#
# Mutate the original portal graph from this:
#
# def original_portal(..):
# stuff
# while 1:
# jit_merge_point(*args)
# more stuff
#
# to that:
#
# def original_portal(..):
# stuff
# return portal_runner(*args)
#
# def portal_runner(*args):
# while 1:
# try:
# return portal(*args)
# except ContinueRunningNormally, e:
# *args = *e.new_args
# except DoneWithThisFrame, e:
# return e.return
# except ExitFrameWithException, e:
# raise Exception, e.value
#
# def portal(*args):
# while 1:
# more stuff
#
origportalgraph = jd._jit_merge_point_in
portalgraph = jd.portal_graph
PORTALFUNC = jd._PORTAL_FUNCTYPE
# ____________________________________________________________
# Prepare the portal_runner() helper
#
from rpython.jit.metainterp.warmstate import specialize_value
from rpython.jit.metainterp.warmstate import unspecialize_value
portal_ptr = self.cpu.ts.functionptr(PORTALFUNC, 'portal',
graph=portalgraph)
jd._portal_ptr = portal_ptr
#
portalfunc_ARGS = []
nums = {}
for i, ARG in enumerate(PORTALFUNC.ARGS):
kind = history.getkind(ARG)
assert kind != 'void'
if i < len(jd.jitdriver.greens):
color = 'green'
else:
color = 'red'
attrname = '%s_%s' % (color, kind)
count = nums.get(attrname, 0)
nums[attrname] = count + 1
portalfunc_ARGS.append((ARG, attrname, count))
portalfunc_ARGS = unrolling_iterable(portalfunc_ARGS)
#
rtyper = self.translator.rtyper
RESULT = PORTALFUNC.RESULT
result_kind = history.getkind(RESULT)
ts = self.cpu.ts
state = jd.warmstate
maybe_compile_and_run = jd._maybe_compile_and_run_fn
def ll_portal_runner(*args):
start = True
while 1:
try:
# maybe enter from the function's start. Note that the
# 'start' variable is constant-folded away because it's
# the first statement in the loop.
if start:
maybe_compile_and_run(
state.increment_function_threshold, *args)
#
# then run the normal portal function, i.e. the
# interpreter's main loop. It might enter the jit
# via maybe_enter_jit(), which typically ends with
# handle_fail() being called, which raises one of the
# following exceptions --- caught here, because we
# want to interrupt the whole interpreter loop.
return support.maybe_on_top_of_llinterp(rtyper,
portal_ptr)(*args)
except jitexc.ContinueRunningNormally, e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
x = specialize_value(ARGTYPE, x)
args = args + (x,)
start = False
continue
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
except jitexc.DoneWithThisFrameInt, e:
assert result_kind == 'int'
return specialize_value(RESULT, e.result)
except jitexc.DoneWithThisFrameRef, e:
assert result_kind == 'ref'
return specialize_value(RESULT, e.result)
except jitexc.DoneWithThisFrameFloat, e:
assert result_kind == 'float'
return specialize_value(RESULT, e.result)
except jitexc.ExitFrameWithExceptionRef, e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
raise Exception, value
def handle_jitexception(e):
# XXX the bulk of this function is mostly a copy-paste from above
try:
raise e
except jitexc.ContinueRunningNormally, e:
args = ()
for ARGTYPE, attrname, count in portalfunc_ARGS:
x = getattr(e, attrname)[count]
x = specialize_value(ARGTYPE, x)
args = args + (x,)
result = ll_portal_runner(*args)
if result_kind != 'void':
result = unspecialize_value(result)
return result
except jitexc.DoneWithThisFrameVoid:
assert result_kind == 'void'
return
except jitexc.DoneWithThisFrameInt, e:
assert result_kind == 'int'
return e.result
except jitexc.DoneWithThisFrameRef, e:
assert result_kind == 'ref'
return e.result
except jitexc.DoneWithThisFrameFloat, e:
assert result_kind == 'float'
return e.result
except jitexc.ExitFrameWithExceptionRef, e:
value = ts.cast_to_baseclass(e.value)
if not we_are_translated():
raise LLException(ts.get_typeptr(value), value)
else:
value = cast_base_ptr_to_instance(Exception, value)
raise Exception, value
jd._ll_portal_runner = ll_portal_runner # for debugging
jd.portal_runner_ptr = self.helper_func(jd._PTR_PORTAL_FUNCTYPE,
ll_portal_runner)
jd.portal_runner_adr = llmemory.cast_ptr_to_adr(jd.portal_runner_ptr)
jd.portal_calldescr = self.cpu.calldescrof(
jd._PTR_PORTAL_FUNCTYPE.TO,
jd._PTR_PORTAL_FUNCTYPE.TO.ARGS,
jd._PTR_PORTAL_FUNCTYPE.TO.RESULT,
EffectInfo.MOST_GENERAL)
vinfo = jd.virtualizable_info
def assembler_call_helper(deadframe, virtualizableref):
fail_descr = self.cpu.get_latest_descr(deadframe)
try:
fail_descr.handle_fail(deadframe, self.metainterp_sd, jd)
except jitexc.JitException, e:
return handle_jitexception(e)
else:
assert 0, "should have raised"
jd._assembler_call_helper = assembler_call_helper # for debugging
jd._assembler_helper_ptr = self.helper_func(
jd._PTR_ASSEMBLER_HELPER_FUNCTYPE,
assembler_call_helper)
jd.assembler_helper_adr = llmemory.cast_ptr_to_adr(
jd._assembler_helper_ptr)
if vinfo is not None:
jd.vable_token_descr = vinfo.vable_token_descr
def handle_jitexception_from_blackhole(bhcaller, e):
result = handle_jitexception(e)
if result_kind == 'void':
pass
elif result_kind == 'int':
bhcaller._setup_return_value_i(result)
elif result_kind == 'ref':
bhcaller._setup_return_value_r(result)
elif result_kind == 'float':
bhcaller._setup_return_value_f(result)
else:
assert False
jd.handle_jitexc_from_bh = handle_jitexception_from_blackhole
# ____________________________________________________________
# Now mutate origportalgraph to end with a call to portal_runner_ptr
#
origblock, origindex, op = locate_jit_merge_point(origportalgraph)
assert op.opname == 'jit_marker'
assert op.args[0].value == 'jit_merge_point'
greens_v, reds_v = support.decode_hp_hint_args(op)
vlist = [Constant(jd.portal_runner_ptr, jd._PTR_PORTAL_FUNCTYPE)]
vlist += greens_v
vlist += reds_v
v_result = Variable()
v_result.concretetype = PORTALFUNC.RESULT
newop = SpaceOperation('direct_call', vlist, v_result)
del origblock.operations[origindex:]
origblock.operations.append(newop)
origblock.exitswitch = None
origblock.recloseblock(Link([v_result], origportalgraph.returnblock))
# the origportal now can raise (even if it did not raise before),
# which means that we cannot inline it anywhere any more, but that's
# fine since any forced inlining has been done before
#
checkgraph(origportalgraph)
def add_finish(self):
def finish():
if self.metainterp_sd.profiler.initialized:
self.metainterp_sd.profiler.finish()
self.metainterp_sd.cpu.finish_once()
if self.cpu.translate_support_code:
call_final_function(self.translator, finish,
annhelper=self.annhelper)
def rewrite_set_param_and_get_stats(self):
from rpython.rtyper.lltypesystem.rstr import STR
closures = {}
graphs = self.translator.graphs
_, PTR_SET_PARAM_FUNCTYPE = self.cpu.ts.get_FuncType([lltype.Signed],
lltype.Void)
_, PTR_SET_PARAM_STR_FUNCTYPE = self.cpu.ts.get_FuncType(
[lltype.Ptr(STR)], lltype.Void)
def make_closure(jd, fullfuncname, is_string):
if jd is None:
def closure(i):
if is_string:
i = hlstr(i)
for jd in self.jitdrivers_sd:
getattr(jd.warmstate, fullfuncname)(i)
else:
state = jd.warmstate
def closure(i):
if is_string:
i = hlstr(i)
getattr(state, fullfuncname)(i)
if is_string:
TP = PTR_SET_PARAM_STR_FUNCTYPE
else:
TP = PTR_SET_PARAM_FUNCTYPE
funcptr = self.helper_func(TP, closure)
return Constant(funcptr, TP)
#
for graph, block, i in find_set_param(graphs):
op = block.operations[i]
if op.args[1].value is not None:
for jd in self.jitdrivers_sd:
if jd.jitdriver is op.args[1].value:
break
else:
assert 0, "jitdriver of set_param() not found"
else:
jd = None
funcname = op.args[2].value
key = jd, funcname
if key not in closures:
closures[key] = make_closure(jd, 'set_param_' + funcname,
funcname == 'enable_opts')
op.opname = 'direct_call'
op.args[:3] = [closures[key]]
def rewrite_force_virtual(self, vrefinfo):
all_graphs = self.translator.graphs
vrefinfo.replace_force_virtual_with_call(all_graphs)
def replace_force_quasiimmut_with_direct_call(self, op):
ARG = op.args[0].concretetype
mutatefieldname = op.args[1].value
key = (ARG, mutatefieldname)
if key in self._cache_force_quasiimmed_funcs:
cptr = self._cache_force_quasiimmed_funcs[key]
else:
from rpython.jit.metainterp import quasiimmut
func = quasiimmut.make_invalidation_function(ARG, mutatefieldname)
FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void))
llptr = self.helper_func(FUNC, func)
cptr = Constant(llptr, FUNC)
self._cache_force_quasiimmed_funcs[key] = cptr
op.opname = 'direct_call'
op.args = [cptr, op.args[0]]
def rewrite_force_quasi_immutable(self):
self._cache_force_quasiimmed_funcs = {}
graphs = self.translator.graphs
for graph, block, i in find_force_quasi_immutable(graphs):
self.replace_force_quasiimmut_with_direct_call(block.operations[i])
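# ----------------------------------------------------------------------
# Illustrative sketch (not part of warmspot.py): the user-facing shape of
# a JitDriver-based interpreter that the machinery above rewrites. The
# toy bytecode language and all names below are made up; only the
# JitDriver calls mirror the real rpython.rlib.jit API. Untranslated,
# the hints are cheap no-ops, so this also runs as plain Python 2.
from rpython.rlib.jit import JitDriver

toy_driver = JitDriver(greens=['pc', 'bytecode'], reds=['n', 'acc'])

def toy_interpret(bytecode, n):
    pc = 0
    acc = 0
    while pc < len(bytecode):
        # during translation this marker is what
        # rewrite_jit_merge_point() turns into a portal-runner call
        toy_driver.jit_merge_point(pc=pc, bytecode=bytecode, n=n, acc=acc)
        op = bytecode[pc]
        if op == '+':
            acc += 1
            pc += 1
        elif op == 'J' and n > 0:
            # backward jump: a loop header, so hint that the JIT may
            # enter here (this is what rewrite_can_enter_jits() rewrites)
            n -= 1
            pc = 0
            toy_driver.can_enter_jit(pc=pc, bytecode=bytecode, n=n, acc=acc)
        else:
            pc += 1
    return acc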
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional, Union
import msrest.serialization
from ._policy_client_enums import *
class PolicyAssignment(msrest.serialization.Model):
"""The policy assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the policy assignment.
:vartype id: str
:ivar type: The type of the policy assignment.
:vartype type: str
:ivar name: The name of the policy assignment.
:vartype name: str
:ivar display_name: The display name of the policy assignment.
:vartype display_name: str
:ivar policy_definition_id: The ID of the policy definition.
:vartype policy_definition_id: str
:ivar scope: The scope for the policy assignment.
:vartype scope: str
:ivar parameters: Required if a parameter is used in the policy rule.
:vartype parameters: any
:ivar description: This message will be part of the response in case of policy violation.
:vartype description: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': 'object'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[str] = None,
name: Optional[str] = None,
display_name: Optional[str] = None,
policy_definition_id: Optional[str] = None,
scope: Optional[str] = None,
parameters: Optional[Any] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword type: The type of the policy assignment.
:paramtype type: str
:keyword name: The name of the policy assignment.
:paramtype name: str
:keyword display_name: The display name of the policy assignment.
:paramtype display_name: str
:keyword policy_definition_id: The ID of the policy definition.
:paramtype policy_definition_id: str
:keyword scope: The scope for the policy assignment.
:paramtype scope: str
:keyword parameters: Required if a parameter is used in the policy rule.
:paramtype parameters: any
:keyword description: This message will be part of the response in case of policy violation.
:paramtype description: str
"""
super(PolicyAssignment, self).__init__(**kwargs)
self.id = None
self.type = type
self.name = name
self.display_name = display_name
self.policy_definition_id = policy_definition_id
self.scope = scope
self.parameters = parameters
self.description = description
class PolicyAssignmentListResult(msrest.serialization.Model):
"""List of policy assignments.
:ivar value: An array of policy assignments.
:vartype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicyAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["PolicyAssignment"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: An array of policy assignments.
:paramtype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super(PolicyAssignmentListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class PolicyDefinition(msrest.serialization.Model):
"""The policy definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the policy definition.
:vartype id: str
:ivar name: The name of the policy definition.
:vartype name: str
:ivar policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,
and Custom. Possible values include: "NotSpecified", "BuiltIn", "Custom".
:vartype policy_type: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyType
:ivar mode: The policy definition mode. Possible values are NotSpecified, Indexed, and All.
Possible values include: "NotSpecified", "Indexed", "All".
:vartype mode: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyMode
:ivar display_name: The display name of the policy definition.
:vartype display_name: str
:ivar description: The policy definition description.
:vartype description: str
:ivar policy_rule: The policy rule.
:vartype policy_rule: any
:ivar metadata: The policy definition metadata.
:vartype metadata: any
:ivar parameters: Required if a parameter is used in the policy rule.
:vartype parameters: any
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'policy_type': {'key': 'properties.policyType', 'type': 'str'},
'mode': {'key': 'properties.mode', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'policy_rule': {'key': 'properties.policyRule', 'type': 'object'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': 'object'},
}
def __init__(
self,
*,
policy_type: Optional[Union[str, "PolicyType"]] = None,
mode: Optional[Union[str, "PolicyMode"]] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
policy_rule: Optional[Any] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
**kwargs
):
"""
:keyword policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn,
and Custom. Possible values include: "NotSpecified", "BuiltIn", "Custom".
:paramtype policy_type: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyType
:keyword mode: The policy definition mode. Possible values are NotSpecified, Indexed, and All.
Possible values include: "NotSpecified", "Indexed", "All".
:paramtype mode: str or ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyMode
:keyword display_name: The display name of the policy definition.
:paramtype display_name: str
:keyword description: The policy definition description.
:paramtype description: str
:keyword policy_rule: The policy rule.
:paramtype policy_rule: any
:keyword metadata: The policy definition metadata.
:paramtype metadata: any
:keyword parameters: Required if a parameter is used in the policy rule.
:paramtype parameters: any
"""
super(PolicyDefinition, self).__init__(**kwargs)
self.id = None
self.name = None
self.policy_type = policy_type
self.mode = mode
self.display_name = display_name
self.description = description
self.policy_rule = policy_rule
self.metadata = metadata
self.parameters = parameters
class PolicyDefinitionListResult(msrest.serialization.Model):
"""List of policy definitions.
:ivar value: An array of policy definitions.
:vartype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyDefinition]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PolicyDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["PolicyDefinition"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: An array of policy definitions.
:paramtype value: list[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyDefinition]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super(PolicyDefinitionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
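# --------------------------------------------------------------------------
# Illustrative sketch (not part of the generated file): building a custom
# definition and an assignment that references it. All names, scopes and
# IDs below are placeholders.
if __name__ == "__main__":
    definition = PolicyDefinition(
        policy_type="Custom",
        mode="Indexed",
        display_name="Require a costCenter tag",
        policy_rule={
            "if": {"field": "tags.costCenter", "exists": "false"},
            "then": {"effect": "deny"},
        },
    )
    assignment = PolicyAssignment(
        name="require-cost-center",
        display_name="Require a costCenter tag",
        policy_definition_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Authorization/policyDefinitions"
            "/require-cost-center"
        ),
        scope="/subscriptions/00000000-0000-0000-0000-000000000000",
    )
    # msrest models know their wire format via _attribute_map, so
    # serialize() nests display_name etc. under 'properties'.
    print(assignment.serialize())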
"""Support to interface with the Plex API."""
import logging
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_EPISODE,
MEDIA_CLASS_MOVIE,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_SEASON,
MEDIA_CLASS_TRACK,
MEDIA_CLASS_TV_SHOW,
MEDIA_CLASS_VIDEO,
)
from homeassistant.components.media_player.errors import BrowseError
from .const import DOMAIN
class UnknownMediaType(BrowseError):
"""Unknown media type."""
EXPANDABLES = ["album", "artist", "playlist", "season", "show"]
PLAYLISTS_BROWSE_PAYLOAD = {
"title": "Playlists",
"media_class": MEDIA_CLASS_DIRECTORY,
"media_content_id": "all",
"media_content_type": "playlists",
"can_play": False,
"can_expand": True,
}
SPECIAL_METHODS = {
"On Deck": "onDeck",
"Recently Added": "recentlyAdded",
}
ITEM_TYPE_MEDIA_CLASS = {
"album": MEDIA_CLASS_ALBUM,
"artist": MEDIA_CLASS_ARTIST,
"episode": MEDIA_CLASS_EPISODE,
"movie": MEDIA_CLASS_MOVIE,
"playlist": MEDIA_CLASS_PLAYLIST,
"season": MEDIA_CLASS_SEASON,
"show": MEDIA_CLASS_TV_SHOW,
"track": MEDIA_CLASS_TRACK,
"video": MEDIA_CLASS_VIDEO,
}
_LOGGER = logging.getLogger(__name__)
def browse_media(
entity_id, plex_server, media_content_type=None, media_content_id=None
):
"""Implement the websocket media browsing helper."""
def build_item_response(payload):
"""Create response payload for the provided media query."""
media = plex_server.lookup_media(**payload)
if media is None:
return None
try:
media_info = item_payload(media)
except UnknownMediaType:
return None
if media_info.can_expand:
media_info.children = []
for item in media:
try:
media_info.children.append(item_payload(item))
except UnknownMediaType:
continue
return media_info
if media_content_id and ":" in media_content_id:
media_content_id, special_folder = media_content_id.split(":")
else:
special_folder = None
if (
media_content_type
and media_content_type == "server"
and media_content_id != plex_server.machine_identifier
):
raise BrowseError(
f"Plex server with ID '{media_content_id}' is not associated with {entity_id}"
)
if special_folder:
if media_content_type == "server":
library_or_section = plex_server.library
children_media_class = MEDIA_CLASS_DIRECTORY
title = plex_server.friendly_name
elif media_content_type == "library":
library_or_section = plex_server.library.sectionByID(media_content_id)
title = library_or_section.title
try:
children_media_class = ITEM_TYPE_MEDIA_CLASS[library_or_section.TYPE]
except KeyError as err:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
) from err
else:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
payload = {
"title": title,
"media_class": MEDIA_CLASS_DIRECTORY,
"media_content_id": f"{media_content_id}:{special_folder}",
"media_content_type": media_content_type,
"can_play": False,
"can_expand": True,
"children": [],
"children_media_class": children_media_class,
}
method = SPECIAL_METHODS[special_folder]
items = getattr(library_or_section, method)()
for item in items:
try:
payload["children"].append(item_payload(item))
except UnknownMediaType:
continue
return BrowseMedia(**payload)
try:
if media_content_type in ["server", None]:
return server_payload(plex_server)
if media_content_type == "library":
return library_payload(plex_server, media_content_id)
except UnknownMediaType as err:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
) from err
if media_content_type == "playlists":
return playlists_payload(plex_server)
payload = {
"media_type": DOMAIN,
"plex_key": int(media_content_id),
}
response = build_item_response(payload)
if response is None:
raise BrowseError(f"Media not found: {media_content_type} / {media_content_id}")
return response
def item_payload(item):
"""Create response payload for a single media item."""
try:
media_class = ITEM_TYPE_MEDIA_CLASS[item.type]
except KeyError as err:
_LOGGER.debug("Unknown type received: %s", item.type)
raise UnknownMediaType from err
payload = {
"title": item.title,
"media_class": media_class,
"media_content_id": str(item.ratingKey),
"media_content_type": item.type,
"can_play": True,
"can_expand": item.type in EXPANDABLES,
}
if hasattr(item, "thumbUrl"):
payload["thumbnail"] = item.thumbUrl
return BrowseMedia(**payload)
def library_section_payload(section):
"""Create response payload for a single library section."""
try:
children_media_class = ITEM_TYPE_MEDIA_CLASS[section.TYPE]
except KeyError as err:
_LOGGER.debug("Unknown type received: %s", section.TYPE)
raise UnknownMediaType from err
return BrowseMedia(
title=section.title,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=section.key,
media_content_type="library",
can_play=False,
can_expand=True,
children_media_class=children_media_class,
)
def special_library_payload(parent_payload, special_type):
"""Create response payload for special library folders."""
title = f"{special_type} ({parent_payload.title})"
return BrowseMedia(
title=title,
media_class=parent_payload.media_class,
media_content_id=f"{parent_payload.media_content_id}:{special_type}",
media_content_type=parent_payload.media_content_type,
can_play=False,
can_expand=True,
children_media_class=parent_payload.children_media_class,
)
def server_payload(plex_server):
"""Create response payload to describe libraries of the Plex server."""
server_info = BrowseMedia(
title=plex_server.friendly_name,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=plex_server.machine_identifier,
media_content_type="server",
can_play=False,
can_expand=True,
)
server_info.children = []
server_info.children_media_class = MEDIA_CLASS_DIRECTORY
server_info.children.append(special_library_payload(server_info, "On Deck"))
server_info.children.append(special_library_payload(server_info, "Recently Added"))
for library in plex_server.library.sections():
if library.type == "photo":
continue
server_info.children.append(library_section_payload(library))
server_info.children.append(BrowseMedia(**PLAYLISTS_BROWSE_PAYLOAD))
return server_info
def library_payload(plex_server, library_id):
"""Create response payload to describe contents of a specific library."""
library = plex_server.library.sectionByID(library_id)
library_info = library_section_payload(library)
library_info.children = []
library_info.children.append(special_library_payload(library_info, "On Deck"))
library_info.children.append(
special_library_payload(library_info, "Recently Added")
)
for item in library.all():
try:
library_info.children.append(item_payload(item))
except UnknownMediaType:
continue
return library_info
def playlists_payload(plex_server):
"""Create response payload for all available playlists."""
playlists_info = {**PLAYLISTS_BROWSE_PAYLOAD, "children": []}
for playlist in plex_server.playlists():
try:
playlists_info["children"].append(item_payload(playlist))
except UnknownMediaType:
continue
response = BrowseMedia(**playlists_info)
response.children_media_class = MEDIA_CLASS_PLAYLIST
return response
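# Illustrative sketch (not part of the integration): item_payload() only
# relies on duck-typed attributes, so a stub can stand in for a plexapi
# object when exploring the payload shape (assumes Home Assistant is
# importable; the stub values below are made up).
if __name__ == "__main__":
    class _StubTrack:
        type = "track"
        title = "Example Track"
        ratingKey = 12345
        thumbUrl = "http://plex.example/thumb/12345"

    info = item_payload(_StubTrack())
    # tracks are playable but not expandable (not in EXPANDABLES)
    print(info.title, info.media_class, info.can_play, info.can_expand)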
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class LoadBalancersOperations(object):
"""LoadBalancersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-01"
self.config = config
def _delete_initial(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrest.polling.LROPoller[None] or
~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True:
    polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False:
    polling_method = NoPolling()
else:
    polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def get(
self, resource_group_name, load_balancer_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LoadBalancer or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_03_01.models.LoadBalancer or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def _create_or_update_initial(
self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LoadBalancer')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load
balancer operation.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.LoadBalancer
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns LoadBalancer or
ClientRawResponse<LoadBalancer> if raw==True
:rtype:
~msrest.polling.LROPoller[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
or
~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True:
    polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False:
    polling_method = NoPolling()
else:
    polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancers in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2017_03_01.models.LoadBalancerPaged[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2017_03_01.models.LoadBalancerPaged[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'}
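# --------------------------------------------------------------------------
# Usage sketch (not part of the generated file): this class is reached
# through NetworkManagementClient rather than instantiated directly. The
# credentials, subscription id and resource names below are placeholders,
# so this only runs against a real subscription.
if __name__ == '__main__':
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.network import NetworkManagementClient

    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = NetworkManagementClient(credentials, '<subscription-id>')

    # delete() returns an LROPoller; wait() blocks until the
    # long-running operation completes.
    poller = client.load_balancers.delete('example-rg', 'example-lb')
    poller.wait()

    # list() returns a paged iterator that fetches pages lazily.
    for lb in client.load_balancers.list('example-rg'):
        print(lb.name)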
from importlib import import_module
from django.conf import settings
from orchestra.core.errors import InvalidSlugValue
from orchestra.core.errors import SlugUniquenessError
class Workflow():
"""
Workflows represent execution graphs of human and machine steps.
Attributes:
slug (str):
Unique identifier for the workflow.
name (str):
Human-readable name for the workflow.
description (str):
A longer description of the workflow.
steps (dict):
Steps comprising the workflow.
"""
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
if len(self.slug) > 200:
raise InvalidSlugValue('Slug value should be at most 200 chars')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.steps = {}
def add_step(self, step):
"""
Add `step` to the workflow.
Args:
step (orchestra.workflow.Step):
The step to be added.
Returns:
None
Raises:
orchestra.core.errors.InvalidSlugValue:
Step slug should have fewer than 200 characters.
orchestra.core.errors.SlugUniquenessError:
Step slug has already been used in this workflow.
"""
if len(step.slug) > 200:
raise InvalidSlugValue('Slug value should be at most 200 chars')
if step.slug in self.steps:
raise SlugUniquenessError('Slug value already taken')
self.steps[step.slug] = step
def get_steps(self):
"""
Return all steps for the workflow.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
List of steps for the workflow.
"""
return self.steps.values()
def get_step_slugs(self):
"""
Return all step slugs for the workflow.
Args:
None
Returns:
slugs ([str]):
List of step slugs for the workflow.
"""
return self.steps.keys()
def get_step(self, slug):
"""
Return the specified step from the workflow.
Args:
slug (str):
The slug of the desired step.
Returns:
step (orchestra.workflow.Step):
The specified step from the workflow.
"""
return self.steps[slug]
def get_human_steps(self):
"""
Return steps from the workflow with a human `worker_type`.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
Steps from the workflow with a human `worker_type`.
"""
return [step for slug, step in self.steps.items()
if step.worker_type == Step.WorkerType.HUMAN]
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
class Step():
"""
Steps represent nodes on a workflow execution graph.
Attributes:
slug (str):
Unique identifier for the step.
name (str):
Human-readable name for the step.
description (str):
A longer description of the step.
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
creation_depends_on ([str]):
Slugs for steps on which this step's creation depends.
submission_depends_on ([str]):
Slugs for steps on which this step's submission depends.
function (function):
Function to execute during step. Should be present only for
machine tasks
required_certifications ([str]):
Slugs for certifications required for a worker to pick up
tasks based on this step.
"""
class WorkerType:
"""Specifies whether step is performed by human or machine"""
HUMAN = 0
MACHINE = 1
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.worker_type = kwargs.get('worker_type')
self.creation_depends_on = kwargs.get('creation_depends_on') or []
self.submission_depends_on = kwargs.get('submission_depends_on') or []
self.function = kwargs.get('function')
self.required_certifications = kwargs.get(
'required_certifications') or []
# Example: {'policy': 'previously_completed_steps', 'step': ['design']}
self.assignment_policy = (kwargs.get('assignment_policy')
or get_default_policy(self.worker_type,
'assignment_policy'))
# Example: {'policy': 'sampled_review', 'rate': .25, 'max_reviews': 2}
self.review_policy = (kwargs.get('review_policy')
or get_default_policy(self.worker_type,
'review_policy'))
# Example: {'html_blob': 'http://some_url',
# 'javascript_includes': [url1, url2],
# 'css_includes': [url1, url2]}
self.user_interface = kwargs.get('user_interface') or {}
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
def get_workflows():
"""
Return all stored workflows.
Args:
None
Returns:
workflows (dict):
A dict of all workflows keyed by slug.
"""
workflows = {}
for backend_module, variable in settings.ORCHESTRA_PATHS:
backend_module = import_module(backend_module)
workflow = getattr(backend_module, variable)
if workflow.slug in workflows:
raise SlugUniquenessError('Repeated slug value for workflows.')
workflows[workflow.slug] = workflow
return workflows
def get_workflow_by_slug(slug):
"""
Return the workflow specified by `slug`.
Args:
slug (str):
The slug of the desired workflow.
Returns:
workflow (orchestra.workflow.Workflow):
The corresponding workflow object.
"""
return get_workflows()[slug]
def get_workflow_choices():
"""
Return workflow data formatted as `choices` for a model field.
Args:
None
Returns:
workflow_choices (tuple):
A tuple of tuples containing each workflow slug and
human-readable name.
"""
workflows = get_workflows()
choices = []
for slug, workflow in workflows.items():
choices.append((slug, workflow.name))
return tuple(choices)
def get_step_choices():
"""
Return step data formatted as `choices` for a model field.
Args:
None
Returns:
step_choices (tuple):
A tuple of tuples containing each step slug and
human-readable name.
"""
choices = []
for slug, workflow in iter(get_workflows().items()):
for step in workflow.get_steps():
choices.append((step.slug, step.name))
return tuple(choices)
def get_default_policy(worker_type, policy_name):
"""
Return the default value for a specified policy.
Args:
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
policy_name (str):
The specified policy identifier.
Returns:
default_policy (dict):
A dict containing the default policy for the worker type and
policy name specified.
"""
default_policies = {
'assignment_policy': {'policy': 'anyone_certified'},
'review_policy': {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 1}
}
if worker_type == Step.WorkerType.HUMAN:
return default_policies[policy_name]
else:
return {}
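# Illustrative sketch (not part of the module): wiring a two-step workflow
# by hand. The slugs, certification and machine function below are made up.
if __name__ == '__main__':
    def _autopublish(task_data):
        return {'published': True}

    workflow = Workflow(
        slug='website_copy',
        name='Website copy',
        description='Draft and publish copy for a site.')
    workflow.add_step(Step(
        slug='draft',
        name='Draft copy',
        description='Write the first draft.',
        worker_type=Step.WorkerType.HUMAN,
        required_certifications=['copywriting']))
    workflow.add_step(Step(
        slug='publish',
        name='Publish copy',
        description='Publish the approved draft.',
        worker_type=Step.WorkerType.MACHINE,
        creation_depends_on=['draft'],
        function=_autopublish))
    # add_step() enforces slug length and uniqueness; lookups go by slug.
    assert workflow.get_step('publish').creation_depends_on == ['draft']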
NAME = 'PyYAML'
VERSION = '3.10'
DESCRIPTION = "YAML parser and emitter for Python"
LONG_DESCRIPTION = """\
YAML is a data serialization format designed for human readability
and interaction with scripting languages. PyYAML is a YAML parser
and emitter for Python.
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that
allow representing arbitrary Python objects.
PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistence."""
AUTHOR = "Kirill Simonov"
AUTHOR_EMAIL = 'xi@resolvent.net'
LICENSE = "MIT"
PLATFORMS = "Any"
URL = "http://pyyaml.org/wiki/PyYAML"
DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
]
LIBYAML_CHECK = """
#include <yaml.h>
int main(void) {
yaml_parser_t parser;
yaml_emitter_t emitter;
yaml_parser_initialize(&parser);
yaml_parser_delete(&parser);
yaml_emitter_initialize(&emitter);
yaml_emitter_delete(&emitter);
return 0;
}
"""
import sys, os.path
from distutils import log
from distutils.core import setup, Command
from distutils.core import Distribution as _Distribution
from distutils.core import Extension as _Extension
from distutils.dir_util import mkpath
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
from distutils.errors import CompileError, LinkError, DistutilsPlatformError
if 'setuptools.extension' in sys.modules:
_Extension = sys.modules['setuptools.extension']._Extension
sys.modules['distutils.core'].Extension = _Extension
sys.modules['distutils.extension'].Extension = _Extension
sys.modules['distutils.command.build_ext'].Extension = _Extension
with_pyrex = None
if sys.version_info[0] < 3:
try:
from Cython.Distutils.extension import Extension as _Extension
from Cython.Distutils import build_ext as _build_ext
with_pyrex = 'cython'
except ImportError:
try:
# Pyrex cannot build _yaml.c at the moment,
# but it may get fixed eventually.
from Pyrex.Distutils import Extension as _Extension
from Pyrex.Distutils import build_ext as _build_ext
with_pyrex = 'pyrex'
except ImportError:
pass
class Distribution(_Distribution):
def __init__(self, attrs=None):
_Distribution.__init__(self, attrs)
if not self.ext_modules:
return
for idx in range(len(self.ext_modules)-1, -1, -1):
ext = self.ext_modules[idx]
if not isinstance(ext, Extension):
continue
setattr(self, ext.attr_name, None)
self.global_options = [
(ext.option_name, None,
"include %s (default if %s is available)"
% (ext.feature_description, ext.feature_name)),
(ext.neg_option_name, None,
"exclude %s" % ext.feature_description),
] + self.global_options
self.negative_opt = self.negative_opt.copy()
self.negative_opt[ext.neg_option_name] = ext.option_name
def has_ext_modules(self):
if not self.ext_modules:
return False
for ext in self.ext_modules:
with_ext = self.ext_status(ext)
if with_ext is None or with_ext:
return True
return False
def ext_status(self, ext):
if 'Java' in sys.version or 'IronPython' in sys.version or 'PyPy' in sys.version:
return False
if isinstance(ext, Extension):
with_ext = getattr(self, ext.attr_name)
return with_ext
else:
return True
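# Note on the tri-state returned by ext_status(): for a feature Extension the
# corresponding attribute starts as None (set in __init__ above), meaning
# "autodetect"; the --with-<feature>/--without-<feature> flags force it to a
# true or false value via the global options registered above. build_ext
# below only runs the feature_check probe while the value is still None.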
class Extension(_Extension):
def __init__(self, name, sources, feature_name, feature_description,
feature_check, **kwds):
if not with_pyrex:
for filename in sources[:]:
base, ext = os.path.splitext(filename)
if ext == '.pyx':
sources.remove(filename)
sources.append('%s.c' % base)
_Extension.__init__(self, name, sources, **kwds)
self.feature_name = feature_name
self.feature_description = feature_description
self.feature_check = feature_check
self.attr_name = 'with_' + feature_name.replace('-', '_')
self.option_name = 'with-' + feature_name
self.neg_option_name = 'without-' + feature_name
class build_ext(_build_ext):
def run(self):
optional = True
disabled = True
for ext in self.extensions:
with_ext = self.distribution.ext_status(ext)
if with_ext is None:
disabled = False
elif with_ext:
optional = False
disabled = False
break
if disabled:
return
try:
_build_ext.run(self)
except DistutilsPlatformError:
exc = sys.exc_info()[1]
if optional:
log.warn(str(exc))
log.warn("skipping build_ext")
else:
raise
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
if with_pyrex == 'pyrex':
self.pyrex_sources(ext.sources, ext)
elif with_pyrex == 'cython':
self.cython_sources(ext.sources, ext)
for filename in ext.sources:
filenames.append(filename)
base = os.path.splitext(filename)[0]
for ext in ['c', 'h', 'pyx', 'pxd']:
filename = '%s.%s' % (base, ext)
if filename not in filenames and os.path.isfile(filename):
filenames.append(filename)
return filenames
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
if os.path.isfile(filename):
outputs.append(filename)
return outputs
def build_extensions(self):
self.check_extensions_list(self.extensions)
for ext in self.extensions:
with_ext = self.distribution.ext_status(ext)
if with_ext is None:
with_ext = self.check_extension_availability(ext)
if not with_ext:
continue
if with_pyrex == 'pyrex':
ext.sources = self.pyrex_sources(ext.sources, ext)
elif with_pyrex == 'cython':
ext.sources = self.cython_sources(ext.sources, ext)
self.build_extension(ext)
def check_extension_availability(self, ext):
cache = os.path.join(self.build_temp, 'check_%s.out' % ext.feature_name)
if not self.force and os.path.isfile(cache):
data = open(cache).read().strip()
if data == '1':
return True
elif data == '0':
return False
mkpath(self.build_temp)
src = os.path.join(self.build_temp, 'check_%s.c' % ext.feature_name)
open(src, 'w').write(ext.feature_check)
log.info("checking if %s is compilable" % ext.feature_name)
try:
[obj] = self.compiler.compile([src],
macros=ext.define_macros+[(undef,) for undef in ext.undef_macros],
include_dirs=ext.include_dirs,
extra_postargs=(ext.extra_compile_args or []),
depends=ext.depends)
except CompileError:
log.warn("")
log.warn("%s is not found or a compiler error: forcing --%s"
% (ext.feature_name, ext.neg_option_name))
log.warn("(if %s is installed correctly, you may need to"
% ext.feature_name)
log.warn(" specify the option --include-dirs or uncomment and")
log.warn(" modify the parameter include_dirs in setup.cfg)")
open(cache, 'w').write('0\n')
return False
prog = 'check_%s' % ext.feature_name
log.info("checking if %s is linkable" % ext.feature_name)
try:
self.compiler.link_executable([obj], prog,
output_dir=self.build_temp,
libraries=ext.libraries,
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=(ext.extra_link_args or []))
except LinkError:
log.warn("")
log.warn("%s is not found or a linker error: forcing --%s"
% (ext.feature_name, ext.neg_option_name))
log.warn("(if %s is installed correctly, you may need to"
% ext.feature_name)
log.warn(" specify the option --library-dirs or uncomment and")
log.warn(" modify the parameter library_dirs in setup.cfg)")
open(cache, 'w').write('0\n')
return False
open(cache, 'w').write('1\n')
return True
class bdist_rpm(_bdist_rpm):
def _make_spec_file(self):
argv0 = sys.argv[0]
features = []
for ext in self.distribution.ext_modules:
if not isinstance(ext, Extension):
continue
with_ext = getattr(self.distribution, ext.attr_name)
if with_ext is None:
continue
if with_ext:
features.append('--'+ext.option_name)
else:
features.append('--'+ext.neg_option_name)
sys.argv[0] = ' '.join([argv0]+features)
spec_file = _bdist_rpm._make_spec_file(self)
sys.argv[0] = argv0
return spec_file
class test(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
build_cmd = self.get_finalized_command('build')
build_cmd.run()
sys.path.insert(0, build_cmd.build_lib)
if sys.version_info[0] < 3:
sys.path.insert(0, 'tests/lib')
else:
sys.path.insert(0, 'tests/lib3')
import test_all
test_all.main([])
if __name__ == '__main__':
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
platforms=PLATFORMS,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
package_dir={'': {2: 'lib', 3: 'lib3'}[sys.version_info[0]]},
packages=['yaml'],
ext_modules=[
Extension('_yaml', ['ext/_yaml.pyx'],
'libyaml', "LibYAML bindings", LIBYAML_CHECK,
libraries=['yaml']),
],
distclass=Distribution,
cmdclass={
'build_ext': build_ext,
'bdist_rpm': bdist_rpm,
'test': test,
},
)
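# Usage sketch: the Distribution subclass injects one pair of global flags per
# feature extension, so the libyaml bindings can be forced on or off:
#
#     python setup.py --with-libyaml build
#     python setup.py --without-libyaml install
#
# With neither flag given, check_extension_availability() compiles and links
# the LIBYAML_CHECK probe program and caches the result under build_temp.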
import numpy as np
from ...core.utils import as_id_array
from ...graph.structured_quad.structured_quad import StructuredQuadGraphTopology
from . import _neighbors_at_link
def neighbors_at_link(shape, links):
"""Get neighbor links.
Examples
--------
>>> import numpy as np
>>> from landlab.components.overland_flow._links import neighbors_at_link
>>> neighbors_at_link((3, 2), np.arange(7)) # doctest: +NORMALIZE_WHITESPACE
array([[-1, 3, -1, -1],
[ 2, 4, -1, -1],
[-1, 5, 1, -1],
[-1, 6, -1, 0],
[ 5, 7, -1, 1],
[-1, -1, 4, 2],
[-1, -1, -1, 3]])
"""
links = np.asarray(links, dtype=int)
out = np.full((links.size, 4), -1, dtype=int)
_neighbors_at_link.neighbors_at_link(links, shape, out)
return out
def vertical_link_ids(shape):
"""Vertical links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(M, N) ndarray :
Array of link IDs.
Examples
--------
>>> from landlab.components.overland_flow._links import vertical_link_ids
>>> vertical_link_ids((3, 4))
array([[ 3, 4, 5, 6],
[10, 11, 12, 13]])
"""
layout = StructuredQuadGraphTopology(shape)
return layout.vertical_links.reshape((shape[0] - 1, shape[1]))
def horizontal_link_ids(shape):
"""Horizontal links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(M, N) ndarray :
Array of link IDs.
Examples
--------
>>> from landlab.components.overland_flow._links import horizontal_link_ids
>>> horizontal_link_ids((3, 4))
array([[ 0, 1, 2],
[ 7, 8, 9],
[14, 15, 16]])
"""
layout = StructuredQuadGraphTopology(shape)
return layout.horizontal_links.reshape((shape[0], shape[1] - 1))
def vertical_south_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of south, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - *must be of len(vertical_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *south* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (
... vertical_link_ids, vertical_south_link_neighbor
... )
>>> rmg = RasterModelGrid((4, 5))
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_south_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
4, 5, 6, 7, 8,
13, 14, 15, 16, 17])
"""
vertical_links = StructuredQuadGraphTopology(shape).vertical_links
vertical_links[shape[1] :] = vertical_links[: -shape[1]]
vertical_links[: shape[1]] = bad_index_value
return vertical_links
def vertical_west_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of west, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - *must be of len(vertical_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *west* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import vertical_link_ids, vertical_west_link_neighbor
>>> rmg = RasterModelGrid((4, 5))
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_west_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, 4, 5, 6, 7,
-1, 13, 14, 15, 16,
-1, 22, 23, 24, 25])
"""
vertical_links = StructuredQuadGraphTopology(shape).vertical_links.reshape(
(shape[0] - 1, shape[1])
)
vertical_links[:, 1:] = vertical_links[:, :-1]
vertical_links[:, 0] = bad_index_value
return vertical_links.reshape(-1)
def vertical_north_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of north, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - *must be of len(vertical_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *north* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import vertical_link_ids, vertical_north_link_neighbor
>>> rmg = RasterModelGrid((4, 5))
>>> vertical_ids = vertical_link_ids(rmg.shape)
>>> vertical_north_link_neighbor(rmg.shape, vertical_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([13, 14, 15, 16, 17,
22, 23, 24, 25, 26,
-1, -1, -1, -1, -1])
"""
vertical_links = StructuredQuadGraphTopology(shape).vertical_links
vertical_links[: -shape[1]] = vertical_links[shape[1] :]
vertical_links[-shape[1] :] = bad_index_value
return vertical_links
def vertical_east_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of east, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - *must be of len(vertical_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *east* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import vertical_link_ids, vertical_east_link_neighbor
>>> rmg = RasterModelGrid((4, 5))
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_east_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([ 5, 6, 7, 8, -1,
14, 15, 16, 17, -1,
23, 24, 25, 26, -1])
"""
vertical_links = StructuredQuadGraphTopology(shape).vertical_links.reshape(
(shape[0] - 1, shape[1])
)
vertical_links[:, :-1] = vertical_links[:, 1:]
vertical_links[:, -1] = bad_index_value
return vertical_links.reshape(-1)
def active_link_ids(shape, node_status):
"""Get active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_like
Status of nodes in grid.
Returns
-------
ndarray :
Links IDs at the active links.
Examples
--------
>>> from landlab.grid import RasterModelGrid
>>> from landlab.components.overland_flow._links import active_link_ids
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4,
4, 0, 0, 4,
4, 4, 4, 4], dtype=uint8)
>>> active_link_ids((3, 4), status)
array([8])
"""
return as_id_array(np.where(is_active_link(shape, node_status))[0])
def is_active_link(shape, node_status):
"""Link IDs of active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_like
Status of nodes in grid.
Returns
-------
ndarray :
Links IDs at the active links.
Examples
--------
>>> from landlab.components.overland_flow._links import is_active_link
>>> from landlab.grid.nodestatus import NodeStatus
>>> status = [
... [NodeStatus.CLOSED, NodeStatus.CLOSED, NodeStatus.CLOSED],
... [NodeStatus.CLOSED, NodeStatus.CORE, NodeStatus.CLOSED],
... [NodeStatus.CLOSED, NodeStatus.CORE, NodeStatus.CLOSED],
... [NodeStatus.CLOSED, NodeStatus.CLOSED, NodeStatus.CLOSED],
... ]
>>> is_active_link((4, 3), status) # doctest: +NORMALIZE_WHITESPACE
array([False, False,
False, False, False,
False, False,
False, True, False,
False, False,
False, False, False,
False, False], dtype=bool)
"""
from ...grid.linkstatus import is_active_link
node_status = np.asarray(node_status).reshape(-1)
if np.prod(shape) != node_status.size:
raise ValueError(
"node status array does not match size of grid "
"(%d != %d)" % (np.prod(shape), len(node_status))
)
# status_at_link_start = node_status.flat[node_id_at_link_start(shape)]
# status_at_link_end = node_status.flat[node_id_at_link_end(shape)]
# status_at_link = node_status[StructuredQuadGraphTopology(shape).nodes_at_link]
return is_active_link(node_status[StructuredQuadGraphTopology(shape).nodes_at_link])
def vertical_active_link_ids(shape, active_ids, bad_index_value=-1):
"""ID of vertical active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
active_ids : array of int
Array of all active link ids
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the VERTICAL active links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->o---H-->o---H-->o---I-->*
^ ^ ^ ^ ^
I 6 7 8 I
| | | | |
*---I-->o---H-->o---H-->o---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to `NodeStatus.CLOSED`
``o`` indicates the nodes that are set to `NodeStatus.CORE`
``I`` indicates the links that are set to `LinkStatus.INACTIVE`
``H`` indicates horizontal active ids, which are ignored by this
function
Numeric values correspond to the vertical `LinkStatus.ACTIVE` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (active_link_ids,
... vertical_active_link_ids)
>>> rmg = RasterModelGrid((4, 5))
>>> active_ids = active_link_ids((4, 5), rmg.status_at_node)
>>> active_ids # doctest: +NORMALIZE_WHITESPACE
array([ 5, 6, 7,
9, 10, 11, 12,
14, 15, 16,
18, 19, 20, 21,
23, 24, 25])
>>> vertical_active_link_ids((4, 5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, 5, 6, 7, -1,
-1, 14, 15, 16, -1,
-1, 23, 24, 25, -1])
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> active_ids = active_link_ids((4, 5), status)
>>> vertical_active_link_ids((4, 5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
-1, 14, 15, 16, -1,
-1, -1, -1, -1, -1])
"""
number_of_vertical_links = (shape[0] - 1) * shape[1]
out = np.full(number_of_vertical_links, bad_index_value, dtype=int)
vertical_ids = active_ids[np.where(is_vertical_link(shape, active_ids))]
out[nth_vertical_link(shape, vertical_ids)] = vertical_ids
return out
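# Implementation note: this is a mask-then-scatter idiom. is_vertical_link()
# filters the active IDs down to the vertical ones, nth_vertical_link() maps
# each surviving grid-wide link ID to its ordinal among vertical links, and
# the scatter assignment leaves bad_index_value everywhere else.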
def _number_of_links(shape):
"""Number of links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of links in grid.
Examples
--------
>>> from landlab.components.overland_flow._links import _number_of_links
>>> _number_of_links((3, 4))
17
"""
return (shape[0] - 1) * shape[1] + shape[0] * (shape[1] - 1)
# return number_of_vertical_links(shape) + number_of_horizontal_links(shape)
def number_of_vertical_links(shape):
"""Number of vertical links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of vertical links in grid.
Examples
--------
>>> from landlab.components.overland_flow._links import number_of_vertical_links
>>> number_of_vertical_links((3, 4))
8
"""
return (shape[0] - 1) * shape[1]
def number_of_horizontal_links(shape):
"""Number of horizontal links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of horizontal links in grid.
Examples
--------
>>> from landlab.components.overland_flow._links import number_of_horizontal_links
>>> number_of_horizontal_links((3, 4))
9
"""
return shape[0] * (shape[1] - 1)
def is_vertical_link(shape, links):
"""Test if links are vertical.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of bool
`True` for links that are vertical.
Examples
--------
>>> from landlab.components.overland_flow._links import (is_vertical_link,
... _number_of_links)
>>> import numpy as np
>>> shape = (3, 4)
>>> links = np.arange(_number_of_links(shape))
>>> is_vertical_link(shape, links) # doctest: +NORMALIZE_WHITESPACE
array([False, False, False, True, True, True, True,
False, False, False, True, True, True, True,
False, False, False], dtype=bool)
"""
return ((links % (2 * shape[1] - 1)) >= shape[1] - 1) & (
links < _number_of_links(shape)
)
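# Derivation of the test above: links are numbered row by row, and each node
# row contributes shape[1] - 1 horizontal links followed by shape[1] vertical
# links, i.e. 2 * shape[1] - 1 links per repeating block. A link whose offset
# within its block is >= shape[1] - 1 therefore lands in the vertical part.
# For shape == (3, 4): block size 7, offsets 0-2 horizontal, 3-6 vertical,
# which matches the doctest output.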
def nth_vertical_link(shape, links):
"""Convert link ID to vertical link ID.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of int
The link IDs converted to ordinals among the vertical links.
Examples
--------
>>> from landlab.components.overland_flow._links import nth_vertical_link
>>> shape = (3, 4)
>>> nth_vertical_link(shape, 4)
1
>>> nth_vertical_link(shape, (3, 4, 11))
array([0, 1, 5])
"""
links = np.asarray(links, dtype=int)
return as_id_array(
(links // (2 * shape[1] - 1)) * shape[1]
+ links % (2 * shape[1] - 1)
- (shape[1] - 1)
)
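# The arithmetic mirrors the block layout described above: links // (2*C - 1)
# is the block (node-row) index, contributing C vertical links per block, and
# links % (2*C - 1) - (C - 1) is the offset past the block's horizontal links.
# E.g. for shape (3, 4), link 11: block 11 // 7 = 1, offset 11 % 7 - 3 = 1,
# so 1 * 4 + 1 = 5, matching the doctest.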
def horizontal_active_link_ids(shape, active_ids, bad_index_value=-1):
"""ID of horizontal active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
active_ids : array of int
Array of all active link ids
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the HORIZONTAL active links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->o--24-->o--25-->o---I-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*---I-->o--20-->o--21-->o---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to `NodeStatus.CLOSED`
``o`` indicates the nodes that are set to `NodeStatus.CORE`
``I`` indicates the links that are set to `LinkStatus.INACTIVE`
``V`` indicates vertical active ids, which are ignored by this
function.
Numeric values correspond to the horizontal `LinkStatus.ACTIVE` ID.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (active_link_ids,
... horizontal_active_link_ids)
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 4,
4, 0, 0, 0, 4,
4, 4, 4, 4, 4], dtype=uint8)
>>> active_ids = active_link_ids((4,5), status)
>>> horizontal_active_link_ids((4,5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1,
-1, 10, 11, -1,
-1, 19, 20, -1,
-1, -1, -1, -1])
"""
number_of_horizontal_links = shape[0] * (shape[1] - 1)
out = np.full(number_of_horizontal_links, bad_index_value, dtype=int)
horizontal_ids = active_ids[np.where(~is_vertical_link(shape, active_ids))]
out[nth_horizontal_link(shape, horizontal_ids)] = horizontal_ids
return out
def nth_horizontal_link(shape, links):
"""Convert link ID to horizontal link ID.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of int
The link IDs converted to ordinals among the horizontal links.
Examples
--------
>>> from landlab.components.overland_flow._links import nth_horizontal_link
>>> shape = (3, 4)
>>> nth_horizontal_link(shape, 16)
8
>>> nth_horizontal_link(shape, (1, 7, 8))
array([1, 3, 4])
"""
links = np.asarray(links, dtype=int)
return as_id_array(
(links // (2 * shape[1] - 1)) * (shape[1] - 1) + links % (2 * shape[1] - 1)
)
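# Same block arithmetic as nth_vertical_link, but counting C - 1 horizontal
# links per block: e.g. for shape (3, 4), link 8: block 8 // 7 = 1, offset
# 8 % 7 = 1, so 1 * 3 + 1 = 4, matching the doctest.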
def is_horizontal_link(shape, links):
"""Test if a link is horizontal.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of bool
`True` for links that are horizontal.
Examples
--------
>>> from landlab.components.overland_flow._links import (is_horizontal_link,
... _number_of_links)
>>> import numpy as np
>>> shape = (3, 4)
>>> links = np.arange(_number_of_links(shape))
>>> is_horizontal_link(shape, links) # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, False, False,
True, True, True, False, False, False, False,
True, True, True], dtype=bool)
"""
return (~is_vertical_link(shape, links)) & (links < _number_of_links(shape))
def horizontal_west_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of west, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of west horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_west_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_west_link_neighbor(rmg.shape, horizontal_links)
array([-1, 0, 1, 2, -1, 9, 10, 11, -1, 18, 19, 20, -1, 27, 28, 29])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), 1, axis=1)
links[:, 0] = bad_index_value
return links.reshape(-1)
def horizontal_east_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""IDs of east, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of east horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal `LinkStatus.ACTIVE` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_east_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_east_link_neighbor(rmg.shape, horizontal_links)
array([ 1, 2, 3, -1, 10, 11, 12, -1, 19, 20, 21, -1, 28, 29, 30, -1])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), -1, axis=1)
links[:, -1] = bad_index_value
return links.reshape(-1)
def horizontal_north_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of north, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of north horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal `LinkStatus.ACTIVE` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_north_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_north_link_neighbor(rmg.shape, horizontal_links)
array([ 9, 10, 11, 12, 18, 19, 20, 21, 27, 28, 29, 30, -1, -1, -1, -1])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), -1, axis=0)
links[-1, :] = bad_index_value
return links.reshape(-1)
def horizontal_south_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of south horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*.
bad_index_value: int, optional
Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of south horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_south_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_south_link_neighbor(rmg.shape, horizontal_links)
array([-1, -1, -1, -1, 0, 1, 2, 3, 9, 10, 11, 12, 18, 19, 20, 21])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), 1, axis=0)
links[0, :] = bad_index_value
return links.reshape(-1)
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import (Template, Context, TemplateDoesNotExist,
TemplateSyntaxError)
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.importlib import import_module
from django.utils.encoding import smart_unicode, smart_str
HIDDEN_SETTINGS = re.compile('SECRET|PASSWORD|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = u'********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
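# Sketch of what the generator yields: the offset of each line start, plus a
# final sentinel one past the end, e.g. linebreak_iter("a\nbc") -> 0, 2, 5.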
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
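# A quick sketch of the cleansing behaviour (values here are made up):
#
#     cleanse_setting('SECRET_KEY', 'abc123')
#     # -> u'********************'
#     cleanse_setting('DATABASES', {'default': {'PASSWORD': 'hunter2'}})
#     # -> {'default': {'PASSWORD': u'********************'}}
#     cleanse_setting('DEBUG', True)
#     # -> True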
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
return HttpResponseServerError(html, mimetype='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER
modname, classname = modpath.rsplit('.', 1)
try:
mod = import_module(modname)
except ImportError, e:
raise ImproperlyConfigured(
'Error importing default exception reporter filter %s: "%s"' % (modpath, e))
try:
default_exception_reporter_filter = getattr(mod, classname)()
except AttributeError:
raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname))
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviours.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return tb_frame.f_locals.items()
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
func_name = tb_frame.f_code.co_name
func = tb_frame.f_globals.get(func_name)
sensitive_variables = getattr(func, 'sensitive_variables', [])
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
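# Usage sketch for the annotations this filter consumes (the decorators live
# in django.views.decorators.debug and set the attributes read above):
#
#     from django.views.decorators.debug import (sensitive_variables,
#         sensitive_post_parameters)
#
#     @sensitive_variables('password', 'credit_card')
#     @sensitive_post_parameters('password')
#     def pay(request):
#         password = request.POST['password']
#         ...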
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, basestring):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_html(self):
"Return HTML code for traceback."
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
isinstance(self.exc_value, TemplateSyntaxError)):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context({
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
})
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_unicode(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next_pos in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next_pos:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next_pos])
source_lines.append((num, escape(template_source[upto:next_pos])))
upto = next_pos
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
self.template_info = {
'message': self.exc_value.args[0],
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(r'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
lines = ['Traceback (most recent call last):\n']
lines += traceback.format_list(tb)
lines += traceback.format_exception_only(self.exc_type, self.exc_value)
return lines
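# Sketch: frames can opt out of this traceback by defining the marker local
# honored in get_traceback_frames() above (a convention shared with several
# WSGI debugging tools):
#
#     def _internal_helper():
#         __traceback_hide__ = True
#         ...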
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': smart_str(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), mimetype='text/html')
def empty_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
c = Context({
'project_name': settings.SETTINGS_MODULE.split('.')[0]
})
return HttpResponse(t.render(c), mimetype='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Template error</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASES</code> setting in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python {{ project_name }}/manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| |
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import re
import six
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
class ConfigurationManager(object):
"""
ConfigurationManager is responsible for management of
datastore configuration.
Its base functionality includes reading and writing configuration files.
It is responsible for validating user inputs and requests.
When supplied an override strategy it allows the user to manage
configuration overrides as well.
"""
    # Configuration group names. The names determine the order in which the
    # groups get applied: the system group sorts after, and therefore
    # overrides, the user group.
USER_GROUP = '20-user'
SYSTEM_GROUP = '50-system'
DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides'
DEFAULT_CHANGE_ID = 'common'
def __init__(self, base_config_path, owner, group, codec,
requires_root=False, override_strategy=None):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration files.
:type owner string
:param group Group of the configuration files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the manager requires superuser
privileges.
:type requires_root boolean
:param override_strategy Strategy used to manage configuration
overrides (e.g. ImportOverrideStrategy).
                                     Defaults to OneFileOverrideStrategy
                                     if None. This strategy should be
                                     compatible with nearly any datastore.
                                     It is recommended that each datastore
                                     define its strategy explicitly to avoid
                                     upgrade compatibility issues in case
                                     the default implementation changes in
                                     the future.
:type override_strategy ConfigurationOverrideStrategy
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._value_cache = None
if not override_strategy:
# Use OneFile strategy by default. Store the revisions in a
# sub-directory at the location of the configuration file.
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(base_config_path),
self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
operating_system.create_directory(
revision_dir, user=owner, group=group, force=True,
as_root=requires_root)
self._override_strategy = OneFileOverrideStrategy(revision_dir)
else:
self._override_strategy = override_strategy
self._override_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def get_value(self, key, default=None):
"""Return the current value at a given key or 'default'.
"""
if self._value_cache is None:
self._refresh_cache()
return self._value_cache.get(key, default)
def parse_configuration(self):
"""Read contents of the configuration file (applying overrides if any)
and parse it into a dict.
:returns: Configuration file as a Python dict.
"""
base_options = operating_system.read_file(
self._base_config_path, codec=self._codec)
updates = self._override_strategy.parse_updates()
guestagent_utils.update_dict(updates, base_options)
return base_options
def save_configuration(self, options):
"""Write given contents to the base configuration file.
Remove all existing overrides (both system and user).
        :param options          Contents of the configuration file.
        :type options           string or dict
"""
if isinstance(options, dict):
# Serialize a dict of options for writing.
self.save_configuration(self._codec.serialize(options))
else:
self._override_strategy.remove(self.USER_GROUP)
self._override_strategy.remove(self.SYSTEM_GROUP)
operating_system.write_file(
self._base_config_path, options, as_root=self._requires_root)
operating_system.chown(
self._base_config_path, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
self._base_config_path, FileMode.ADD_READ_ALL,
as_root=self._requires_root)
self._refresh_cache()
def has_system_override(self, change_id):
"""Return whether a given 'system' change exists.
"""
return self._override_strategy.exists(self.SYSTEM_GROUP, change_id)
def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID):
"""Apply a 'system' change to the configuration.
System overrides are always applied after all user changes so that
they override any user-defined setting.
:param options Configuration changes.
:type options string or dict
"""
self._apply_override(self.SYSTEM_GROUP, change_id, options)
def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID):
"""Apply a 'user' change to the configuration.
The 'system' values will be re-applied over this override.
:param options Configuration changes.
:type options string or dict
"""
self._apply_override(self.USER_GROUP, change_id, options)
def _apply_override(self, group_name, change_id, options):
if not isinstance(options, dict):
# Deserialize the options into a dict if not already.
self._apply_override(
group_name, change_id, self._codec.deserialize(options))
else:
self._override_strategy.apply(group_name, change_id, options)
self._refresh_cache()
def remove_system_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'system' configuration change.
"""
self._remove_override(self.SYSTEM_GROUP, change_id)
def remove_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'user' configuration change.
"""
self._remove_override(self.USER_GROUP, change_id)
def _remove_override(self, group_name, change_id):
self._override_strategy.remove(group_name, change_id)
self._refresh_cache()
def _refresh_cache(self):
self._value_cache = self.parse_configuration()
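# A minimal usage sketch (illustrative; 'IniCodec' and the paths below are
# hypothetical placeholders, not part of this module):
#
#     manager = ConfigurationManager(
#         '/etc/mysql/my.cnf', 'mysql', 'mysql', IniCodec(),
#         requires_root=True)
#     manager.apply_user_override({'mysqld': {'max_connections': '100'}})
#     # The system group ('50-system') sorts after the user group
#     # ('20-user'), so a system override wins when both set the same key.
#     manager.apply_system_override({'mysqld': {'max_connections': '200'}})
#     assert manager.get_value('mysqld')['max_connections'] == '200'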
@six.add_metaclass(abc.ABCMeta)
class ConfigurationOverrideStrategy(object):
"""ConfigurationOverrideStrategy handles configuration files.
The strategy provides functionality to enumerate, apply and remove
configuration overrides.
"""
@abc.abstractmethod
def configure(self, *args, **kwargs):
"""Configure this strategy.
A strategy needs to be configured before it can be used.
It would typically be configured by the ConfigurationManager.
"""
@abc.abstractmethod
def exists(self, group_name, change_id):
"""Return whether a given revision exists.
"""
@abc.abstractmethod
def apply(self, group_name, change_id, options):
"""Apply given options on the most current configuration revision.
Update if a file with the same id already exists.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
:param options Configuration changes.
:type options dict
"""
@abc.abstractmethod
def remove(self, group_name, change_id=None):
"""Rollback a given configuration override.
Remove the whole group if 'change_id' is None.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
def parse_updates(self):
"""Return all updates applied to the base revision as a single dict.
Return an empty dict if the base file is always the most current
version of configuration.
:returns: Updates to the base revision as a Python dict.
"""
return {}
class ImportOverrideStrategy(ConfigurationOverrideStrategy):
"""Import strategy keeps overrides in separate files that get imported
    into the base configuration file, which itself never changes.
An override file is simply deleted when the override is removed.
We keep two sets of override files in a separate directory.
- User overrides - configuration overrides applied by the user via the
Trove API.
- System overrides - 'internal' configuration changes applied by the
guestagent.
    The name format of override files is: '<group name>-<n>-<change id>.<ext>'
    where the numeric prefix embedded in the group name ('20-user',
    '50-system') is used to order the user/system sets and 'n' is an index
    used to keep track of the order in which overrides within their set
    got applied.
"""
    FILE_NAME_PATTERN = r'^%s-([0-9]+)-%s\.%s$'
def __init__(self, revision_dir, revision_ext):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
:param revision_ext Extension of revision files.
:type revision_ext string
"""
self._revision_dir = revision_dir
self._revision_ext = revision_ext
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
def exists(self, group_name, change_id):
return self._find_revision_file(group_name, change_id) is not None
def apply(self, group_name, change_id, options):
revision_file = self._find_revision_file(group_name, change_id)
if revision_file is None:
# Create a new file.
last_revision_index = self._get_last_file_index(group_name)
revision_file = guestagent_utils.build_file_path(
self._revision_dir,
'%s-%03d-%s' % (group_name, last_revision_index + 1,
change_id),
self._revision_ext)
else:
# Update the existing file.
current = operating_system.read_file(
revision_file, codec=self._codec)
options = guestagent_utils.update_dict(options, current)
operating_system.write_file(
revision_file, options, codec=self._codec,
as_root=self._requires_root)
operating_system.chown(
revision_file, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def remove(self, group_name, change_id=None):
removed = set()
if change_id:
# Remove a given file.
revision_file = self._find_revision_file(group_name, change_id)
if revision_file:
removed.add(revision_file)
else:
# Remove the entire group.
removed = self._collect_revision_files(group_name)
for path in removed:
operating_system.remove(path, force=True,
as_root=self._requires_root)
def parse_updates(self):
parsed_options = {}
for path in self._collect_revision_files():
options = operating_system.read_file(path, codec=self._codec)
guestagent_utils.update_dict(options, parsed_options)
return parsed_options
@property
def has_revisions(self):
"""Return True if there currently are any revision files.
"""
return len(self._collect_revision_files()) > 0
def _get_last_file_index(self, group_name):
"""Get the index of the most current file in a given group.
"""
current_files = self._collect_revision_files(group_name)
if current_files:
name_pattern = self._build_rev_name_pattern(group_name=group_name)
last_file_name = os.path.basename(current_files[-1])
last_index_match = re.match(name_pattern, last_file_name)
if last_index_match:
return int(last_index_match.group(1))
return 0
def _collect_revision_files(self, group_name='.+'):
"""Collect and return a sorted list of paths to existing revision
files. The files should be sorted in the same order in which
they were applied.
"""
name_pattern = self._build_rev_name_pattern(group_name=group_name)
return sorted(operating_system.list_files_in_directory(
self._revision_dir, recursive=False, pattern=name_pattern))
def _find_revision_file(self, group_name, change_id):
name_pattern = self._build_rev_name_pattern(group_name, change_id)
found = operating_system.list_files_in_directory(
self._revision_dir, recursive=False, pattern=name_pattern)
return next(iter(found), None)
def _build_rev_name_pattern(self, group_name='.+', change_id='.+'):
return self.FILE_NAME_PATTERN % (group_name, change_id,
self._revision_ext)
class OneFileOverrideStrategy(ConfigurationOverrideStrategy):
"""This is a strategy for datastores that do not support multiple
configuration files.
It uses the Import Strategy to keep the overrides internally.
When an override is applied or removed a new configuration file is
generated by applying all changes on a saved-off base revision.
"""
BASE_REVISION_NAME = 'base'
REVISION_EXT = 'rev'
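    # Lifecycle sketch (illustrative): the first apply() snapshots the current
    # configuration file to 'base.rev', writes an import revision and rewrites
    # the base file from the snapshot plus all revisions; remove() drops the
    # revision, regenerates, and deletes 'base.rev' once no revisions remain.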
def __init__(self, revision_dir):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
"""
self._revision_dir = revision_dir
self._import_strategy = ImportOverrideStrategy(revision_dir,
self.REVISION_EXT)
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._base_revision_file = guestagent_utils.build_file_path(
self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT)
self._import_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def exists(self, group_name, change_id):
return self._import_strategy.exists(group_name, change_id)
def apply(self, group_name, change_id, options):
self._import_strategy.apply(group_name, change_id, options)
self._regenerate_base_configuration()
def remove(self, group_name, change_id=None):
if self._import_strategy.has_revisions:
self._import_strategy.remove(group_name, change_id=change_id)
self._regenerate_base_configuration()
if not self._import_strategy.has_revisions:
# The base revision file is no longer needed if there are no
# overrides. It will be regenerated based on the current
# configuration file on the first 'apply()'.
operating_system.remove(self._base_revision_file, force=True,
as_root=self._requires_root)
def _regenerate_base_configuration(self):
"""Gather all configuration changes and apply them in order on the base
revision. Write the results to the configuration file.
"""
if not os.path.exists(self._base_revision_file):
# Initialize the file with the current configuration contents if it
# does not exist.
operating_system.copy(
self._base_config_path, self._base_revision_file,
force=True, preserve=True, as_root=self._requires_root)
base_revision = operating_system.read_file(
self._base_revision_file, codec=self._codec)
changes = self._import_strategy.parse_updates()
updated_revision = guestagent_utils.update_dict(changes, base_revision)
operating_system.write_file(
self._base_config_path, updated_revision, codec=self._codec,
as_root=self._requires_root)
| |
#!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.beat
from __future__ import print_function
from nose.tools import nottest, raises, eq_
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import matplotlib
matplotlib.use('Agg')
import numpy as np
import librosa
from test_core import files, load
__EXAMPLE_FILE = 'data/test1_22050.wav'
def test_onset_strength():
def __test(infile):
DATA = load(infile)
# Compute onset envelope using the same spectrogram
onsets = librosa.onset.onset_strength(y=None,
sr=8000,
S=DATA['D'],
lag=1,
max_size=1,
center=False,
detrend=True,
aggregate=np.mean)
assert np.allclose(onsets[1:], DATA['onsetenv'][0])
for infile in files('data/beat-onset-*.mat'):
yield (__test, infile)
def test_tempo():
def __test(infile):
DATA = load(infile)
# Estimate tempo from the given onset envelope
tempo = librosa.beat.estimate_tempo(DATA['onsetenv'][0],
sr=8000,
hop_length=32,
start_bpm=120.0)
assert (np.allclose(tempo, DATA['t'][0, 0]) or
np.allclose(tempo, DATA['t'][0, 1])), (tempo, DATA['t'])
for infile in files('data/beat-tempo-*.mat'):
yield (__test, infile)
@raises(librosa.ParameterError)
def test_beat_no_input():
librosa.beat.beat_track(y=None, onset_envelope=None)
def test_beat_no_onsets():
sr = 22050
hop_length = 512
duration = 30
onsets = np.zeros(duration * sr // hop_length)
tempo, beats = librosa.beat.beat_track(onset_envelope=onsets,
sr=sr,
hop_length=hop_length)
assert np.allclose(tempo, 0)
eq_(len(beats), 0)
def test_tempo_no_onsets():
sr = 22050
hop_length = 512
duration = 30
onsets = np.zeros(duration * sr // hop_length)
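    # With an all-zero onset envelope there is no periodicity evidence, so
    # the estimator is expected to fall back to its start_bpm prior
    # (asserted in __test below).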
def __test(start_bpm):
tempo = librosa.beat.estimate_tempo(onsets, sr=sr,
hop_length=hop_length,
start_bpm=start_bpm)
eq_(tempo, start_bpm)
for start_bpm in [40, 60, 120, 240]:
yield __test, start_bpm
def test_beat():
y, sr = librosa.load(__EXAMPLE_FILE)
hop_length = 512
onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
def __test(with_audio, with_tempo, start_bpm, bpm, trim, tightness):
if with_audio:
_y = y
_ons = None
else:
_y = None
_ons = onset_env
tempo, beats = librosa.beat.beat_track(y=_y,
sr=sr,
onset_envelope=_ons,
hop_length=hop_length,
start_bpm=start_bpm,
tightness=tightness,
trim=trim,
bpm=bpm)
assert tempo >= 0
if len(beats) > 0:
assert beats.min() >= 0
assert beats.max() <= len(onset_env)
for with_audio in [False, True]:
for with_tempo in [False, True]:
for trim in [False, True]:
for start_bpm in [-20, 0, 60, 120, 240]:
for bpm in [-20, 0, None, 150, 360]:
for tightness in [0, 100, 10000]:
if (tightness <= 0 or
(bpm is not None and bpm <= 0) or
(start_bpm is not None and bpm is None and start_bpm <= 0)):
tf = raises(librosa.ParameterError)(__test)
else:
tf = __test
yield (tf, with_audio, with_tempo,
start_bpm, bpm, trim, tightness)
def test_beat_units():
def __test(units, hop_length, y, sr):
tempo, b1 = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length)
_, b2 = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length,
units=units)
t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)
if units == 'time':
t2 = b2
elif units == 'samples':
t2 = librosa.samples_to_time(b2, sr=sr)
elif units == 'frames':
t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)
assert np.allclose(t1, t2)
for sr in [None, 11025]:
y, sr = librosa.load(__EXAMPLE_FILE, sr=sr)
for hop_length in [512, 1024]:
for units in ['frames', 'time', 'samples']:
yield __test, units, hop_length, y, sr
yield raises(librosa.ParameterError)(__test), 'bad units', hop_length, y, sr
# Beat tracking regression test is no longer enabled due to librosa's
# corrections
@nottest
def deprecated_test_beat():
def __test(infile):
DATA = load(infile)
(bpm, beats) = librosa.beat.beat_track(y=None,
sr=8000,
hop_length=32,
onset_envelope=DATA['onsetenv'][0])
beat_times = librosa.frames_to_time(beats, sr=8000, hop_length=32)
assert np.allclose(beat_times, DATA['beats'])
for infile in files('data/beat-beat-*.mat'):
yield (__test, infile)
| |
import gensim
import sklearn, sklearn.datasets
import sklearn.naive_bayes, sklearn.linear_model, sklearn.svm, sklearn.neighbors, sklearn.ensemble
import matplotlib.pyplot as plt
import scipy.sparse
import numpy as np
import time, re
import pickle
# Helpers to process text documents.
class TextDataset(object):
def clean_text(self, num='substitute'):
# TODO: stemming, lemmatisation
for i,doc in enumerate(self.documents):
# Digits.
            if num == 'spell':
doc = doc.replace('0', ' zero ')
doc = doc.replace('1', ' one ')
doc = doc.replace('2', ' two ')
doc = doc.replace('3', ' three ')
doc = doc.replace('4', ' four ')
doc = doc.replace('5', ' five ')
doc = doc.replace('6', ' six ')
doc = doc.replace('7', ' seven ')
doc = doc.replace('8', ' eight ')
doc = doc.replace('9', ' nine ')
            elif num == 'substitute':
                # All numbers are equal. Useful for embedding (countable words)?
doc = re.sub('(\\d+)', ' NUM ', doc)
            elif num == 'remove':
                # Numbers are uninformative (they are all over the place). Useful for bag-of-words?
# But maybe some kind of documents contain more numbers, e.g. finance.
# Some documents are indeed full of numbers. At least in 20NEWS.
doc = re.sub('[0-9]', ' ', doc)
# Remove everything except a-z characters and single space.
doc = doc.replace('$', ' dollar ')
doc = doc.lower()
doc = re.sub('[^a-z]', ' ', doc)
doc = ' '.join(doc.split()) # same as doc = re.sub('\s{2,}', ' ', doc)
self.documents[i] = doc
def vectorize(self, **params):
        # TODO: count or tf-idf. Or in normalize?
vectorizer = sklearn.feature_extraction.text.CountVectorizer(**params)
self.data = vectorizer.fit_transform(self.documents)
self.vocab = vectorizer.get_feature_names()
assert len(self.vocab) == self.data.shape[1]
def data_info(self, show_classes=False):
N, M = self.data.shape
sparsity = self.data.nnz / N / M * 100
print('N = {} documents, M = {} words, sparsity={:.4f}%'.format(N, M, sparsity))
if show_classes:
for i in range(len(self.class_names)):
num = sum(self.labels == i)
print(' {:5d} documents in class {:2d} ({})'.format(num, i, self.class_names[i]))
def show_document(self, i):
label = self.labels[i]
name = self.class_names[label]
try:
text = self.documents[i]
wc = len(text.split())
except AttributeError:
text = None
wc = 'N/A'
print('document {}: label {} --> {}, {} words'.format(i, label, name, wc))
try:
vector = self.data[i,:]
for j in range(vector.shape[1]):
if vector[0,j] != 0:
print(' {:.2f} "{}" ({})'.format(vector[0,j], self.vocab[j], j))
except AttributeError:
pass
return text
def keep_documents(self, idx):
"""Keep the documents given by the index, discard the others."""
self.documents = [self.documents[i] for i in idx]
self.labels = self.labels[idx]
self.data = self.data[idx,:]
def keep_words(self, idx):
"""Keep the documents given by the index, discard the others."""
self.data = self.data[:,idx]
self.vocab = [self.vocab[i] for i in idx]
try:
self.embeddings = self.embeddings[idx,:]
except AttributeError:
pass
def remove_short_documents(self, nwords, vocab='selected'):
"""Remove a document if it contains less than nwords."""
if vocab is 'selected':
# Word count with selected vocabulary.
wc = self.data.sum(axis=1)
wc = np.squeeze(np.asarray(wc))
        elif vocab == 'full':
            # Word count with full vocabulary.
            wc = np.empty(len(self.documents), dtype=int)
for i,doc in enumerate(self.documents):
wc[i] = len(doc.split())
idx = np.argwhere(wc >= nwords).squeeze()
self.keep_documents(idx)
return wc
def keep_top_words(self, M, Mprint=20):
"""Keep in the vocaluary the M words who appear most often."""
freq = self.data.sum(axis=0)
freq = np.squeeze(np.asarray(freq))
idx = np.argsort(freq)[::-1]
idx = idx[:M]
self.keep_words(idx)
print('most frequent words')
for i in range(Mprint):
print(' {:3d}: {:10s} {:6d} counts'.format(i, self.vocab[i], freq[idx][i]))
return freq[idx]
def normalize(self, norm='l1'):
"""Normalize data to unit length."""
# TODO: TF-IDF.
data = self.data.astype(np.float64)
self.data = sklearn.preprocessing.normalize(data, axis=1, norm=norm)
def embed(self, filename=None, size=100):
"""Embed the vocabulary using pre-trained vectors."""
if filename:
model = gensim.models.Word2Vec.load_word2vec_format(filename, binary=True)
size = model.vector_size
else:
class Sentences(object):
def __init__(self, documents):
self.documents = documents
def __iter__(self):
for document in self.documents:
yield document.split()
model = gensim.models.Word2Vec(Sentences(self.documents), size)
self.embeddings = np.empty((len(self.vocab), size))
keep = []
not_found = 0
for i,word in enumerate(self.vocab):
try:
self.embeddings[i,:] = model[word]
keep.append(i)
except KeyError:
not_found += 1
        print('{} words not found in corpus'.format(not_found))
self.keep_words(keep)
class Text20News(TextDataset):
def __init__(self, **params):
dataset = sklearn.datasets.fetch_20newsgroups(**params)
self.documents = dataset.data
self.labels = dataset.target
self.class_names = dataset.target_names
assert max(self.labels) + 1 == len(self.class_names)
N, C = len(self.documents), len(self.class_names)
print('N = {} documents, C = {} classes'.format(N, C))
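# A typical preprocessing pipeline built from the methods above (a sketch;
# the parameter values are illustrative, not prescribed by this module):
#
#     train = Text20News(subset='train')
#     train.clean_text(num='substitute')
#     train.vectorize(stop_words='english')
#     train.remove_short_documents(nwords=20, vocab='full')
#     train.keep_top_words(1000)
#     train.normalize(norm='l1')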
class Posts(TextDataset):
def __init__(self, **params):
#dataset = sklearn.datasets.fetch_20newsgroups(**params)
#self.documents = dataset.data
#self.labels = dataset.target
#self.class_names = dataset.target_names
dataset = pickle.load(open(params['data_home'], 'rb'))
if params['subset'] == 'train': dataset = dataset['train']
else: dataset= dataset['test']
self.documents = np.array(dataset['data'])
self.labels = np.array(dataset['labels'])
self.class_names = ['social_media', 'news', 'weblog']
assert max(self.labels) + 1 == len(self.class_names)
N, C = len(self.documents), len(self.class_names)
print('N = {} documents, C = {} classes'.format(N, C))
class TextRCV1(TextDataset):
def __init__(self, **params):
dataset = sklearn.datasets.fetch_rcv1(**params)
self.data = dataset.data
self.target = dataset.target
self.class_names = dataset.target_names
assert len(self.class_names) == 103 # 103 categories according to LYRL2004
N, C = self.target.shape
assert C == len(self.class_names)
print('N = {} documents, C = {} classes'.format(N, C))
def remove_classes(self, keep):
        # Construct a lookup table for labels.
class_lookup = {}
for i,name in enumerate(self.class_names):
class_lookup[name] = i
self.class_names = keep
# Index of classes to keep.
        idx_keep = np.empty(len(keep), dtype=int)
for i,cat in enumerate(keep):
idx_keep[i] = class_lookup[cat]
self.target = self.target[:,idx_keep]
assert self.target.shape[1] == len(keep)
def show_doc_per_class(self, print_=False):
"""Number of documents per class."""
docs_per_class = np.array(self.target.astype(np.uint64).sum(axis=0)).squeeze()
print('categories ({} assignments in total)'.format(docs_per_class.sum()))
if print_:
for i,cat in enumerate(self.class_names):
print(' {:5s}: {:6d} documents'.format(cat, docs_per_class[i]))
plt.figure(figsize=(17,5))
        plt.plot(sorted(docs_per_class)[::-1], '.')
def show_classes_per_doc(self):
"""Number of classes per document."""
classes_per_doc = np.array(self.target.sum(axis=1)).squeeze()
plt.figure(figsize=(17,5))
        plt.plot(sorted(classes_per_doc)[::-1], '.')
def select_documents(self):
classes_per_doc = np.array(self.target.sum(axis=1)).squeeze()
self.target = self.target[classes_per_doc==1]
self.data = self.data[classes_per_doc==1, :]
# Convert labels from indicator form to single value.
N, C = self.target.shape
target = self.target.tocoo()
self.labels = target.col
assert self.labels.min() == 0
assert self.labels.max() == C - 1
# Bruna and Dropout used 2 * 201369 = 402738 documents. Probably the difference btw v1 and v2.
#return classes_per_doc
### Helpers to quantify classifier's quality.
def baseline(train_data, train_labels, test_data, test_labels, omit=[]):
"""Train various classifiers to get a baseline."""
clf, train_accuracy, test_accuracy, train_f1, test_f1, exec_time = [], [], [], [], [], []
clf.append(sklearn.neighbors.KNeighborsClassifier(n_neighbors=10))
clf.append(sklearn.linear_model.LogisticRegression())
clf.append(sklearn.naive_bayes.BernoulliNB(alpha=.01))
clf.append(sklearn.ensemble.RandomForestClassifier())
clf.append(sklearn.naive_bayes.MultinomialNB(alpha=.01))
clf.append(sklearn.linear_model.RidgeClassifier())
clf.append(sklearn.svm.LinearSVC())
for i,c in enumerate(clf):
if i not in omit:
t_start = time.process_time()
c.fit(train_data, train_labels)
train_pred = c.predict(train_data)
test_pred = c.predict(test_data)
train_accuracy.append('{:5.2f}'.format(100*sklearn.metrics.accuracy_score(train_labels, train_pred)))
test_accuracy.append('{:5.2f}'.format(100*sklearn.metrics.accuracy_score(test_labels, test_pred)))
train_f1.append('{:5.2f}'.format(100*sklearn.metrics.f1_score(train_labels, train_pred, average='weighted')))
test_f1.append('{:5.2f}'.format(100*sklearn.metrics.f1_score(test_labels, test_pred, average='weighted')))
exec_time.append('{:5.2f}'.format(time.process_time() - t_start))
print('Train accuracy: {}'.format(' '.join(train_accuracy)))
print('Test accuracy: {}'.format(' '.join(test_accuracy)))
print('Train F1 (weighted): {}'.format(' '.join(train_f1)))
print('Test F1 (weighted): {}'.format(' '.join(test_f1)))
print('Execution time: {}'.format(' '.join(exec_time)))
def grid_search(params, grid_params, train_data, train_labels, val_data,
val_labels, test_data, test_labels, model):
"""Explore the hyper-parameter space with an exhaustive grid search."""
params = params.copy()
train_accuracy, test_accuracy, train_f1, test_f1 = [], [], [], []
grid = sklearn.grid_search.ParameterGrid(grid_params)
print('grid search: {} combinations to evaluate'.format(len(grid)))
for grid_params in grid:
params.update(grid_params)
name = '{}'.format(grid)
print('\n\n {} \n\n'.format(grid_params))
m = model(params)
m.fit(train_data, train_labels, val_data, val_labels)
string, accuracy, f1, loss = m.evaluate(train_data, train_labels)
train_accuracy.append('{:5.2f}'.format(accuracy)); train_f1.append('{:5.2f}'.format(f1))
print('train {}'.format(string))
string, accuracy, f1, loss = m.evaluate(test_data, test_labels)
test_accuracy.append('{:5.2f}'.format(accuracy)); test_f1.append('{:5.2f}'.format(f1))
print('test {}'.format(string))
print('\n\n')
print('Train accuracy: {}'.format(' '.join(train_accuracy)))
print('Test accuracy: {}'.format(' '.join(test_accuracy)))
print('Train F1 (weighted): {}'.format(' '.join(train_f1)))
print('Test F1 (weighted): {}'.format(' '.join(test_f1)))
for i,grid_params in enumerate(grid):
print('{} --> {} {} {} {}'.format(grid_params, train_accuracy[i], test_accuracy[i], train_f1[i], test_f1[i]))
class model_perf(object):
def __init__(s):
s.names, s.params = set(), {}
s.fit_accuracies, s.fit_losses, s.fit_time = {}, {}, {}
s.train_accuracy, s.train_f1, s.train_loss = {}, {}, {}
s.test_accuracy, s.test_f1, s.test_loss = {}, {}, {}
def test(s, model, name, params, train_data, train_labels, val_data, val_labels, test_data, test_labels):
s.params[name] = params
s.fit_accuracies[name], s.fit_losses[name], s.fit_time[name] = \
model.fit(train_data, train_labels, val_data, val_labels)
string, s.train_accuracy[name], s.train_f1[name], s.train_loss[name] = \
model.evaluate(train_data, train_labels)
print('train {}'.format(string))
string, s.test_accuracy[name], s.test_f1[name], s.test_loss[name] = \
model.evaluate(test_data, test_labels)
print('test {}'.format(string))
s.names.add(name)
def show(s, fontsize=None):
if fontsize:
plt.rc('pdf', fonttype=42)
plt.rc('ps', fonttype=42)
plt.rc('font', size=fontsize) # controls default text sizes
plt.rc('axes', titlesize=fontsize) # fontsize of the axes title
            plt.rc('axes', labelsize=fontsize)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=fontsize) # fontsize of the tick labels
plt.rc('ytick', labelsize=fontsize) # fontsize of the tick labels
plt.rc('legend', fontsize=fontsize) # legend fontsize
plt.rc('figure', titlesize=fontsize) # size of the figure title
print(' accuracy F1 loss time [ms] name')
print('test train test train test train')
for name in sorted(s.names):
print('{:5.2f} {:5.2f} {:5.2f} {:5.2f} {:.2e} {:.2e} {:3.0f} {}'.format(
s.test_accuracy[name], s.train_accuracy[name],
s.test_f1[name], s.train_f1[name],
s.test_loss[name], s.train_loss[name], s.fit_time[name]*1000, name))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
for name in sorted(s.names):
steps = np.arange(len(s.fit_accuracies[name])) + 1
steps *= s.params[name]['eval_frequency']
ax[0].plot(steps, s.fit_accuracies[name], '.-', label=name)
ax[1].plot(steps, s.fit_losses[name], '.-', label=name)
ax[0].set_xlim(min(steps), max(steps))
ax[1].set_xlim(min(steps), max(steps))
ax[0].set_xlabel('step')
ax[1].set_xlabel('step')
ax[0].set_ylabel('validation accuracy')
ax[1].set_ylabel('training loss')
ax[0].legend(loc='lower right')
ax[1].legend(loc='upper right')
#fig.savefig('training.pdf')
| |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import errno
import os
import pyglet
from pyglet.app.xlib import XlibSelectDevice
from .base import Device, Control, RelativeAxis, AbsoluteAxis, Button, Joystick
from .base import DeviceOpenException
from .evdev_constants import *
from .evdev_constants import _rel_raw_names, _abs_raw_names, _key_raw_names
c = pyglet.lib.load_library('c')
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRMASK = ((1 << _IOC_NRBITS)-1)
_IOC_TYPEMASK = ((1 << _IOC_TYPEBITS)-1)
_IOC_SIZEMASK = ((1 << _IOC_SIZEBITS)-1)
_IOC_DIRMASK = ((1 << _IOC_DIRBITS)-1)
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = (_IOC_NRSHIFT+_IOC_NRBITS)
_IOC_SIZESHIFT = (_IOC_TYPESHIFT+_IOC_TYPEBITS)
_IOC_DIRSHIFT = (_IOC_SIZESHIFT+_IOC_SIZEBITS)
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(dir, type, nr, size):
return ((dir << _IOC_DIRSHIFT) |
(type << _IOC_TYPESHIFT) |
(nr << _IOC_NRSHIFT) |
(size << _IOC_SIZESHIFT))
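# For example, EVIOCGVERSION below encodes to
# _IOC(_IOC_READ, ord('E'), 0x01, ctypes.sizeof(ctypes.c_int)),
# mirroring the EVIOCGVERSION macro from <linux/input.h>.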
def _IOR(type, nr, struct):
request = _IOC(_IOC_READ, ord(type), nr, ctypes.sizeof(struct))
def f(fileno):
buffer = struct()
if c.ioctl(fileno, request, ctypes.byref(buffer)) < 0:
err = ctypes.c_int.in_dll(c, 'errno').value
raise OSError(err, errno.errorcode[err])
return buffer
return f
def _IOR_len(type, nr):
def f(fileno, buffer):
request = _IOC(_IOC_READ, ord(type), nr, ctypes.sizeof(buffer))
if c.ioctl(fileno, request, ctypes.byref(buffer)) < 0:
err = ctypes.c_int.in_dll(c, 'errno').value
raise OSError(err, errno.errorcode[err])
return buffer
return f
def _IOR_str(type, nr):
g = _IOR_len(type, nr)
def f(fileno, len=256):
return g(fileno, ctypes.create_string_buffer(len)).value
return f
time_t = ctypes.c_long
suseconds_t = ctypes.c_long
class timeval(ctypes.Structure):
_fields_ = (
('tv_sec', time_t),
('tv_usec', suseconds_t)
)
class input_event(ctypes.Structure):
_fields_ = (
('time', timeval),
('type', ctypes.c_uint16),
('code', ctypes.c_uint16),
('value', ctypes.c_int32)
)
class input_id(ctypes.Structure):
_fields_ = (
('bustype', ctypes.c_uint16),
('vendor', ctypes.c_uint16),
('product', ctypes.c_uint16),
('version', ctypes.c_uint16),
)
class input_absinfo(ctypes.Structure):
_fields_ = (
('value', ctypes.c_int32),
('minimum', ctypes.c_int32),
('maximum', ctypes.c_int32),
('fuzz', ctypes.c_int32),
('flat', ctypes.c_int32),
)
EVIOCGVERSION = _IOR('E', 0x01, ctypes.c_int)
EVIOCGID = _IOR('E', 0x02, input_id)
EVIOCGNAME = _IOR_str('E', 0x06)
EVIOCGPHYS = _IOR_str('E', 0x07)
EVIOCGUNIQ = _IOR_str('E', 0x08)
def EVIOCGBIT(fileno, ev, buffer):
return _IOR_len('E', 0x20 + ev)(fileno, buffer)
def EVIOCGABS(fileno, abs):
buffer = input_absinfo()
return _IOR_len('E', 0x40 + abs)(fileno, buffer)
def get_set_bits(bytes):
bits = set()
j = 0
for byte in bytes:
for i in range(8):
if byte & 1:
bits.add(j + i)
byte >>= 1
j += 8
return bits
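# e.g. get_set_bits(bytearray([0b00000101])) == {0, 2}; byte j of an ioctl
# bitmask covers event codes 8*j .. 8*j + 7.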
_abs_names = {
ABS_X: AbsoluteAxis.X,
ABS_Y: AbsoluteAxis.Y,
ABS_Z: AbsoluteAxis.Z,
ABS_RX: AbsoluteAxis.RX,
ABS_RY: AbsoluteAxis.RY,
ABS_RZ: AbsoluteAxis.RZ,
ABS_HAT0X: AbsoluteAxis.HAT_X,
ABS_HAT0Y: AbsoluteAxis.HAT_Y,
}
_rel_names = {
REL_X: RelativeAxis.X,
REL_Y: RelativeAxis.Y,
REL_Z: RelativeAxis.Z,
REL_RX: RelativeAxis.RX,
REL_RY: RelativeAxis.RY,
REL_RZ: RelativeAxis.RZ,
REL_WHEEL: RelativeAxis.WHEEL,
}
def _create_control(fileno, event_type, event_code):
if event_type == EV_ABS:
raw_name = _abs_raw_names.get(event_code, 'EV_ABS(%x)' % event_code)
name = _abs_names.get(event_code)
absinfo = EVIOCGABS(fileno, event_code)
value = absinfo.value
min = absinfo.minimum
max = absinfo.maximum
control = AbsoluteAxis(name, min, max, raw_name)
control._set_value(value)
if name == 'hat_y':
control.inverted = True
elif event_type == EV_REL:
raw_name = _rel_raw_names.get(event_code, 'EV_REL(%x)' % event_code)
name = _rel_names.get(event_code)
# TODO min/max?
control = RelativeAxis(name, raw_name)
elif event_type == EV_KEY:
raw_name = _key_raw_names.get(event_code, 'EV_KEY(%x)' % event_code)
name = None
control = Button(name, raw_name)
else:
        # Unsupported event type; no control is created.
        return None
control._event_type = event_type
control._event_code = event_code
return control
def _create_joystick(device):
# Look for something with an ABS X and ABS Y axis, and a joystick 0 button
have_x = False
have_y = False
have_button = False
for control in device.controls:
if control._event_type == EV_ABS and control._event_code == ABS_X:
have_x = True
elif control._event_type == EV_ABS and control._event_code == ABS_Y:
have_y = True
elif control._event_type == EV_KEY and \
control._event_code in (BTN_JOYSTICK, BTN_GAMEPAD):
have_button = True
if not (have_x and have_y and have_button):
return
return Joystick(device)
event_types = {
EV_KEY: KEY_MAX,
EV_REL: REL_MAX,
EV_ABS: ABS_MAX,
EV_MSC: MSC_MAX,
EV_LED: LED_MAX,
EV_SND: SND_MAX,
}
class EvdevDevice(XlibSelectDevice, Device):
_fileno = None
def __init__(self, display, filename):
self._filename = filename
fileno = os.open(filename, os.O_RDONLY)
#event_version = EVIOCGVERSION(fileno).value
id = EVIOCGID(fileno)
self.id_bustype = id.bustype
self.id_vendor = hex(id.vendor)
self.id_product = hex(id.product)
self.id_version = id.version
name = EVIOCGNAME(fileno)
try:
name = name.decode('utf-8')
except UnicodeDecodeError:
try:
name = name.decode('latin-1')
except UnicodeDecodeError:
pass
try:
self.phys = EVIOCGPHYS(fileno)
except OSError:
self.phys = ''
try:
self.uniq = EVIOCGUNIQ(fileno)
except OSError:
self.uniq = ''
self.controls = []
self.control_map = {}
event_types_bits = (ctypes.c_byte * 4)()
EVIOCGBIT(fileno, 0, event_types_bits)
for event_type in get_set_bits(event_types_bits):
if event_type not in event_types:
continue
max_code = event_types[event_type]
nbytes = max_code // 8 + 1
event_codes_bits = (ctypes.c_byte * nbytes)()
EVIOCGBIT(fileno, event_type, event_codes_bits)
for event_code in get_set_bits(event_codes_bits):
control = _create_control(fileno, event_type, event_code)
if control:
self.control_map[(event_type, event_code)] = control
self.controls.append(control)
os.close(fileno)
super(EvdevDevice, self).__init__(display, name)
def open(self, window=None, exclusive=False):
super(EvdevDevice, self).open(window, exclusive)
try:
self._fileno = os.open(self._filename, os.O_RDONLY | os.O_NONBLOCK)
except OSError as e:
raise DeviceOpenException(e)
pyglet.app.platform_event_loop._select_devices.add(self)
def close(self):
super(EvdevDevice, self).close()
if not self._fileno:
return
pyglet.app.platform_event_loop._select_devices.remove(self)
os.close(self._fileno)
self._fileno = None
def get_controls(self):
return self.controls
# XlibSelectDevice interface
def fileno(self):
return self._fileno
def poll(self):
# TODO
return False
def select(self):
if not self._fileno:
return
events = (input_event * 64)()
bytes = c.read(self._fileno, events, ctypes.sizeof(events))
if bytes < 0:
return
n_events = bytes // ctypes.sizeof(input_event)
for event in events[:n_events]:
try:
control = self.control_map[(event.type, event.code)]
control._set_value(event.value)
except KeyError:
pass
_devices = {}
def get_devices(display=None):
base = '/dev/input'
for filename in os.listdir(base):
if filename.startswith('event'):
path = os.path.join(base, filename)
if path in _devices:
continue
try:
_devices[path] = EvdevDevice(display, path)
except OSError:
pass
return list(_devices.values())
def get_joysticks(display=None):
return [_f for _f in [_create_joystick(d) for d in get_devices(display)] if _f]
| |
# Natural Language Toolkit: TextTiling
#
# Copyright (C) 2001-2017 NLTK Project
# Author: George Boutsioukis
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import re
import math
try:
import numpy
except ImportError:
pass
from nltk.tokenize.api import TokenizerI
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1
LC, HC = 0, 1
DEFAULT_SMOOTHING = [0]
class TextTilingTokenizer(TokenizerI):
"""Tokenize a document into topical sections using the TextTiling algorithm.
This algorithm detects subtopic shifts based on the analysis of lexical
co-occurrence patterns.
The process starts by tokenizing the text into pseudosentences of
a fixed size w. Then, depending on the method used, similarity
scores are assigned at sentence gaps. The algorithm proceeds by
detecting the peak differences between these scores and marking
them as boundaries. The boundaries are normalized to the closest
paragraph break and the segmented text is returned.
:param w: Pseudosentence size
:type w: int
:param k: Size (in sentences) of the block used in the block comparison method
:type k: int
:param similarity_method: The method used for determining similarity scores:
`BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
:type similarity_method: constant
:param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
:type stopwords: list(str)
:param smoothing_method: The method used for smoothing the score plot:
`DEFAULT_SMOOTHING` (default)
:type smoothing_method: constant
:param smoothing_width: The width of the window used by the smoothing method
:type smoothing_width: int
:param smoothing_rounds: The number of smoothing passes
:type smoothing_rounds: int
:param cutoff_policy: The policy used to determine the number of boundaries:
`HC` (default) or `LC`
:type cutoff_policy: constant
>>> from nltk.corpus import brown
>>> tt = TextTilingTokenizer(demo_mode=True)
>>> text = brown.raw()[:10000]
>>> s, ss, d, b = tt.tokenize(text)
>>> b
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
"""
def __init__(self,
w=20,
k=10,
similarity_method=BLOCK_COMPARISON,
stopwords=None,
smoothing_method=DEFAULT_SMOOTHING,
smoothing_width=2,
smoothing_rounds=1,
cutoff_policy=HC,
demo_mode=False):
if stopwords is None:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
self.__dict__.update(locals())
del self.__dict__['self']
def tokenize(self, text):
"""Return a tokenized copy of *text*, where each "token" represents
a separate topic."""
lowercase_text = text.lower()
paragraph_breaks = self._mark_paragraph_breaks(text)
text_length = len(lowercase_text)
# Tokenization step starts here
# Remove punctuation
        nopunct_text = ''.join(c for c in lowercase_text
                               if re.match(r"[a-z\-' \n\t]", c))
nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
tokseqs = self._divide_to_tokensequences(nopunct_text)
# The morphological stemming step mentioned in the TextTile
# paper is not implemented. A comment in the original C
# implementation states that it offers no benefit to the
# process. It might be interesting to test the existing
# stemmers though.
#words = _stem_words(words)
# Filter stopwords
for ts in tokseqs:
ts.wrdindex_list = [wi for wi in ts.wrdindex_list
if wi[0] not in self.stopwords]
token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
# End of the Tokenization step
# Lexical score determination
if self.similarity_method == BLOCK_COMPARISON:
gap_scores = self._block_comparison(tokseqs, token_table)
elif self.similarity_method == VOCABULARY_INTRODUCTION:
raise NotImplementedError("Vocabulary introduction not implemented")
if self.smoothing_method == DEFAULT_SMOOTHING:
smooth_scores = self._smooth_scores(gap_scores)
# End of Lexical score Determination
# Boundary identification
depth_scores = self._depth_scores(smooth_scores)
segment_boundaries = self._identify_boundaries(depth_scores)
normalized_boundaries = self._normalize_boundaries(text,
segment_boundaries,
paragraph_breaks)
# End of Boundary Identification
segmented_text = []
prevb = 0
for b in normalized_boundaries:
if b == 0:
continue
segmented_text.append(text[prevb:b])
prevb = b
if prevb < text_length: # append any text that may be remaining
segmented_text.append(text[prevb:])
if not segmented_text:
segmented_text = [text]
if self.demo_mode:
return gap_scores, smooth_scores, depth_scores, segment_boundaries
return segmented_text
def _block_comparison(self, tokseqs, token_table):
"Implements the block comparison method"
def blk_frq(tok, block):
ts_occs = filter(lambda o: o[0] in block,
token_table[tok].ts_occurences)
freq = sum([tsocc[1] for tsocc in ts_occs])
return freq
gap_scores = []
numgaps = len(tokseqs)-1
for curr_gap in range(numgaps):
score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
score = 0.0
#adjust window size for boundary conditions
if curr_gap < self.k-1:
window_size = curr_gap + 1
elif curr_gap > numgaps-self.k:
window_size = numgaps - curr_gap
else:
window_size = self.k
b1 = [ts.index
for ts in tokseqs[curr_gap-window_size+1 : curr_gap+1]]
b2 = [ts.index
for ts in tokseqs[curr_gap+1 : curr_gap+window_size+1]]
for t in token_table:
score_dividend += blk_frq(t, b1)*blk_frq(t, b2)
score_divisor_b1 += blk_frq(t, b1)**2
score_divisor_b2 += blk_frq(t, b2)**2
try:
score = score_dividend/math.sqrt(score_divisor_b1*
score_divisor_b2)
except ZeroDivisionError:
pass # score += 0.0
gap_scores.append(score)
return gap_scores
def _smooth_scores(self, gap_scores):
"Wraps the smooth function from the SciPy Cookbook"
return list(smooth(numpy.array(gap_scores[:]),
window_len = self.smoothing_width+1))
def _mark_paragraph_breaks(self, text):
"""Identifies indented text or line breaks as the beginning of
paragraphs"""
MIN_PARAGRAPH = 100
pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
matches = pattern.finditer(text)
last_break = 0
pbreaks = [0]
for pb in matches:
if pb.start()-last_break < MIN_PARAGRAPH:
continue
else:
pbreaks.append(pb.start())
last_break = pb.start()
return pbreaks
def _divide_to_tokensequences(self, text):
"Divides the text into pseudosentences of fixed size"
w = self.w
wrdindex_list = []
        matches = re.finditer(r"\w+", text)
for match in matches:
wrdindex_list.append((match.group(), match.start()))
        return [TokenSequence(i // w, wrdindex_list[i:i+w])
for i in range(0, len(wrdindex_list), w)]
def _create_token_table(self, token_sequences, par_breaks):
"Creates a table of TokenTableFields"
token_table = {}
current_par = 0
current_tok_seq = 0
pb_iter = par_breaks.__iter__()
current_par_break = next(pb_iter)
if current_par_break == 0:
try:
current_par_break = next(pb_iter) #skip break at 0
except StopIteration:
raise ValueError(
"No paragraph breaks were found(text too short perhaps?)"
)
for ts in token_sequences:
for word, index in ts.wrdindex_list:
try:
while index > current_par_break:
current_par_break = next(pb_iter)
current_par += 1
except StopIteration:
#hit bottom
pass
if word in token_table:
token_table[word].total_count += 1
if token_table[word].last_par != current_par:
token_table[word].last_par = current_par
token_table[word].par_count += 1
if token_table[word].last_tok_seq != current_tok_seq:
token_table[word].last_tok_seq = current_tok_seq
token_table[word]\
.ts_occurences.append([current_tok_seq,1])
else:
token_table[word].ts_occurences[-1][1] += 1
else: #new word
token_table[word] = TokenTableField(first_pos=index,
ts_occurences= \
[[current_tok_seq,1]],
total_count=1,
par_count=1,
last_par=current_par,
last_tok_seq= \
current_tok_seq)
current_tok_seq += 1
return token_table
def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores)/len(depth_scores)
stdev = numpy.std(depth_scores)
        # Hearst's TextTiling paper uses a more permissive (lower) cutoff
        # for the LC policy than for the HC policy.
        if self.cutoff_policy == LC:
            cutoff = avg - stdev
        else:
            cutoff = avg - stdev / 2.0
depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
depth_tuples.reverse()
hp = list(filter(lambda x:x[0]>cutoff, depth_tuples))
for dt in hp:
boundaries[dt[1]] = 1
            for dt2 in hp:  # undo if there is already a boundary close by
if dt[1] != dt2[1] and abs(dt2[1]-dt[1]) < 4 \
and boundaries[dt2[1]] == 1:
boundaries[dt[1]] = 0
return boundaries
def _depth_scores(self, scores):
"""Calculates the depth of each gap, i.e. the average difference
between the left and right peaks and the gap's score"""
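        # For example, with neighbouring peaks of 0.8 (left) and 0.9 (right)
        # around a gap score of 0.2, the depth is 0.8 + 0.9 - 2*0.2 = 1.3.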
depth_scores = [0 for x in scores]
        # Clip boundaries: this holds on the rule of thumb (my thumb)
        # that a section shouldn't be smaller than at least 2
        # pseudosentences for small texts and around 5 for larger ones.
        clip = min(max(len(scores) // 10, 2), 5)
index = clip
for gapscore in scores[clip:-clip]:
lpeak = gapscore
for score in scores[index::-1]:
if score >= lpeak:
lpeak = score
else:
break
rpeak = gapscore
for score in scores[index:]:
if score >= rpeak:
rpeak = score
else:
break
depth_scores[index] = lpeak + rpeak - 2 * gapscore
index += 1
return depth_scores
def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
"""Normalize the boundaries identified to the original text's
paragraph breaks"""
norm_boundaries = []
char_count, word_count, gaps_seen = 0, 0, 0
seen_word = False
for char in text:
char_count += 1
if char in " \t\n" and seen_word:
seen_word = False
word_count += 1
if char not in " \t\n" and not seen_word:
seen_word=True
if gaps_seen < len(boundaries) and word_count > \
(max(gaps_seen*self.w, self.w)):
if boundaries[gaps_seen] == 1:
#find closest paragraph break
best_fit = len(text)
for br in paragraph_breaks:
if best_fit > abs(br-char_count):
best_fit = abs(br-char_count)
bestbr = br
else:
break
if bestbr not in norm_boundaries: #avoid duplicates
norm_boundaries.append(bestbr)
gaps_seen += 1
return norm_boundaries
class TokenTableField(object):
"""A field in the token table holding parameters for each token,
used later in the process"""
def __init__(self,
first_pos,
ts_occurences,
total_count=1,
par_count=1,
last_par=0,
last_tok_seq=None):
self.__dict__.update(locals())
del self.__dict__['self']
class TokenSequence(object):
"A token list with its original length and its index"
def __init__(self,
index,
wrdindex_list,
original_length=None):
original_length=original_length or len(wrdindex_list)
self.__dict__.update(locals())
del self.__dict__['self']
#Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:return: the smoothed signal
example::
        t = arange(-2, 2, 0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
:see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
    if window == 'flat': # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = getattr(numpy, window)(window_len)
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
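
# A minimal non-demo usage sketch (assumes the Brown corpus is installed and
# imported as in demo()): in normal mode the tokenizer returns the text split
# into multi-paragraph segments rather than the raw score arrays.
#
#     tt = TextTilingTokenizer()
#     segments = tt.tokenize(brown.raw()[:10000])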
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import tempfile
from io import BytesIO
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import b
from libcloud.utils.py3 import basestring
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver
from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE
from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE
from libcloud.test import unittest
from libcloud.test import MockHttp, generate_random_data # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS
class AzureBlobsMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('azure_blobs')
base_headers = {}
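    # MockHttp (from libcloud.test) dispatches each request to a method whose
    # name is derived from the URL path (non-alphanumeric characters replaced
    # with underscores), with the current value of ``type`` appended as a
    # suffix, e.g. /test_container with type 'EMPTY' -> _test_container_EMPTY.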
def _UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.UNAUTHORIZED])
def _list_containers_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_containers_1.xml')
else:
body = self.fixtures.load('list_containers_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container_EMPTY(self, method, url, body, headers):
if method == 'DELETE':
body = u''
return (httplib.ACCEPTED,
body,
self.base_headers,
httplib.responses[httplib.ACCEPTED])
else:
body = self.fixtures.load('list_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _new__container_INVALID_NAME(self, method, url, body, headers):
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
def _test_container(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_objects_1.xml')
else:
body = self.fixtures.load('list_objects_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container100(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_container200(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test_container200_test(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['content-length'] = '12345'
headers['content-type'] = 'application/zip'
headers['x-ms-blob-type'] = 'Block'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-rabbits'] = 'monkeys'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test2_test_list_containers(self, method, url, body, headers):
# test_get_object
body = self.fixtures.load('list_containers.xml')
headers = {'content-type': 'application/zip',
'etag': '"e31208wqsdoj329jd"',
'x-amz-meta-rabbits': 'monkeys',
'content-length': '12345',
'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
# test_create_container
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.CONFLICT])
def _new_container(self, method, url, body, headers):
# test_create_container, test_delete_container
headers = {}
if method == 'PUT':
status = httplib.CREATED
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
elif method == 'DELETE':
status = httplib.NO_CONTENT
return (status,
body,
headers,
httplib.responses[status])
def _new_container_DOESNT_EXIST(self, method, url, body, headers):
# test_delete_container
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
# test_delete_container_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body,
headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_foo_bar_object_DELETE(self, method, url, body, headers):
# test_delete_object
return (httplib.ACCEPTED,
body,
headers,
httplib.responses[httplib.ACCEPTED])
def _foo_bar_container_foo_test_upload(self, method, url, body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_block(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_page(self, method, url,
body, headers):
# test_upload_object_success
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_blocklist(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_lease(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
action = headers['x-ms-lease-action']
rheaders = {'x-ms-lease-id': 'someleaseid'}
body = ''
if action == 'acquire':
return (httplib.CREATED,
body,
rheaders,
httplib.responses[httplib.CREATED])
else:
if headers.get('x-ms-lease-id', None) != 'someleaseid':
return (httplib.BAD_REQUEST,
body,
rheaders,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.OK,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url,
body, headers):
# test_upload_object_invalid_hash1
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
# test_upload_object_invalid_file_size
self._assert_content_length_header_is_string(headers=headers)
body = generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url,
body, headers):
# test_upload_object_invalid_file_size
self._assert_content_length_header_is_string(headers=headers)
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _assert_content_length_header_is_string(self, headers):
if 'Content-Length' in headers:
self.assertTrue(isinstance(headers['Content-Length'], basestring))
class AzureBlobsTests(unittest.TestCase):
driver_type = AzureBlobsStorageDriver
driver_args = STORAGE_AZURE_BLOBS_PARAMS
mock_response_klass = AzureBlobsMockHttp
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
self.driver_type.connectionCls.conn_class = self.mock_response_klass
self.mock_response_klass.type = None
self.driver = self.create_driver()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_invalid_credentials(self):
self.mock_response_klass.type = 'UNAUTHORIZED'
try:
self.driver.list_containers()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, InvalidCredsError))
else:
self.fail('Exception was not thrown')
def test_list_containers_empty(self):
self.mock_response_klass.type = 'list_containers_EMPTY'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = 'list_containers'
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
containers = self.driver.list_containers()
self.assertEqual(len(containers), 4)
self.assertTrue('last_modified' in containers[1].extra)
self.assertTrue('url' in containers[1].extra)
self.assertTrue('etag' in containers[1].extra)
self.assertTrue('lease' in containers[1].extra)
self.assertTrue('meta_data' in containers[1].extra)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = 'EMPTY'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = None
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 4)
obj = objects[1]
self.assertEqual(obj.name, 'object2.txt')
self.assertEqual(obj.hash, '0x8CFB90F1BA8CD8F')
self.assertEqual(obj.size, 1048576)
self.assertEqual(obj.container.name, 'test_container')
self.assertTrue('meta1' in obj.meta_data)
self.assertTrue('meta2' in obj.meta_data)
self.assertTrue('last_modified' in obj.extra)
self.assertTrue('content_type' in obj.extra)
self.assertTrue('content_encoding' in obj.extra)
self.assertTrue('content_language' in obj.extra)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = None
try:
self.driver.get_container(container_name='test_container100')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_container_success(self):
self.mock_response_klass.type = None
container = self.driver.get_container(
container_name='test_container200')
        self.assertEqual(container.name, 'test_container200')
        self.assertEqual(container.extra['etag'], '0x8CFB877BB56A6FB')
        self.assertEqual(container.extra['last_modified'],
                         'Fri, 04 Jan 2013 09:48:06 GMT')
        self.assertEqual(container.extra['lease']['status'], 'unlocked')
        self.assertEqual(container.extra['lease']['state'], 'available')
        self.assertEqual(container.extra['meta_data']['meta1'], 'value1')
def test_get_object_container_doesnt_exist(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = None
try:
self.driver.get_object(container_name='test_container100',
object_name='test')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_object_success(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = None
obj = self.driver.get_object(container_name='test_container200',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'test_container200')
self.assertEqual(obj.size, 12345)
self.assertEqual(obj.hash, '0x8CFB877BB56A6FB')
self.assertEqual(obj.extra['last_modified'],
'Fri, 04 Jan 2013 09:48:06 GMT')
self.assertEqual(obj.extra['content_type'], 'application/zip')
self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
def test_create_container_invalid_name(self):
# invalid container name
self.mock_response_klass.type = 'INVALID_NAME'
try:
self.driver.create_container(container_name='new--container')
except InvalidContainerNameError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = 'ALREADY_EXISTS'
try:
self.driver.create_container(container_name='new-container')
except ContainerAlreadyExistsError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_success(self):
# success
self.mock_response_klass.type = None
name = 'new-container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'DOESNT_EXIST'
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_not_empty(self):
self.mock_response_klass.type = None
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_success(self):
self.mock_response_klass.type = 'EMPTY'
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_delete_container_not_found(self):
self.mock_response_klass.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
            self.fail('Container does not exist but an exception was not '
                      'thrown')
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_invalid_file_size(self):
self.mock_response_klass.type = 'INVALID_SIZE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertFalse(result)
def test_download_object_invalid_file_already_exists(self):
self.mock_response_klass.type = 'INVALID_SIZE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__)
try:
self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
def test_download_object_as_stream_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_invalid_ex_blob_type(self):
        # An invalid ex_blob_type value is rejected by the driver with a
        # LibcloudError before any request is made
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
try:
self.driver.upload_object(file_path=file_path, container=container,
object_name=object_name,
verify_hash=True,
ex_blob_type='invalid-blob')
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(str(e).lower().find('invalid blob type') != -1)
else:
self.fail('Exception was not thrown')
def test_upload_object_invalid_md5(self):
# Invalid md5 is returned by azure
self.mock_response_klass.type = 'INVALID_HASH'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
file_path = os.path.abspath(__file__)
try:
self.driver.upload_object(file_path=file_path, container=container,
object_name=object_name,
verify_hash=True)
except ObjectHashMismatchError:
pass
else:
self.fail(
'Invalid hash was returned but an exception was not thrown')
def test_upload_small_block_object_success(self):
file_path = os.path.abspath(__file__)
file_size = os.stat(file_path).st_size
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
def test_upload_big_block_object_success(self):
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_BLOCK_MAX_SIZE + 1
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
def test_upload_page_object_success(self):
self.mock_response_klass.use_param = None
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 4
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
def test_upload_page_object_failure(self):
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 2 + 1
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
try:
self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob')
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(str(e).lower().find('not aligned') != -1)
os.remove(file_path)
def test_upload_small_block_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = os.path.abspath(__file__)
file_size = os.stat(file_path).st_size
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob',
ex_use_lease=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
self.mock_response_klass.use_param = None
def test_upload_big_block_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_BLOCK_MAX_SIZE * 2
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob',
ex_use_lease=False)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
self.mock_response_klass.use_param = None
def test_upload_page_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 4
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob',
ex_use_lease=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
self.mock_response_klass.use_param = None
def test_upload_blob_object_via_stream(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
iterator = BytesIO(b('345'))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
self.mock_response_klass.use_param = None
def test_upload_blob_object_via_stream_with_lease(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
iterator = BytesIO(b('345'))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='BlockBlob',
ex_use_lease=True)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
self.mock_response_klass.use_param = None
def test_upload_page_object_via_stream(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
blob_size = AZURE_PAGE_CHUNK_SIZE
iterator = BytesIO(b('1' * blob_size))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='PageBlob',
ex_page_blob_size=blob_size)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, blob_size)
self.mock_response_klass.use_param = None
def test_upload_page_object_via_stream_with_lease(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
blob_size = AZURE_PAGE_CHUNK_SIZE
iterator = BytesIO(b('1' * blob_size))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='PageBlob',
ex_page_blob_size=blob_size,
ex_use_lease=True)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, blob_size)
def test_delete_object_not_found(self):
self.mock_response_klass.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
try:
self.driver.delete_object(obj=obj)
except ObjectDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_object_success(self):
self.mock_response_klass.type = 'DELETE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
def test_storage_driver_host(self):
# Non regression tests for issue LIBCLOUD-399 dealing with the bad
# management of the connectionCls.host class attribute
driver1 = self.driver_type('fakeaccount1', 'deadbeafcafebabe==')
driver2 = self.driver_type('fakeaccount2', 'deadbeafcafebabe==')
driver3 = self.driver_type('fakeaccount3', 'deadbeafcafebabe==',
host='test.foo.bar.com')
host1 = driver1.connection.host
host2 = driver2.connection.host
host3 = driver3.connection.host
self.assertEqual(host1, 'fakeaccount1.blob.core.windows.net')
self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net')
self.assertEqual(host3, 'test.foo.bar.com')
if __name__ == '__main__':
sys.exit(unittest.main())
| |
import numpy as np
from scipy.integrate import tplquad, dblquad
import plotly.graph_objs as go  # needed for the current-loop trace at the end of this file

k = 8.987551e9  # Coulomb's constant (N m^2 C^-2)
m = 1  # kg
dt = 0.1  # s
a = 0.5  # minor radius of the torus
# NOTE: these are force ELEMENTS. They must be integrated over the entire
# surface/volume to obtain the overall force.
# Outputs the force element in direction i, given Coulomb's constant k, test
# charge position p, and charge density rho, for a SPHERE.
def Force_element_sphere(r,theta,phi):
r_vector = np.array([r*np.sin(phi)*np.cos(theta),r*np.sin(phi)*np.sin(theta),r*np.cos(phi)])
distance= p-r_vector
    dV = np.linalg.norm(r_vector)**2*np.sin(phi)  # volume element for a sphere: r^2 sin(phi) dr dtheta dphi
    dq = dV*rho
    dF = dq*k*distance/(np.linalg.norm(distance)**3)  # Coulomb's law
return dF[i]
# Outputs the force element in direction i (i is set globally before the
# integration), given Coulomb's constant k, test charge position p, and
# charge density rho, for a DISK.
def Force_element_disk(r,theta):
r_vector = np.array([r*np.cos(theta),r*np.sin(theta),0])
distance= p-r_vector
    dS = np.linalg.norm(r_vector)  # area element for a disk: r dr dtheta
    dq = dS*rho
    dF = dq*k*distance/(np.linalg.norm(distance)**3)  # Coulomb's law
return dF[i]
def Force_element_torus(theta,z,r):
    # Note: 'a', the minor radius of the torus, is set at module level.
    # For a ring torus, r must be greater than a.
r_vector = np.array([r*np.cos(theta),r*np.sin(theta),z])
distance= p-r_vector
    dV = r  # cylindrical volume element for the torus: r dr dz dtheta
    dq = dV*rho
    dF = dq*k*distance/(np.linalg.norm(distance)**3)  # Coulomb's law
return dF[i]
def Force_element_cube(x, y, z):
r_vector = np.array([x, y, z])
distance = p - r_vector
dV = 1
dq = dV*rho
dF = dq*k*distance/(np.linalg.norm(distance)**3)
return dF[i]
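
# A minimal sketch (hypothetical values, not part of the original workflow)
# showing how one component of the total force comes out of a force element:
# set the globals the element functions rely on, then triple-integrate over
# a unit sphere exactly as test_charge.force() does below.
def _example_sphere_force_component():
    global p, rho, i
    p = np.array([2., 2., 2.])  # assumed test-charge position
    rho = 1.0                   # assumed charge density
    i = 0                       # x component
    val, abserr = tplquad(Force_element_sphere, 0, np.pi,
                          lambda theta: 0, lambda theta: 2*np.pi,
                          lambda phi, theta: 0, lambda phi, theta: 1.0,
                          epsrel=0.4)
    return val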
class surface:
"EM surface takes arguments density, shape, and linear dim. Linear dim can be radius, width, etc"
"make_plot returns an array of data that defines the physical surface, which we use for 3D plotting later"
def __init__(self, density=1.,shape='sphere',linear_dim=1.):
self.rho = density
self.s = shape
self.dim= linear_dim
def make_plot(self):
        u = np.linspace(0, 2 * np.pi, 35)  # 35 points balances visual resolution against computational efficiency
v = np.linspace(0, 2 * np.pi, 35)
cube_side = np.linspace(-self.dim, self.dim, 35)
        # v_t = np.linspace(0,2*np.pi,37) -- actually, changing the definition of v to 2*pi doesn't make a difference to the sphere or disk plot
[uu,vv] = np.meshgrid (u,v)
if self.s=='sphere':
x = self.dim * np.sin(uu) * np.cos(vv);
y = self.dim * np.sin(uu) * np.sin(vv);
z = self.dim * np.cos(uu)
if self.s=='disk':
x = self.dim * np.sin(uu) * np.cos(vv);
y = self.dim * np.sin(uu) * np.sin(vv);
z = 0 * np.cos(uu);
if self.s=='torus':
x = (self.dim + a*np.cos(vv)) * np.cos(uu);
y = (self.dim + a*np.cos(vv)) * np.sin(uu);
z = a*np.sin(vv);
if self.s=='cube':
side1, side2 = np.meshgrid(cube_side, cube_side)
zeroside = -1*self.dim*np.ones(side1.shape)
oneside = np.ones(side1.shape)
oneside = self.dim*oneside
x = np.append(side1, side1, axis = 0)
x = np.append(x, side2, axis = 0)
x = np.append(x, side2, axis = 0)
x = np.append(x, zeroside, axis = 0)
x = np.append(x, oneside, axis = 0)
y = np.append(side2, side2, axis = 0)
y = np.append(y, zeroside, axis = 0)
y = np.append(y, oneside, axis = 0)
y = np.append(y, side1, axis = 0)
y = np.append(y, side1, axis = 0)
z = np.append(zeroside, oneside, axis = 0)
z = np.append(z, side1, axis = 0)
z = np.append(z, side1, axis = 0)
z = np.append(z, side2, axis = 0)
z = np.append(z, side2, axis = 0)
return [x,y,z]
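
# A minimal plotting sketch (assumes matplotlib is installed; not part of the
# original script): render the point grid returned by make_plot() in 3D.
def _plot_surface_example():
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection
    x, y, z = surface(shape='sphere', linear_dim=1.).make_plot()
    ax = plt.figure().add_subplot(111, projection='3d')
    ax.plot_surface(x, y, z)
    plt.show()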
class test_charge:
"test charge takes attribute position, which should be a 3d array"
def __init__(self,position=[2.,2.,2.],charge=1.,initial_velocity=np.array([0.,0.,0.])):
self.p = np.array(position)
self.q_1=charge
self.v=initial_velocity
def force(self,surface):
F = list()
        global p, rho, i, q_1  # expose these to the module-level force-element functions by defining them as globals
        rho = surface.rho  # wouldn't it be better to define this as a global variable within the surface class?
p = self.p
q_1= self.q_1
if surface.s=='sphere':
for j in range(3): #cycling through the three dimensions of the force (because scipy doesn't do vector integration)
i=j
#tplquad returns the triple integral of the force
F.append(tplquad(Force_element_sphere,0,np.pi,lambda theta:0, lambda theta: 2*np.pi,
lambda phi,theta: 0, lambda phi, theta: surface.dim,epsrel=0.4)[0])
        elif surface.s == 'disk':
            for j in range(3):
                i = j
                # dblquad returns the double integral of the force
F.append(dblquad(Force_element_disk,0,2*np.pi,lambda theta: 0, lambda theta: surface.dim,epsrel=0.4)[0])
elif surface.s=='torus':
for j in range(3):
i=j
#tplquad returns the triple integral of the force
F.append(tplquad(Force_element_torus,surface.dim-a,surface.dim+a,lambda r: 0,lambda r: np.sqrt(a**2 - (r-surface.dim)**2),
lambda z,r: 0, lambda z,r: 2*np.pi,epsrel=0.4)[0])
elif surface.s=='cube':
for j in range(3):
i=j
#tplquad returns the triple integral of the force
F.append(tplquad(Force_element_cube,-surface.dim,surface.dim,lambda x: -surface.dim, lambda x: surface.dim, lambda x,y: -surface.dim, lambda x,y: surface.dim, epsrel=0.3)[0])
return F
    def move(self,force=[0.,0.,0.]):
        force = np.asarray(force)
        self.v = self.v + force*dt/m  # update the velocity from the applied force
        self.p = self.p + self.v*dt   # advance the position with the new velocity
def f(x):
return [3.,3.,3.]
electron = test_charge(position=[1.,1.,1.],charge=2,initial_velocity=np.array([1.,0.,0.]))
class Field():
def __init__(self,type='magnetic',func=f,lim_x =[0,2],lim_y=[0,2],lim_z=[0,2]):
self.func=func
self.type=type
self.lim_x = lim_x
self.lim_y = lim_y
self.lim_z=lim_z
def force_oncharge(self,charge):
        if (self.lim_x[0] <= charge.p[0] <= self.lim_x[1] and
                self.lim_y[0] <= charge.p[1] <= self.lim_y[1] and
                self.lim_z[0] <= charge.p[2] <= self.lim_z[1]):
B = (self.func(charge.p))
F= charge.q_1*np.cross(charge.v,B)
return F
else:
return 0
b = Field()
print(b.force_oncharge(electron))
positions = {}
for step in range(10):  # not 'i': the force-element functions read 'i' as a module-level global
    electron.move(b.force_oncharge(electron))
    print(electron.p)
#you either do it through a circle or draw specifc current elements
a = 2  # radius of the current loop (note: shadows the torus minor radius above)
shape = "circle"
perm = 4*np.pi*1e-7  # vacuum permeability mu_0 (T m / A)
current = 2
#end goal is to make a shape class and define these functions within them
def Create_Parameter_Axis(shape):
phi = np.linspace(0, 2 * np.pi, 35)
x = y = np.linspace(0,1,35)
if shape == "circle":
parameter = phi
elif shape == "triangle":
parameter = [x,y]
elif shape == "rectangle":
parameter = [x,y]
    else:
        print("you entered the wrong parameter")
        parameter = None
    return parameter
def Draw_current_loop(parameter, shape):
    def calc_this(shape):
        if shape == "circle":
            a = 2
            z = np.zeros_like(parameter)  # the loop lies in the z = 0 plane
            dI = np.array([-a*np.sin(parameter), a*np.cos(parameter), z])
            r_vec = np.array([-a*np.cos(parameter), a*np.sin(parameter), z])
            return dI, r_vec
    return calc_this(shape)
def Biot_equation(current, perm, r_scal, dI, r_vec):
    xprod = np.cross(dI, r_vec)
    dB = (perm*current/(4*np.pi*r_scal**3)) * xprod  # Biot-Savart: dB = mu_0 I (dl x r_vec) / (4 pi r^3)
    return dB
"""Do the integration using a for loop like before and do it for the different dimensions """
"""DO THE DRAWING BITS """
phi = np.linspace(0, 2 * np.pi, 35)
xc = a*np.cos(phi)
yc = a*np.sin(phi)
zc = np.zeros_like(phi)  # plotly expects an array here, not a scalar
trace4 = go.Scatter3d(x = xc ,
y = yc,
z = zc,
name = "h",
line = dict(width = 6, color = 'rgb(0,0,0)'),
);
| |
"""
Reads JSON files and uploads sensor data to the 52North SOS implementation. Code is tuned to work with data from SmartSantander project: http://www.smartsantander.eu/
It formats data into the body of POST request using JSON, and uploads data into a SOS with a JSON binding.
A JSON file with the following structure is required:
{"markers": [ {"id": "anyvalue", "anyelement": "anyvalue",... , ..."tags": "from an specified list of tags"}]}
Permission to perform transactional operations should be enable in the SOS.
Redundant data (e.g., data with the same Id and time stamp) is ignored.
It includes several options to upload data using multiple threads. This may crash the service if it cannot handle all request, this depends on the rubustness of the server and the SOS implementation itself.
Create: May 25, 2017
Author: Manuel G. Garcia
"""
import json as json
import os
import re
import glob
import datetime
import requests
import concurrent.futures
import time as time_
from . import wrapper
from . import transactional
# OM_types dictionary
om_types = {"m": "OM_Measurement",
"co": "OM_CategoryObservation",
"cto": "OM_CountObservation",
"to": "OM_TextObservation",
"go": "OM_GeometryObservation",
"tho": "OM_TruthObservation"}
def num(s): # Necessary to convert longitude and latitude from a string to a number.
"""
Convert string into a number (float or integer)
:param s: string containing only digits
:return: float or integer
"""
try:
return int(s)
except ValueError:
return float(s)
def timeFromFile(filename):
"""
Extract the date and time which is part of a file name. Ex: 'data_stream-2016-07-21T135509.json'. Use when time is not reported for each sensor.
:param filename: string which contains a date and time
:return: time stamp in ISO format
"""
time_st = re.findall(r"\d\d\d\d[-]\d\d[-]\d\d[T]\d+", filename)
iso_time = time_st[0][:10] + " " + time_st[0][11:13] + ":" + time_st[0][13:15] + ":" + time_st[0][15:]
return iso_time
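# For example (illustrative, matching the docstring's file-name pattern):
#   timeFromFile('data_stream-2016-07-21T135509.json') -> '2016-07-21 13:55:09'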
def history(hist_directory):
'''
Opens a history file or creates a new one at root directory.
A history file keeps a record of which sensors and observations have been processed
File structure = {node: {count: int, latest: time_of_last_observation}}
:param hist_directory: path to directory to store history files.
:return: The newest history file in root directory OR
an empty history file
'''
os.chdir(hist_directory) # set Windows directory
try:
# load latest modified history file
newest = max(glob.iglob('*.json'), key=os.path.getmtime)
his = open(newest)
pool = json.load(his)
his.close()
# when no history file is found
except ValueError: # on empty directory
# raise message
print('------------------------------------')
print('WARNING!:')
print('Empty directory for history files')
print('Starting new record')
print('------------------------------------')
pool = {} # start an empty dictionary for history
return pool
def loadData(root_directory, file_name, nest='markers'):
'''
Opens and reads a json file in the root directory
:param root_directory: path to directory containing json files.
:param file_name: file name
:param nest: key name of the most upper object in the JSON file, which is an array. Default 'markers'
:return: dictionary containing json objects
'''
# open file
with open(root_directory + file_name) as f:
jdata = json.load(f)
jdata = jdata[nest]
return jdata
def cleanData(objectlist, has_tag='', time_attrib=True):
"""
Check if objects in a list contains elements: 'id', georeference, valid time and tags.
:param objectlist: a list containing valid JSON objects. As returned by loadData function.
:param has_tag: objects with this tag name will be kept. The rest will be removed.
    :param time_attrib: when True, objects are filtered on a valid time attribute; when False, the time check is skipped.
:return: a list of json objects
"""
# counter for removed objects
i = 0
cleanList = []
for o in objectlist:
# Keep objects with key 'id' and tag = has_tag
# Keep objects with georeference, e.g. 'longitude' not null and 'longitude'/'latitude' is not zero.
if ('id' in o
and o["longitude"] is not None and
o["tags"] == has_tag and
num(o["longitude"]) != 0.0 and
num(o["latitude"]) != 0.0):
# filter based on valid time.
if time_attrib is True:
try:
if 'Last update' in o:
reported_time = o['Last update'] # reported time
else:
reported_time = o['LastValue'] # Another key for time (in waste collector)
except KeyError: # When object has not this key
print("*** Object has no 'Time' attribute ***")
continue # Go to the next object
else: # if object has time attribute
# Filter zero time
if reported_time != '0000-00-00 00:00:00': # Sensors/observations will be added
# only when objects hold a valid time
cleanList.append(o) # add to clean list.
else: # when time attribute is false
cleanList.append(o)
else:
# print('------------------------')
# print("!!!No 'id' name in object: " + str(i))
# print('------------------------')
i += 1 # increase counter
print(str(i) + ' Objects were removed!')
return cleanList
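# An illustrative call (hypothetical markers, not real SmartSantander data):
# only the first object survives -- the second has no 'id' and zero coordinates.
#
#     markers = [
#         {"id": "n1", "longitude": "-3.80", "latitude": "43.46",
#          "tags": "light", "Last update": "2016-07-21 13:55:09"},
#         {"longitude": "0", "latitude": "0", "tags": "light"},
#     ]
#     cleanData(markers, has_tag="light")  # -> [first marker]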
class Sos():
def __init__(self, url, token=''):
self.sosurl = str(url) # url to access the SOS
self.token = str(token) # security token, optional
# Test if URL exists
try:
test = requests.get(self.sosurl)
# TODO: test for token authorization
test.raise_for_status()
except requests.HTTPError:
print("The URL is not valid")
def upload_directory2sos(sos, directory, sensor_type, history_path, threads=1, time_attribute=True,
spatial_profile=True):
"""
Parses all JSON files in a directory, prepares SOS requests for registering sensors and observations, and uploads data to an existing SOS.
    Application is limited by an intense use of memory when a directory contains a very large number of files.
    The use of multiple threads may crash the SOS. To limit the number of crashes, the function will pause for 20 seconds after every 50 files.
    :param sos: Object describing an existing SOS with valid URL and token.
    :param directory: path to the directory which contains the JSON files.
    :param sensor_type: the type of sensors for which requests will be prepared (e.g., 'light', 'weather_station', etc.)
:param history_path: path to directory for history logs
:param threads: number of threads for multi-thread uploading. Default is 1 thread.
:param time_attribute: states if specific sensor type contains a time attribute or not. Default is True.
:param spatial_profile: switches between the use of insertObservationSP (True) to insertObservation (False).
:return: None
"""
json_files = os.listdir(directory) # list all files in directory
counter = 0 # initiate counter for monitoring progress
start_time = datetime.datetime.now()
print('=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/')
    print('Process started at: ', str(start_time))
print('PROCESSING all files in directory: ', directory)
for f in sorted(json_files): # loop over json files. Files sorted by name.
print('---->>Working on file: ', f)
print(' >> Parsing file ', str(counter + 1), ' out of: ', str(len(json_files)))
print('----------------------------------------------------------')
# Load data from JSON file and prepare requests
request_collection = requests_from_file(directory, f, sensor_type, history_path, time_attribute,
spatial_profile)
upload2sos(sos, request_collection, history_path, threads)
counter += 1
# Put the program to sleep after processing 'n' files.
n = 50
if counter > 0 and (counter % n) == 0:
wait_time = 20 # time in seconds
# print('=============================================')
print('\n >>> The monkey is tired <<< ')
print(' ********************* ')
print(' ** ** ')
print(' ** GETTING MORE ** ')
print(' ** BANANAS ** ')
print(' ** ** ')
print(' ********************* ')
print(' >>> Wait ', wait_time, ' seconds ', ' <<< ')
# print('=============================================')
time_.sleep(wait_time)
else:
pass
end_time = datetime.datetime.now()
elapse_t = end_time - start_time
print('------------------------------')
print('>> Directory Upload Complete <<')
print('-> Total upload time: ' + str(elapse_t))
print('------------------------------')
return None
def requests_from_file(directory, file_name, sensor_type, hist_path, time_attrib=True, spatial_profile=True):
"""
Parse a single JSON file and prepare SOS requests for registering sensors and observations.
:param directory: path to the directory which contains a JSON file
:param file_name: name of a JSON file containing sensor data
    :param sensor_type: the type of sensors for which requests will be prepared (e.g., 'light', 'weather_station', etc.)
:param hist_path: path to directory for history logs
:param time_attrib: states if specific sensor type contains a time attribute or not. Default is True.
:param spatial_profile: switches between the use of insertObservationSP (True) to insertObservation (False).
:return: a list of valid requests, and up-to-date history log
"""
print('=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/')
print('PROCESSING a single file: ', file_name)
print('----------------------------------------------------------')
jdata = loadData(directory, file_name)
# ------------------------------
# Parsing Parameters:
# ------------------------------
# sensor type
type_sensor = wrapper.SensorType(sensor_type)
sensor_attrib = type_sensor.pattern['attributes']
# parsing history
hist = history(hist_path)
# Remove invalid objects
# print(type_sensor.pattern['name'])
clean_obj = cleanData(jdata, type_sensor.pattern['name'], time_attrib)
    # Choose the Insert Observation function:
if spatial_profile is True:
        insertobservation = transactional.insertObservationSP  # with the Spatial Profile
else:
insertobservation = transactional.insertObservation # without it
    # Choose the Insert Sensor function:
if type_sensor.pattern['type'] == 'mobile':
insertsensor = transactional.insertMobileSensor
else:
insertsensor = transactional.insertSensor
prepared_requests = [] # request collector
for o in clean_obj: # loop over each object in input file
ide = o['id']
if ide in hist:
# if node was previously processed
# fetch time:
if time_attrib:
t = o['Last update']
else:
# TODO: time needs transformation wrt server-time
                t = timeFromFile(file_name)  # get time from the file name
if t not in hist[str(ide)]["times"]: # check if object has new time
# change time format
tt = t.split()
time = tt[0] + 'T' + tt[1] + '+00:00'
body = wrapper.Batch(ide) # initiate batch instance
for a in sensor_attrib: # loop over each attribute
# OM type
om = type_sensor.om_types[a[1]]
# define procedure
procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)
off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']
# WARNING: defining an offering for each node
offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)
# Indexing observation identifier
# Index := ide_count+1
new_ide = ide + '_' + "".join(a[0].split()) + '_' + str(
hist[ide]['count'] + 1) # ID like: ide_(count +1)
observation = wrapper.Observation(new_ide)
observation.uom = om
observation.phTime, observation.rTime = time, time # same time for both
# fetch observation value
# Skip 'Location' attribute
if a[1] != "go":
# Get value for attribute in TypeSensor object
try:
val = str(o[a[0]])
# get numeric value
except KeyError: # when key doesn't exits in object
continue # Skip request for this attribute
val_num = re.findall(r"[-+]?\d*\.\d+|\d+", val)
# change to float data type
                        if len(val_num) == 0:
                            # Node attribute had no data, or reported an empty (regarded as null) value.
print('Empty val_num for: ' + str(ide))
if om == "OM_Measurement":
observation.Value = -9.99 # alternative 'null' value for 'float' types
elif om == "OM_CountObservation":
observation.Value = -1111 # alternative 'null' value for 'integer' type
else: # TODO: add more alternative values
continue
# Fetch magnitude
unit = ''.join([i for i in val if not i.isdigit()])
for character in [" ", ".", "-"]:
unit = unit.replace(character, "")
observation.unit = unit
else:
observation.Value = num(val_num[0]) # value of the observation
                            # Fetch the magnitude for the value:
# observation.unit = re.sub((val_num[0] + ' '), "", val, count=1)
unit = ''.join([i for i in val if not i.isdigit()])
for character in [" ", ".", "-"]:
unit = unit.replace(character, "")
observation.unit = unit
                    else:  # For geometry observation
observation.Value = None
observation.unit = None
# feature of interest:
coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99
foi = wrapper.FoI('degree', 'm', coord, ide)
# prepare body request
if type_sensor.pattern["type"] == "mobile":
body_obs = insertobservation(observation, foi, offering, procedure, a[0])
else:
body_obs = insertobservation(observation, foi, offering, procedure, a[0])
body.add_request(body_obs) # collect insert observation request
prepared_requests.append(body)
# After insert observation (parsing) is successful
# update sensor history
old_val = hist[ide]["count"]
hist[ide]["count"] = old_val + 1 # update counter
hist[ide]["times"].append(t) # store new time
else:
continue
else:
# if node is new in hist
# insert sensor to SOS
# print('NEW ' + type_sensor.pattern['name'] + ' SENSOR for: ' + str(ide))
# phenomena / result time:
if time_attrib:
try:
t = o['Last update']
except KeyError:
t = o['LastValue'] # special case (waste collector)
else:
# TODO: time needs transformation wrt server-time
                t = timeFromFile(file_name)  # get time from the file name
# change time format
tt = t.split()
time = tt[0] + 'T' + tt[1] + '+00:00'
body_sensor = ""
# Start batch instance
body = wrapper.Batch(ide)
# Prepare Sensor Registration:
for a in sensor_attrib:
# OM type
om = type_sensor.om_types[a[1]]
# print(type_sensor.om_types[a[1]])
# define procedure
procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)
off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']
# WARNING: defining an offering for each node
offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)
# feature of interest:
try:
coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99
except TypeError:
print(o)
# Feature of interest
foi = wrapper.FoI('degree', 'm', coord, ide)
# prepare body for insert sensor
body_sensor = insertsensor(offering, procedure, foi, type_sensor)
body.add_request(body_sensor) # append insert sensor request
# Prepare Insert Observation Requests:
cuenta = 0
for a in sensor_attrib:
# OM type
om = type_sensor.om_types[a[1]]
# define procedure
procedure = wrapper.Procedure(ide, a[0], 'http://www.geosmartcity.nl/test/observableProperty/', om)
off_name = 'offering for ' + ide + '_' + type_sensor.pattern['name']
# WARNING: defining an offering for each node
offering = wrapper.Offering('http://www.geosmartcity.nl/test/offering/', ide, off_name)
# Indexing observation identifier
new_ide = ide + '_' + "".join(a[0].split()) + '_1' # ID like: ide_(count +1)
# print("id: "+new_ide)
observation = wrapper.Observation(new_ide)
observation.uom = om
observation.phTime, observation.rTime = time, time # same time for both
# Skip 'Location' attribute
if a[1] != "go":
# fetch observation value
                    try:  # Avoid stopping when the sensor type reports different attributes
val = str(o[a[0]])
# get numeric value
except KeyError:
# print('Sensor without this attribute..!!!' + ' Sensor: ' + str(ide) +' attribute: ' + str(a))
continue
val_num = re.findall(r"[-+]?\d*\.\d+|\d+", val)
# change to float data type
# print("type of measurement:", om)
if len(val_num) == 0:
print('Empty val_num for: ' + str(ide))
if om == "OM_Measurement":
observation.Value = -9.99 # alternative 'null' value for 'float' data type in Database
elif om == "OM_CountObservation":
observation.Value = -1111 # alternative 'null' value for 'integer' data type in Database
else: # TODO: add more alternative values
continue
# Fetch magnitude
# When No magnitude an empty string is returned.
unit = ''.join([i for i in val if not i.isdigit()])
for character in [" ", ".", "-"]:
unit = unit.replace(character, "")
# print("unit at: ", o["id"], " for ", a, " is: ", unit)
observation.unit = unit # val should contain only the magnitud
else:
observation.Value = num(val_num[0]) # value of the observation
                        # Fetch the magnitude for the value:
# observation.unit = re.sub((val_num[0] + ' '), "", val, count=1)
unit = ''.join([i for i in val if not i.isdigit()])
for character in [" ", ".", "-"]:
unit = unit.replace(character, "")
# print("unit at: ", o["id"], " for ", a, " is: ", unit)
observation.unit = unit
                else:  # For geometry observation
observation.Value = None
observation.unit = None
# feature of interest:
coord = (float(o['longitude']), float(o['latitude']), -9.99) # No data := -9.99
foi = wrapper.FoI('degree', 'm', coord, ide)
# insert observation to SOS
if type_sensor.pattern["type"] == "mobile":
# TODO: modify insert sensor function for mobile sensors
body_obs = insertobservation(observation, foi, offering, procedure, a[
0]) # TODO: Fix, this will produce an error if a mobile sensor is declared
else:
body_obs = insertobservation(observation, foi, offering, procedure, a[0])
body.add_request(body_obs) # add observation request
cuenta += 1
prepared_requests.append(body)
# After sensor and observation are successful
# Update sensor history with new record
hist[ide] = {"count": 1, "times": [t]}
# insert parsing history. TODO: Is this necessary?
# hist["last parsed"] = {"runtime error": {}, "file name" : '', "run time": ''}
return {"requests": prepared_requests, "history": hist, "file": file_name}
def upload2sos(sos, request_collection, hist_path, threads=1):
"""
Upload data to a SOS using HTTP POST requests
:param sos: Object describing an existing SOS
:param request_collection: dictionary containing: HTTP requests, historic log, and name parsed file. Each request is an instance of Batch class
:param hist_path: directory in which the history log files will be saved
:param threads: number of threads for multi-thread uploading. Default is 1 thread.
:return: None
"""
    # TODO: currently it works for a single file (a list of Batch objects). Extend it to deal with multiple files; including a 'sleep' time might not be of practical use.
# If new requests were created
err_log = {} # initiate error log
num_posts = len(request_collection['requests']) # number of requests
hist = request_collection['history']
file_name = request_collection['file']
re_quests = request_collection['requests']
start_time = datetime.datetime.now()
if num_posts > 0:
# send requests
print('-----------------------------')
print('UPLOADING DATA TO: ' + sos.sosurl)
print('SENDING ', str(num_posts), ' REQUESTS...', 'Using:', str(threads), 'threads')
        print("WARNING: Uploading redundant data won't be flagged", '...working to fix it...')
# wrapper.sosPost(my_requests[count].reqs(), url, token, response=False)
# my_requests.clear()
# count += 1
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
future_to_req = {executor.submit(wrapper.sosPost, reques.reqs(), sos.sosurl, sos.token, True): reques for
reques in re_quests}
for future in concurrent.futures.as_completed(future_to_req):
req = future_to_req[future] # Batch instances
# print(future.result())
try:
# TODO: server is not reporting errors when sending redundant data. Check sosPost as well.
# # print('hello')
future.result()
# # future.result()
except Exception as exc:
future.exception()
err_log[str(datetime.datetime.now())] = [req.id, exc, req.body]
print('%r generated an exception: %s Request: %s' % (req.id, exc, req.body))
e_time = datetime.datetime.now()
wait = e_time - start_time
        reqs_per_post = len(re_quests)  # an approximation
# print('Upload time: ', file_name, str(wait), )
print('SOS server load: ', str(round(reqs_per_post / wait.total_seconds(), 1)), 'Rps')
# print('Accumulated time: ', str(datetime.datetime.now() - start_time))
print('------------------------------')
request_collection.clear()
# count += 1
# update history log file:
updateHistory(hist_path, file_name, hist, err_log)
    # report that no new requests were sent
else:
print("*** No NEW sensors nor NEW observations in file %r ***" % file_name)
# count += 1
    # Create an error log file if any errors were reported during uploading
if len(err_log) > 0:
# file name
efile = 'runtime-errors' + datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S") + '.log'
ef = open(hist_path + efile, 'w') # save to same directory as history log files
json.dump(err_log, ef)
ef.close()
end_time = datetime.datetime.now()
elapse_t = end_time - start_time
print('------------------------------')
print('File Upload Complete')
print('Upload time: ' + str(elapse_t))
print('------------------------------')
return None
def updateHistory(hist_path, file_name, latest_history_log, error_log):
"""
    Updates the history log of requests sent to the SOS server. It writes a new file containing the latest changes to a local directory.
    If errors occurred in the server, an error log will be added to the history file.
    :param hist_path: path to a directory to store the new (updated) history log file
    :param file_name: name of the source file which is being uploaded.
:param latest_history_log: up to date history log, formatted as JSON
:param error_log: error reports. Formatted as JSON
:return: new history log file formatted as JSON
"""
hist = latest_history_log
hist['last upload'] = {"name": file_name, "run time": str(datetime.datetime.now()), "runtime error": error_log}
fname = 'hist-' + datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S") + '.json'
fn = open(hist_path + fname, 'w') # create new history file
json.dump(hist, fn) # write to file
fn.close()
print("History log file was updated!!")
return None
# TODO: URIs from waste sensors are not valid. They contain spaces and special characters, which have to be removed.
def main():
# TODO: Write some tests
# dir = 'c:/sos_santander/raw_data/sample/'
# f_name = "santander_example_data.json"
# f_name2 = "data_stream-2016-07-01T080007.json"
# h_dir = 'c:/Temp/hist_temp/'
url = 'http://xx.xx.xx.xxxx:8080/sos-4.4/service'
token = 'TWFudWVsIEdhcmNpYQ=='
# rq = requests_from_file(dir, f_name, 'bus', h_dir, time_attrib=True)
# r = rq["requests"][0]
# print(r.reqs())
sos = Sos(url, token)
# upload2sos(sos, rq, h_dir, 3)
# upload_directory2sos(sos, dir, 'light', h_dir,3)
if __name__ == '__main__':
main()
| |
import os
MONGO_HOST = os.environ.get('MONGO_HOST', 'localhost')
MONGO_PORT = int(os.environ.get('MONGO_PORT', 27017))
MONGO_DBNAME = os.environ.get('MONGO_DBNAME', 'atlas')
DATE_FORMAT = '%Y-%m-%d %H:%M:%S GMT'
# Enable reads (GET) and inserts (POST) for resources/collections.
RESOURCE_METHODS = ['GET', 'POST']
# Enable reads (GET), edits (PATCH), replacements (PUT), and deletes of
# individual items.
ITEM_METHODS = ['GET', 'PATCH', 'PUT', 'DELETE']
# Allow public GET by default; can be overridden for a specific resource or item.
PUBLIC_METHODS = ['GET']
# Default to return 500 results per page. Allow up to 2000.
PAGINATION_LIMIT = 2000
PAGINATION_DEFAULT = 500
# Add support for CORS
X_DOMAINS = '*'
X_HEADERS = ['Access-Control-Allow-Origin', 'If-Match',
'Authorization', 'User-Agent', 'Content-Type']
# Allow $regex filtering. The default config blocks both $where and $regex.
MONGO_QUERY_BLACKLIST = ['$where']
# Require etags
ENFORCE_IF_MATCH = True
# Definitions of schemas for Items. Schema is based on Cerberus grammar
# https://github.com/nicolaiarocci/cerberus.
#
# Eve/Mongo create the following: '_created', '_updated', '_etag', and '_id'.
# We don't use those fields in our logic because we want to be able to move or
# recreate a record without losing any information.
# Code schema. Defines a code asset that can be applied to a site.
# We nest in 'meta' to allow us to check for a unique combo
CODE_SCHEMA = {
'meta': {
'type': 'dict',
'unique': True,
'schema': {
'name': {
'type': 'string',
'minlength': 3,
'required': True,
},
'version': {
'type': 'string',
'minlength': 1,
'required': True,
},
'code_type': {
'type': 'string',
'allowed': ['library', 'theme', 'module', 'core', 'profile'],
'required': True,
},
'label': {
'type': 'string',
'minlength': 3,
},
'is_current': {
'type': 'boolean',
'default': False,
'required': True,
},
'tag': {
'type': 'list',
},
},
},
'deploy': {
'type': 'dict',
'schema': {
'registry_rebuild': {
'type': 'boolean',
'default': False,
'required': True,
},
'cache_clear': {
'type': 'boolean',
'default': True,
'required': True,
},
'update_database': {
'type': 'boolean',
'default': True,
'required': True,
},
},
},
'git_url': {
'type': 'string',
'regex': r'((git|ssh|http(s)?)|(git@[\w\.]+))(:(//)?)([\w\.@\:/\-~]+)(\.git)(/)?',
'required': True,
},
'commit_hash': {
'type': 'string',
'required': True,
'unique': True
},
'created_by': {
'type': 'string',
},
'modified_by': {
'type': 'string',
},
}
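# Illustrative document that validates against CODE_SCHEMA (all values below are
# made up, not taken from real data):
# {
#     'meta': {'name': 'cu_example_bundle', 'version': '7.x-1.0',
#              'code_type': 'module', 'is_current': True},
#     'deploy': {'registry_rebuild': False, 'cache_clear': True,
#                'update_database': True},
#     'git_url': 'https://github.com/example/cu_example_bundle.git',
#     'commit_hash': 'a1b2c3d4e5f6',
# }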
QUERY_SCHEMA = {
'title': {
'type': 'string',
'required': True,
},
'description': {
'type': 'string',
},
'endpoint': {
'type': 'list',
'allowed': ["code", "site", "statistic"],
'required': True,
},
'query': {
'type': 'string',
'unique': True,
},
'tags': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'rank': {
'type': 'integer',
},
'created_by': {
'type': 'string',
},
'modified_by': {
'type': 'string',
},
}
# Site schema.
SITES_SCHEMA = {
'path': {
'type': 'string',
'unique': True,
},
'db_key': {
'type': 'string',
},
'sid': {
'type': 'string',
'minlength': 9,
'maxlength': 14,
'unique': True,
},
'type': {
'type': 'string',
'allowed': ['express', 'legacy', 'homepage'],
'default': 'express',
},
'status': {
'type': 'string',
'allowed': [
'pending',
'available',
'installing',
'installed',
'launching',
'launched',
'locked',
'take_down',
'down',
'restore',
],
'default': 'pending',
},
'environment': {
'type': 'string',
'allowed': [
'local',
'dev',
'test',
'prod'
],
},
'pool': {
'type': 'string',
'allowed': [
'poolb-express',
'poolb-homepage',
'WWWLegacy'],
'default': 'poolb-express',
},
'update_group': {
'type': 'integer',
},
'f5only': {
'type': 'boolean',
'default': False
},
'settings': {
'type': 'dict',
'schema': {
'page_cache_maximum_age': {
'type': 'integer',
'default': 10800,
},
'siteimprove_site': {
'type': 'integer',
},
'siteimprove_group': {
'type': 'integer',
},
'cse_creator': {
'type': 'string',
},
'cse_id': {
'type': 'string',
},
},
},
'tag': {
'type': 'list',
},
'code': {
'type': 'dict',
'schema': {
'core': {
'type': 'objectid',
'data_relation': {
'resource': 'code',
'field': '_id',
'embeddable': True,
},
},
'profile': {
'type': 'objectid',
'data_relation': {
'resource': 'code',
'field': '_id',
'embeddable': True,
},
},
'package': {
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'code',
'field': '_id',
'embeddable': True,
},
}
},
},
},
'dates': {
'type': 'dict',
'schema': {
# See https://docs.python.org/2/library/datetime.html#datetime.datetime for format.
'created': {
'type': 'datetime',
},
'assigned': {
'type': 'datetime',
},
'launched': {
'type': 'datetime',
},
'locked': {
'type': 'datetime',
},
'taken_down': {
'type': 'datetime',
'nullable': True,
},
},
},
'statistics': {
'type': 'objectid',
'data_relation': {
'resource': 'statistics',
'field': '_id',
'embeddable': True,
'unique': True,
},
},
'created_by': {
'type': 'string',
},
'modified_by': {
'type': 'string',
},
}
STATISTICS_SCHEMA = {
'site': {
'type': 'objectid',
'data_relation': {
'resource': 'sites',
'field': '_id',
},
'required': True,
'unique': True,
},
'name': {
'type': 'string',
'minlength': 1,
'nullable': True,
},
'status': {
'type': 'string',
'minlength': 1,
'nullable': True,
},
'nodes_total': {
'type': 'integer',
'nullable': True,
},
'node_revision_total': {
'type': 'integer',
'nullable': True,
},
'nodes_by_type': {
'type': 'dict',
'nullable': True,
'schema': {
'page': {'type': 'integer', 'nullable': True},
'file': {'type': 'integer', 'nullable': True},
'faqs': {'type': 'integer', 'nullable': True},
'content_list_page': {'type': 'integer', 'nullable': True},
'webform': {'type': 'integer', 'nullable': True},
'article': {'type': 'integer', 'nullable': True},
'article_list_page': {'type': 'integer', 'nullable': True},
'person': {'type': 'integer', 'nullable': True},
'person_list_page': {'type': 'integer', 'nullable': True},
'photo_gallery': {'type': 'integer', 'nullable': True},
},
},
'nodes_other': {
'type': 'string',
'nullable': True,
},
'days_since_last_edit': {
'type': 'integer',
'nullable': True,
},
'days_since_last_login': {
'type': 'integer',
'nullable': True,
},
'beans_total': {
'type': 'integer',
'nullable': True,
},
'beans_by_type': {
'type': 'dict',
'nullable': True,
'schema': {
'hero_unit': {'type': 'integer', 'nullable': True},
'slider': {'type': 'integer', 'nullable': True},
'block': {'type': 'integer', 'nullable': True},
'content_list': {'type': 'integer', 'nullable': True},
'feature_callout': {'type': 'integer', 'nullable': True},
'quicktab': {'type': 'integer', 'nullable': True},
'video_reveal': {'type': 'integer', 'nullable': True},
'block_row': {'type': 'integer', 'nullable': True},
'block_section': {'type': 'integer', 'nullable': True},
'cu_events_calendar_block': {'type': 'integer', 'nullable': True},
'events_calendar_grid': {'type': 'integer', 'nullable': True},
'rss': {'type': 'integer', 'nullable': True},
'articles': {'type': 'integer', 'nullable': True},
'article_feature': {'type': 'integer', 'nullable': True},
'article_grid': {'type': 'integer', 'nullable': True},
'article_slider': {'type': 'integer', 'nullable': True},
'people_list_block': {'type': 'integer', 'nullable': True},
'social_links': {'type': 'integer', 'nullable': True},
'facebook_activity': {'type': 'integer', 'nullable': True},
'facebook_like_button': {'type': 'integer', 'nullable': True},
'twitter_block': {'type': 'integer', 'nullable': True},
},
},
'beans_other': {
'type': 'string',
'nullable': True,
},
'context': {
'type': 'dict',
'nullable': True,
'schema': {
'condition': {
'type': 'dict',
'nullable': True,
'schema': {
'context': {'type': 'integer', 'nullable': True},
'context_all': {'type': 'integer', 'nullable': True},
'default': {'type': 'integer', 'nullable': True},
'layout': {'type': 'integer', 'nullable': True},
'menu': {'type': 'integer', 'nullable': True},
'node': {'type': 'integer', 'nullable': True},
'node_taxonomy': {'type': 'integer', 'nullable': True},
'path': {'type': 'integer', 'nullable': True},
'query_param': {'type': 'integer', 'nullable': True},
'query_string': {'type': 'integer', 'nullable': True},
'sitewide': {'type': 'integer', 'nullable': True},
'sitewide_public': {'type': 'integer', 'nullable': True},
'taxonomy_term': {'type': 'integer', 'nullable': True},
'user': {'type': 'integer', 'nullable': True},
'user_page': {'type': 'integer', 'nullable': True},
'views': {'type': 'integer', 'nullable': True},
},
},
'reaction': {
'type': 'dict',
'nullable': True,
'schema': {
'backstretch': {'type': 'integer', 'nullable': True},
'block': {'type': 'integer', 'nullable': True},
'breadcrumb': {'type': 'integer', 'nullable': True},
'column_override': {'type': 'integer', 'nullable': True},
'cu_share': {'type': 'integer', 'nullable': True},
'menu': {'type': 'integer', 'nullable': True},
'region': {'type': 'integer', 'nullable': True},
'template_suggestions': {'type': 'integer', 'nullable': True},
'theme': {'type': 'integer', 'nullable': True},
'theme_html': {'type': 'integer', 'nullable': True},
'title_image': {'type': 'integer', 'nullable': True},
},
},
},
},
'context_other_conditions': {
'type': 'string',
'nullable': True,
},
'context_other_reactions': {
'type': 'string',
'nullable': True,
},
'variable_cron_last': {
'type': 'integer',
'nullable': True,
},
'variable_site_403': {
'type': 'string',
'nullable': True,
},
'variable_site_404': {
'type': 'string',
'nullable': True,
},
'variable_theme_default': {
'type': 'string',
'nullable': True,
},
'variable_ga_account': {
'type': 'string',
'nullable': True,
},
'variable_livechat_license_number': {
'type': 'string',
'nullable': True,
},
'profile_module_manager': {
'type': 'string',
'nullable': True,
},
'express_code_version': {
'type': 'string',
'nullable': True,
},
'express_core_schema_version': {
'type': 'integer',
'nullable': True,
},
'theme_is_responsive': {
'type': 'boolean',
'nullable': True,
},
'overridden_features': {
'type': 'dict',
'nullable': True,
},
'drupal_system_status': {
'type': 'boolean',
'nullable': True,
},
'custom_logo_settings': {
'type': 'boolean',
'nullable': True,
},
'users': {
'type': 'dict',
'nullable': True,
'schema': {
'email_address': {
'type': 'dict',
'nullable': True,
'schema': {
'edit_my_content': {
'type': 'list',
'nullable': True,
},
'content_editor': {
'type': 'list',
'nullable': True,
},
'site_contact': {
'type': 'list',
'nullable': True,
},
},
},
'username': {
'type': 'dict',
'nullable': True,
'schema': {
'edit_my_content': {
'type': 'list',
'nullable': True,
},
'content_editor': {
'type': 'list',
'nullable': True,
},
'site_contact': {
'type': 'list',
'nullable': True,
},
},
},
'no_valid_owner': {
'type': 'boolean',
'nullable': True,
},
'counts': {
'type': 'dict',
'nullable': True,
'schema': {
'edit_my_content': {
'type': 'integer',
'nullable': True,
},
'content_editor': {
'type': 'integer',
'nullable': True,
},
'site_contact': {
'type': 'integer',
'nullable': True,
},
},
},
},
},
'bundles': {
'type': 'dict',
'nullable': True,
'schema': {
'cu_advanced_content_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_advanced_design_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_advanced_layout_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_events_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_feeds_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_forms_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_news_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_people_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_photo_gallery_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_seo_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_social_media_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_seo_admin_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_test_content_admin_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'cu_debug_admin_bundle': {
'type': 'dict',
'nullable': True,
'schema': {
'schema_version': {
'type': 'integer',
'nullable': True,
},
},
},
'other': {
'type': 'string',
'nullable': True,
},
},
},
'webforms': {
'type': 'dict',
'nullable': True,
'schema': {
'total_submissions': {'type': 'integer', 'nullable': True},
'active_forms': {'type': 'integer', 'nullable': True},
'inactive_forms': {'type': 'integer', 'nullable': True},
},
},
'created_by': {
'type': 'string',
'nullable': True,
},
'modified_by': {
'type': 'string',
'nullable': True,
},
}
COMMANDS_SCHEMA = {
'name': {
'type': 'string',
'minlength': 3,
'required': True,
},
'command': {
'type': 'string',
'minlength': 3,
'required': True,
},
# The string that is stored needs to be posted with Unicode character encoding
'query': {
'type': 'string',
'minlength': 9,
},
'single_server': {
'type': 'boolean',
'required': True,
'default': True,
},
'created_by': {
'type': 'string',
},
'modified_by': {
'type': 'string',
},
}
"""
Definitions of Resources.
Tells Eve what methods and schemas apply to a given resource.
"""
# Code resource
CODE = {
'item_title': 'code',
'public_methods': ['GET'],
'public_item_methods': ['GET'],
'versioning': True,
'soft_delete': True,
'schema': CODE_SCHEMA,
}
# Query resource
QUERY = {
'item_title': 'query',
'public_methods': ['GET'],
'public_item_methods': ['GET'],
'versioning': True,
'schema': QUERY_SCHEMA,
}
# Sites resource
SITES = {
'item_title': 'site',
# Allow lookup by 'sid' in addition to '_id'
'additional_lookup': {
'url': r'regex("[\w]+")',
'field': 'sid'
},
'public_methods': ['GET'],
'public_item_methods': ['GET'],
'versioning': True,
'soft_delete': True,
'schema': SITES_SCHEMA,
}
# Statistics resource
STATISTICS = {
'item_title': 'statistics',
'public_methods': ['GET'],
'public_item_methods': ['GET'],
'versioning': True,
'soft_delete': True,
'schema': STATISTICS_SCHEMA,
}
# Command resource
# Empty public_item_methods means that you can't call actual commands without authentication.
# Anonymous users can list the commands, but not call them.
COMMANDS = {
'item_title': 'commands',
'public_methods': ['GET'],
'public_item_methods': [],
'versioning': True,
'schema': COMMANDS_SCHEMA,
}
# Domain definition. Tells Eve what resources are available on this domain.
DOMAIN = {
'sites': SITES,
'code': CODE,
'commands': COMMANDS,
'query': QUERY,
'statistics': STATISTICS,
}
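# Minimal sketch of an Eve app consuming this settings module (not part of the
# original source; assumes this file is saved as the default `settings.py` next
# to the application module):
#
#     from eve import Eve
#     app = Eve()
#     if __name__ == '__main__':
#         app.run()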
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Ophelia address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Ophelia address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
"""
Photometric Redshifts
=====================
This script shows simple methods to derive photometric redshifts using machine learning.
Data can be downloaded from the Kaggle website:
https://inclass.kaggle.com/c/PhotometricRedshiftEstimation/data
:requires: pandas
:requires: numpy
:requires: scikit-learn
:requires: matplotlib
tested with:
pandas 0.15.2
Numpy 1.9.1
sklearn 0.15.2
matplotlib 1.4.2
:author: Sami-Matias Niemi
:contact: s.niemi@icloud.com
:version: 0.8
"""
import matplotlib
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['image.interpolation'] = 'none'
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import cross_validation
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor as GBR
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn import grid_search
from sklearn.cross_validation import cross_val_score
from sklearn.learning_curve import validation_curve
from sklearn.learning_curve import learning_curve
from sklearn import metrics
from sklearn import preprocessing
import copy
import cPickle
def loadKaggledata(folder='MachineLearning/photo-z/kaggleData/', useErrors=True):
"""
Load Kaggle photometric redshift competition data. These data are from 2012 and at low-z.
train: ID, u, g, r, i, z, uErr, gErr, rErr, iErr, zErr, redshift
query: ID, u, g, r, i, z, uErr, gErr, rErr, iErr, zErr
solution: ID, redshift, estimatedRedshiftError
"""
filename = folder + 'train.csv'
data = pd.read_csv(filename, index_col=0, usecols=['ID', 'u', 'g', 'r', 'i', 'z',
'modelmagerr_u', 'modelmagerr_g',
'modelmagerr_r', 'modelmagerr_i',
'modelmagerr_z', 'redshift'])
if useErrors:
data_features = data[['u', 'g', 'r', 'i', 'z',
'modelmagerr_u', 'modelmagerr_g',
'modelmagerr_r', 'modelmagerr_i',
'modelmagerr_z']]
else:
data_features = data[['u', 'g', 'r', 'i', 'z']]
data_redshifts = data[['redshift']]
X_train, X_test, y_train, y_test = train_test_split(data_features.values,
data_redshifts.values,
test_size=0.35,
random_state=42)
# remove mean and scale to unit variance
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#make 1D vectors
y_train = y_train.ravel()
y_test = y_test.ravel()
print "feature vector shape=", data_features.values.shape
print 'Training sample shape=', X_train.shape
print 'Testing sample shape=', X_test.shape
print 'Target training redshift sample shape=', y_train.shape
print 'Testing redshift sample shape=', y_test.shape
return X_train, X_test, y_train, y_test
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best", shadow=True, fancybox=True)
return plt
def randomForest(X_train, X_test, y_train, y_test, search=True, save=False):
"""
A random forest regressor.
A random forest is a meta estimator that fits a number of classifying decision
trees on various sub-samples of the dataset and use averaging to improve the
predictive accuracy and control over-fitting.
Can run a grid search to look for the best parameters (search=True) and
save the model to a file (save=True).
"""
if search:
# parameter values over which we will search
parameters = {'min_samples_split': [1, 2, 3, 10],
'min_samples_leaf': [1, 2, 3, 10],
'max_features': [None, 'sqrt', 7],
'max_depth': [None, 15, 30, 40]}
rf = RandomForestRegressor(n_estimators=100, n_jobs=4, verbose=1)
#note: one can run out of memory if using n_jobs=-1..
rf_tuned = grid_search.GridSearchCV(rf, parameters, scoring='r2', n_jobs=2, verbose=1, cv=3)
else:
rf_tuned = RandomForestRegressor(n_estimators=2000,
max_depth=28,
max_features=7,
min_samples_split=2,
min_samples_leaf=2,
n_jobs=-1, verbose=1)
#n_estimators=5000 will take about 36GB of RAM
print '\nTraining...'
rf_optimised = rf_tuned.fit(X_train, y=y_train)
print 'Done'
if search:
print 'The best score and estimator:'
print(rf_optimised.best_score_)
print(rf_optimised.best_estimator_)
rf_optimised = rf_optimised.best_estimator_
if save:
print 'Save the Random Forest to a pickled file; note that the model can quickly take 100G'
fp = open('model/RF.pkl', 'w')
cPickle.dump(rf_optimised, fp)
fp.close()
print '\nPredicting...'
predicted = rf_optimised.predict(X_test)
expected = y_test.copy()
print 'Done'
return predicted, expected
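# Illustrative: reloading the pickled Random Forest later (assumes the model was
# saved with save=True and that the 'model' directory exists):
#   fp = open('model/RF.pkl', 'r')
#   rf = cPickle.load(fp)
#   fp.close()
#   predicted = rf.predict(X_test)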
def SupportVectorRegression(X_train, X_test, y_train, y_test, search, save=False):
"""
Support Vector Regression.
Can run a grid search to look for the best parameters (search=True) and
save the model to a file (save=True).
"""
if search:
# parameter values over which we will search
parameters = {'C': [0.1, 0.5, 1., 1.5, 2.],
'kernel': ['rbf', 'sigmoid', 'poly'],
'degree': [3, 5]}
s = SVR()
clf = grid_search.GridSearchCV(s, parameters, scoring='r2',
n_jobs=-1, verbose=1, cv=3)
else:
clf = SVR(verbose=1)
print '\nTraining...'
clf.fit(X_train, y_train)
print 'Done'
if search:
print 'The best score and estimator:'
print(clf.best_score_)
print(clf.best_estimator_)
print 'Best hyperparameters:'
print clf.best_params_
clf = clf.best_estimator_
if save:
print 'Save the SVR model to a pickled file...'
fp = open('model/SVR.pkl', 'w')
cPickle.dump(clf, fp)
fp.close()
print '\nPredicting...'
predicted = clf.predict(X_test)
expected = y_test.copy()
print 'Done'
return predicted, expected
def BayesianRidge(X_train, X_test, y_train, y_test, search=True):
"""
Bayesian Ridge Regression.
"""
print '\nTraining...'
clf = linear_model.BayesianRidge(n_iter=1000, tol=1e-3, alpha_1=1.,
fit_intercept=True, normalize=False, verbose=1)
clf.fit(X_train, y_train)
print 'Done'
print '\nPredicting...'
predicted = clf.predict(X_test)
expected = y_test.copy()
print 'Done'
return predicted, expected
def GradientBoostingRegressor(X_train, X_test, y_train, y_test, search, save=False):
"""
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Can run a grid search to look for the best parameters (search=True) and
save the model to a file (save=True).
Among the most important hyperparameters for GBRT are:
#. number of regression trees (n_estimators)
#. depth of each individual tree (max_depth)
#. loss function (loss)
#. learning rate (learning_rate)
"""
if search:
# parameter values over which we will search
parameters = {'loss': ['ls', 'huber'],
'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.5],
'max_depth': [1, 2, 3, 5, 7, None],
'max_features': ['sqrt', None]}
s = GBR(n_estimators=500, verbose=1)
clf = grid_search.GridSearchCV(s, parameters, scoring='r2',
n_jobs=-1, verbose=1, cv=3)
else:
clf = GBR(verbose=1, n_estimators=5000, learning_rate=0.05, loss='huber', max_depth=3, subsample=0.8)
print '\nTraining...'
clf.fit(X_train, y_train)
print 'Done'
if search:
print 'The best score and estimator:'
print(clf.best_score_)
print(clf.best_estimator_)
print 'Best hyperparameters:'
print clf.best_params_
clf = clf.best_estimator_
if save:
print 'Save the GBR model to a pickled file...'
fp = open('model/GBR.pkl', 'w')
cPickle.dump(clf, fp)
fp.close()
print '\nPredicting...'
predicted = clf.predict(X_test)
expected = y_test.copy()
print 'Done'
return predicted, expected
def randomForestTestPlots(X_train, X_test, y_train, y_test):
"""
Validation Curve
================
Underfitting - both the training and the validation score are low.
Overfitting - training score is good but the validation score is low.
When the score is high for both, the method is working pretty well.
Learning Curve
==============
learning curve shows the validation and training score of an estimator
for varying numbers of training samples. It is a tool to find out how much
we benefit from adding more training data and whether the estimator
suffers more from a variance error or a bias error. If both the validation
score and the training score converge to a value that is too low with
increasing size of the training set, we will not benefit much from more
training data.
"""
title = "Validation Curve (Random Forest)"
print title
param_range = np.round(np.linspace(0, 60, 15) + 1).astype(np.int)
rf = RandomForestRegressor(n_estimators=100,
max_features=6,
min_samples_split=2,
n_jobs=2, verbose=1)
#validation curve
train_scores, test_scores = validation_curve(rf,
X_train,
y_train,
'max_depth',
param_range,
n_jobs=-1,
verbose=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title(title)
plt.xlabel("max_depth")
plt.ylabel("Score")
plt.ylim(0.8, 1.01)
plt.plot(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.plot(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.savefig('RandomForestValidationCurve.pdf')
plt.close()
#learning curve
title = "Learning Curves (Random Forest)"
print title
cv = cross_validation.ShuffleSplit(X_train.shape[0], n_iter=50,
test_size=0.2, random_state=0)
plot_learning_curve(rf, title, X_train, y_train, ylim=(0.85, 1.01), cv=cv, n_jobs=-1)
plt.savefig('RandomForestLearningCurve.pdf')
plt.close()
def GradientBoostingRegressorTestPlots(X_train, X_test, y_train, y_test, n_estimators=1000):
"""
An important diagnostic when using GBRT in practice is the so-called deviance
plot that shows the training/testing error (or deviance) as a function of the
number of trees.
"""
def fmt_params(params):
return ", ".join("{0}={1}".format(key, val) for key, val in params.iteritems())
def deviance_plot(est, X_test, y_test, ax=None, label='', train_color='#2c7bb6',
test_color='#d7191c', alpha=1.0):
"""Deviance plot for ``est``, use ``X_test`` and ``y_test`` for test error. """
test_dev = np.empty(n_estimators)
for i, pred in enumerate(est.staged_predict(X_test)):
test_dev[i] = est.loss_(y_test, pred)
if ax is None:
fig = plt.figure(figsize=(8, 5))
ax = plt.gca()
ax.plot(np.arange(n_estimators) + 1, test_dev, color=test_color, label='Test %s' % label,
linewidth=2, alpha=alpha)
ax.plot(np.arange(n_estimators) + 1, est.train_score_, color=train_color,
label='Train %s' % label, linewidth=2, alpha=alpha)
ax.set_ylabel('Error')
ax.set_xlabel('n_estimators')
return test_dev, ax
est = GBR(n_estimators=n_estimators, verbose=1)
est.fit(X_train, y_train)
feature_importance = est.feature_importances_
test_dev, ax = deviance_plot(est, X_test, y_test)
ax.legend(loc='upper right')
ax.annotate('Lowest test error', xy=(test_dev.argmin() + 1, test_dev.min() + 0.02), xycoords='data',
xytext=(150, 1.0), textcoords='data',
arrowprops=dict(arrowstyle="->", connectionstyle="arc"))
plt.savefig('GBRdeviance.pdf')
plt.close()
#sample leaves
fig = plt.figure(figsize=(8, 5))
ax = plt.gca()
for params, (test_color, train_color) in [({'min_samples_leaf': 1},
('#d7191c', '#2c7bb6')),
({'min_samples_leaf': 4},
('#fdae61', '#abd9e9'))]:
est = GBR(n_estimators=n_estimators, verbose=1)
est.set_params(**params)
est.fit(X_train, y_train)
test_dev, ax = deviance_plot(est, X_test, y_test, ax=ax, label=fmt_params(params),
train_color=train_color, test_color=test_color)
plt.legend(loc='upper right')
plt.savefig('GBRTree.pdf')
plt.close()
# learning rate
fig = plt.figure(figsize=(8, 5))
ax = plt.gca()
for params, (test_color, train_color) in [({'learning_rate': 0.2},
('#d7191c', '#2c7bb6')),
({'learning_rate': 0.7},
('#fdae61', '#abd9e9'))]:
est = GBR(n_estimators=n_estimators, verbose=1)
est.set_params(**params)
est.fit(X_train, y_train)
test_dev, ax = deviance_plot(est, X_test, y_test, ax=ax, label=fmt_params(params),
train_color=train_color, test_color=test_color)
plt.legend(loc='upper right')
plt.savefig('GBRShrinkage.pdf')
plt.close()
#sub-samples
fig = plt.figure(figsize=(8, 5))
ax = plt.gca()
for params, (test_color, train_color) in [({'subsample': 1.},
('#d7191c', '#2c7bb6')),
({'subsample': 0.7},
('#fdae61', '#abd9e9'))]:
est = GBR(n_estimators=n_estimators, verbose=1)
est.set_params(**params)
est.fit(X_train, y_train)
test_dev, ax = deviance_plot(est, X_test, y_test, ax=ax, label=fmt_params(params),
train_color=train_color, test_color=test_color)
plt.legend(loc='upper right')
plt.savefig('GBRSubsample.pdf')
plt.close()
#feature importance
feature_names = ['u', 'g', 'r', 'i', 'z', 'modelmagerr_u', 'modelmagerr_g',
'modelmagerr_r', 'modelmagerr_i', 'modelmagerr_z']
feature_names = np.asarray(feature_names)
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 1, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig('GBRImportance.pdf')
plt.close()
def plotResults(predicted, expected, output):
"""
Generate a simple plot demonstrating the results.
"""
var = metrics.explained_variance_score(expected, predicted)
mae = metrics.mean_absolute_error(expected, predicted)
mse = metrics.mean_squared_error(expected, predicted)
r2 = metrics.r2_score(expected, predicted)
rms = np.sqrt(np.mean((expected - predicted) ** 2))
print output
print 'Explained variance (best possible score is 1.0, lower values are worse):', var
print 'Mean Absolute Error (best is 0.0):', mae
print 'Mean Squared Error (best is 0.0):', mse
print 'R2 score (best is 1.0):', r2
print 'RMS:', rms
print '\n\n\n'
title = 'RMS=%.4f, MSE=%.4f, R2=%.3f' % (rms, mse, r2)
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.title(title)
ax1.scatter(expected, predicted, alpha=0.2, s=5)
ax1.set_xlabel("Spectroscopic Redshift")
ax1.set_ylabel("Photo-z")
ax1.plot([0, 8], [0, 8], '-r')
ax1.set_xlim(0, 1.1*expected.max())
ax1.set_ylim(0, 1.1*expected.max())
plt.savefig(output+'Results.pdf')
plt.close()
def runRandomForestKaggle(useErrors=True, search=False, test=False):
"""
Simple Random Forest on Kaggle training data.
"""
X_train, X_test, y_train, y_test = loadKaggledata(useErrors=useErrors)
if test: randomForestTestPlots(X_train, X_test, y_train, y_test)
predictedRF, expectedRF = randomForest(X_train, X_test, y_train, y_test, search=search)
plotResults(predictedRF, expectedRF, output='RandomForestKaggleErrors')
def runBayesianRidgeKaggle(useErrors=True):
"""
Run Bayesian Ridge on Kaggle training data.
"""
X_train, X_test, y_train, y_test = loadKaggledata(useErrors=useErrors)
predicted, expected = BayesianRidge(X_train, X_test, y_train, y_test)
plotResults(predicted, expected, output='BayesianRidgeKaggleErrors')
def runSupportVectorRegression(useErrors=False, search=False):
"""
Pretty slow to run.
"""
X_train, X_test, y_train, y_test = loadKaggledata(useErrors=useErrors)
predicted, expected = SupportVectorRegression(X_train, X_test, y_train, y_test, search)
plotResults(predicted, expected, output='SVRKaggleErrors')
def runGradientBoostingRegressor(useErrors=True, search=False, test=True):
"""
Run Gradient Boosting on Kaggle training data.
"""
X_train, X_test, y_train, y_test = loadKaggledata(useErrors=useErrors)
if test: GradientBoostingRegressorTestPlots(X_train, X_test, y_train, y_test)
predicted, expected = GradientBoostingRegressor(X_train, X_test, y_train, y_test, search)
plotResults(predicted, expected, output='GBRKaggleErrors')
if __name__ == '__main__':
runGradientBoostingRegressor()
runBayesianRidgeKaggle()
runRandomForestKaggle()
| |
import os
import re
from datetime import datetime, date
from werkzeug.utils import secure_filename
from flask import request, redirect, render_template, \
url_for, flash, send_from_directory
from flask_login import current_user
from mongoengine import DoesNotExist
from mongoengine.context_managers import switch_db
from bs4 import BeautifulSoup
from . import main
from .. import config, basedir, wiki_md
from .forms import BasicEditForm, WikiEditForm, SearchForm, CommentForm,\
RenameForm, UploadForm, VersionRecoverForm
from ..models import Permission, WikiGroup, WikiComment, WikiPage, WikiFile, WikiCache,\
render_wiki_file, render_wiki_image
from ..email import send_email
from ..wiki_util.pagination import calc_page_num
from ..decorators import admin_required, user_required, guest_required
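# Wrapper around render_template that injects per-group cached context
# (key pages, the five most recent changes, and the latest change time)
# into every wiki view.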
def wiki_render_template(template, group, *args, **kwargs):
with switch_db(WikiCache, group) as _WikiCache:
_cache = _WikiCache.objects.first()
if _cache.latest_change_time.date() == date.today():
latest_change_time = _cache.latest_change_time.strftime('[%H:%M]')
else:
latest_change_time = _cache.latest_change_time.strftime('[%b %d]')
return render_template(template, group=group,
keypages_id_title=_cache.keypages_id_title,
changes_id_title=_cache.changes_id_title[:-6:-1],
latest_change_time=latest_change_time,
*args, **kwargs)
@main.route('/')
def index():
all_groups = WikiGroup.objects(active=True).all()
return render_template('cover.html', all_groups=all_groups)
@main.route('/<group>/search', methods=['GET', 'POST'])
@guest_required
def search(group):
"""Search text on wiki page, `weights{ title:10, content:2, comment:1 }`
"""
search_keyword = request.args.get('search')
result_page = request.args.get('page', default=1, type=int)
form = SearchForm(search=search_keyword)
results, start_page, end_page = None, None, None
if search_keyword and not search_keyword.isspace():
with switch_db(WikiPage, group) as _WikiPage:
results = _WikiPage.objects.search_text(search_keyword). \
only('id', 'title', 'modified_on', 'modified_by'). \
order_by('$text_score').paginate(page=result_page, per_page=100)
start_page, end_page = calc_page_num(result_page, results.pages)
if form.validate_on_submit():
return redirect(url_for('.search', group=group, search=form.search.data))
try:
total_pages = results.pages
except AttributeError:
total_pages = 0
return wiki_render_template('search.html',
group=group,
form=form,
results=results,
start_page=start_page,
end_page=end_page,
total_pages=total_pages)
@main.route('/<group>/keypage-edit', methods=['GET', 'POST'])
@admin_required
def wiki_keypage_edit(group):
with switch_db(WikiCache, group) as _WikiCache:
_cache = _WikiCache.objects.first()
_keypage_titles = [i[1] for i in _cache.keypages_id_title]
form = BasicEditForm(textArea='\n'.join(_keypage_titles))
if form.validate_on_submit():
new_titles = form.textArea.data.splitlines()
_cache.update_keypages(group, *new_titles)
return redirect(url_for('.wiki_group_home', group=group))
return wiki_render_template('wiki_keypage_edit.html', group=group, form=form)
@main.route('/<group>/changes')
@guest_required
def wiki_show_changes(group):
with switch_db(WikiPage, group) as _WikiPage, \
switch_db(WikiCache, group) as _WikiCache:
_cache = _WikiCache.objects.only('changes_id_title').first()
changed_pages = []
for _id, pageTitle in _cache.changes_id_title[::-1]:
try:
changed_pages.append(_WikiPage.objects.\
only('id', 'title', 'modified_by', 'modified_on').get(id=_id))
except DoesNotExist:
# TODO: handle it rather than ignore it
pass
return wiki_render_template('wiki_changes.html',
group=group,
changed_pages=changed_pages)
@main.route('/<group>/<page_id>/page', methods=['GET', 'POST'])
@guest_required
def wiki_page(group, page_id):
form = CommentForm()
if form.validate_on_submit() and current_user.can(group, Permission.WRITE):
_, comment_html = wiki_md(group, form.textArea.data, is_comment=True)
new_comment = WikiComment(
id='{}-{}'.format(datetime.utcnow().strftime('%s'), current_user.id),
author=current_user.name,
html=comment_html,
md=form.textArea.data
)
with switch_db(WikiPage, group) as _WikiPage, \
switch_db(WikiCache, group) as _WikiCache:
_WikiPage.objects(id=page_id).update_one(push__comments=new_comment)
page = _WikiPage.objects.only('id', 'title').get_or_404(id=page_id)
_cache = _WikiCache.objects.only('changes_id_title').first()
_cache.add_changed_page(page.id, page.title, datetime.now())
user_emails = [u.email for u in wiki_md.users_to_notify]
send_email(user_emails, 'You are mentioned',
'{} ({}) mentioned you at <a href="{}#wiki-comment-box">{}</a>'.\
format(current_user.name,
current_user.email,
request.base_url,
page.title))
return redirect(url_for('.wiki_page',
group=group,
page_id=page_id,
_anchor='wiki-comment-box'))
with switch_db(WikiPage, group) as _WikiPage:
page = _WikiPage.objects.exclude('md', 'refs', 'files').get_or_404(id=page_id)
return wiki_render_template('wiki_page.html', group=group, page=page, form=form)
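# Matches internal wiki links of the form /<group>/<24-hex page id>/page[#anchor],
# capturing the group name, the page id and an optional fragment.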
href_prog = re.compile(r'\/(.+?)\/([0-9a-f]{24})\/page(#.*)?')
@main.route('/<group>/<page_id>/edit', methods=['GET', 'POST'])
@user_required
def wiki_page_edit(group, page_id):
with switch_db(WikiPage, group) as _WikiPage:
page = _WikiPage.objects.exclude('html', 'comments').get_or_404(id=page_id)
form = WikiEditForm(current_version=page.current_version)
upload_form = UploadForm()
if form.validate_on_submit():
if form.current_version.data == page.current_version:
toc, html = wiki_md(group, form.textArea.data)
page.update_content(group, form.textArea.data, html, toc)
# Make sure wiki page references using raw html are also kept track of.
soup = BeautifulSoup(form.textArea.data, 'html.parser')
hrefs = [a['href'] for a in soup.find_all('a', class_='wiki-page')]
for href in hrefs:
m = href_prog.fullmatch(href)
try:
href_group, href_page_id = m.group(1), m.group(2)
assert group == href_group
href_page = _WikiPage.objects(id=href_page_id).only('id').first()
if href_page:
wiki_md.wiki_refs.append(href_page)
except (AttributeError, AssertionError):
pass
_WikiPage.objects(id=page.id).update(set__refs=wiki_md.wiki_refs,
set__files=wiki_md.wiki_files)
return redirect(url_for('.wiki_page', group=group, page_id=page_id))
else:
flash('Other changes have been made to this '
'page since you started editing it.')
return wiki_render_template('wiki_page_edit.html',
group=group,
page=page,
form=form,
upload_form=upload_form)
@main.route('/<group>/<page_id>/upload')
@user_required
def wiki_upload_file(group, page_id):
form = UploadForm()
return render_template('wiki_upload_file.html',
group=group,
form=form,
page_id=page_id)
@main.route('/do-upload/<group>', methods=['POST'])
@user_required
def wiki_do_upload(group):
"""Handle the ajax upload request from a normal page.
Since every uploaded file is renamed to its id in database,
There is no need to secure its filename during uploading.
However, users should try to upload files with a secure
filename, namely using ascii encoding and normal characters.
Since storage is cheap, a secured filename is also saved in
`WikiFile` such that there is no need to run `secure_filename`
each time a file is requested.
"""
form = request.form
with switch_db(WikiPage, group) as _WikiPage:
page_id = form.get('page_id', None)
parent_page = _WikiPage.objects.exclude('comments', 'refs').get(id=page_id)
file_md = ''
file_html = ''
for i, file in enumerate(request.files.getlist("file")):
# save uploaded file info to database
wiki_file = WikiFile(name=file.filename,
secured_name=secure_filename(file.filename),
mime_type=file.mimetype,
uploaded_by=current_user.name)
file.save(os.path.join(config.UPLOAD_FOLDER, group, str(wiki_file.id)))
# Use the position of file pointer to get file size
wiki_file.size = file.tell()
wiki_file.switch_db(group).save()
if 'image' in file.mimetype:
file_md += '\n\n[image:{}]'.format(wiki_file.id)
file_html += '<p>{}</p>'.\
format(render_wiki_image(group, wiki_file.id, wiki_file.name))
else:
file_md += '\n\n[file:{}]'.format(wiki_file.id)
file_html += '<p>{}</p>'.\
format(render_wiki_file(group, wiki_file.id, wiki_file.name))
parent_page.files.append(wiki_file)
parent_page.update_content(group,
parent_page.md+file_md,
parent_page.html+file_html,
parent_page.toc)
return ''
@main.route('/do-upload/from-edit/<group>', methods=['POST'])
@user_required
def wiki_do_upload_from_edit(group):
"""Handle the ajax upload request from an editing page.
Since every uploaded file is renamed to its id in database,
There is no need to secure its filename during uploading.
However, users should try to upload files with a secure
filename, namely using ascii encoding and normal characters.
Since storage is cheap, a secured filename is also saved in
`WikiFile` such that there is no need to run `secure_filename`
each time a file is requested.
"""
file_md = ''
for i, file in enumerate(request.files.getlist("file")):
# save uploaded file info to database
wiki_file = WikiFile(name=file.filename,
secured_name=secure_filename(file.filename),
mime_type=file.mimetype,
uploaded_by=current_user.name)
# save the uploaded file to server
file.save(os.path.join(config.UPLOAD_FOLDER, group, str(wiki_file.id)))
# Use the position of file pointer to get file size
wiki_file.size = file.tell()
wiki_file.switch_db(group).save()
# update the page where the files are uploaded to
file_type = 'image' if 'image' in file.mimetype else 'file'
file_md += '\n\n[{}:{}]'.format(file_type, wiki_file.id)
return file_md
@main.route('/<group>/<page_id>/versions', methods=['GET', 'POST'])
@user_required
def wiki_page_versions(group, page_id):
with switch_db(WikiPage, group) as _WikiPage:
page = _WikiPage.objects.exclude('html', 'comments').get_or_404(id=page_id)
if page.current_version == 1:
return redirect(url_for('.wiki_page', group=group, page_id=page_id))
form = VersionRecoverForm()
if form.validate_on_submit():
if form.version.data >= page.current_version:
flash('Please enter an old version number.')
else:
recovered_content = page.get_version_content(group, form.version.data)
toc, html = wiki_md(group, recovered_content)
page.update_content(group, recovered_content, html, toc)
_WikiPage.objects(id=page.id).update(add_to_set__refs=wiki_md.wiki_refs,
add_to_set__files=wiki_md.wiki_files)
return redirect(url_for('.wiki_page', group=group, page_id=page_id))
old_ver_num = request.args.get('version', default=page.current_version - 1, type=int)
new_ver_num = old_ver_num + 1
diff_table = page.make_wikipage_diff(group, old_ver_num, new_ver_num)
start_page, end_page = calc_page_num(old_ver_num, page.current_version-1)
return wiki_render_template('wiki_page_versions.html',
group=group,
page=page,
form=form,
old_ver_num=old_ver_num,
new_ver_num=new_ver_num,
diff_table=diff_table,
start_page=start_page,
end_page=end_page,
total_pages=page.current_version-1)
@main.route('/<group>/<page_id>/rename', methods=['GET', 'POST'])
@user_required
def wiki_rename_page(group, page_id):
with switch_db(WikiPage, group) as _WikiPage:
page = _WikiPage.objects.only('id', 'title').get_or_404(id=page_id)
if page.title == 'Home':
return redirect(url_for('.wiki_group_home', group=group))
form = RenameForm(new_title=page.title)
if form.validate_on_submit():
new_title = form.new_title.data
if page.title == new_title:
flash('The page name is not changed.')
elif _WikiPage.objects(title=new_title).count() > 0:
flash('The new page title has already been taken.')
else:
page.rename(group, new_title)
return redirect(url_for('.wiki_page', group=group, page_id=page_id))
return wiki_render_template('wiki_rename_page.html', group=group, page=page, form=form)
@main.route('/<group>/<page_id>/references')
@guest_required
def wiki_references(group, page_id):
with switch_db(WikiPage, group) as _WikiPage:
page = _WikiPage.objects.only('title').get_or_404(id=page_id)
referenced_by = _WikiPage.objects(refs__contains=page_id).\
only('id', 'title').all()
# The pages which reference `page`
return wiki_render_template('wiki_references.html',
group=group,
page=page,
referenced_by=referenced_by)
@main.route('/<group>/file/<int:file_id>')
@guest_required
def wiki_file(group, file_id):
"""Uploaded files are saved on server with their
database id as filename, so filenames are secured
when user try to download.
"""
fn = request.args.get('filename')
if not fn:
with switch_db(WikiFile, group) as _WikiFile:
wiki_file = _WikiFile.objects.get_or_404(id=file_id)
fn = wiki_file.secured_name
return send_from_directory(os.path.join(config.UPLOAD_FOLDER, group),
str(file_id),
as_attachment=True,
attachment_filename=fn)
@main.route('/<group>/')
@main.route('/<group>/home')
@guest_required
def wiki_group_home(group):
with switch_db(WikiPage, group) as _WikiPage:
wiki_group_homepage = _WikiPage.objects(title='Home').only('id').first()
return redirect(url_for('.wiki_page',
group=group,
page_id=str(wiki_group_homepage.id)))
@main.route('/<group>/markdown')
@guest_required
def wiki_markdown_instruction(group):
return wiki_render_template('wiki_markdown.html', group=group)
| |
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.homogenization.utils import iter_sym
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
## expr = """
## e = 1/2 * (grad( vec( u ) ) + grad( vec( u ) ).T)
## D = map( D_sym )
## s = D * e
## div( s )
## """
## """
## e[i,j] = 1/2 * (der[j]( u[i] ) + der[i]( u[j] ))
## map =
## D[i,j,k,l]
## s[i,j] = D[i,j,k,l] * e[k,l]
## """
class LinearElasticTerm(Term):
r"""
General linear elasticity term, with :math:`D_{ijkl}` given in
the usual matrix form exploiting symmetry: in 3D it is :math:`6\times6`
with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it is
:math:`3\times3` with the indices ordered as :math:`[11, 22, 12]`. Can be
evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
:Arguments 1:
- material : :math:`D_{ijkl}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`D_{ijkl}`
- parameter_1 : :math:`\ul{w}`
- parameter_2 : :math:`\ul{u}`
"""
name = 'dw_lin_elastic'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
arg_shapes = {'material' : 'S, S', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
## symbolic = {'expression': expr,
## 'map' : {'u' : 'state', 'D_sym' : 'material'}}
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mode == 'weak':
if diff_var is None:
strain = self.get(state, 'cauchy_strain')
fmode = 0
else:
strain = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return 1.0, strain, mat, vg, fmode
elif mode == 'eval':
strain1 = self.get(virtual, 'cauchy_strain')
strain2 = self.get(state, 'cauchy_strain')
return 1.0, strain1, strain2, mat, vg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_lin_elastic
else:
self.function = terms.d_lin_elastic
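## Illustrative use in a problem description file (a sketch only; the material
## and variable names 'solid.D', 'v', 'u' and the quadrature order are
## assumptions, not taken from this module):
## equations = {
##     'balance': """dw_lin_elastic.2.Omega(solid.D, v, u) = 0""",
## }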
class LinearElasticIsotropicTerm(LinearElasticTerm):
r"""
Isotropic linear elasticity term.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u}) \mbox{ with }
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
:Arguments 1:
- material_1 : :math:`\lambda`
- material_2 : :math:`\mu`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material_1 : :math:`\lambda`
- material_2 : :math:`\mu`
- parameter_1 : :math:`\ul{w}`
- parameter_2 : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_iso'
arg_types = (('material_1', 'material_2', 'virtual', 'state'),
('material_1', 'material_2', 'parameter_1', 'parameter_2'))
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'virtual' : ('D', 'state'), 'state' : 'D',
'parameter_1' : 'D', 'parameter_2' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
def get_fargs(self, lam, mu, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.mechanics.matcoefs import stiffness_from_lame
mat = stiffness_from_lame(self.region.dim, lam, mu)
return LinearElasticTerm.get_fargs(self, mat, virtual, state,
mode=mode, term_mode=term_mode,
diff_var=diff_var, **kwargs)
def get_eval_shape(self, mat1, mat2, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
return LinearElasticTerm.get_eval_shape(self, None, None, state)
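# A minimal sketch (not part of sfepy) of the matrix that
# stiffness_from_lame() is expected to return for the isotropic term
# above, in the 2D symmetric storage with rows/columns ordered as
# [11, 22, 12] and Lame parameters lam, mu:
def _example_isotropic_stiffness_2d(lam, mu):
    return nm.array([[lam + 2.0 * mu, lam, 0.0],
                     [lam, lam + 2.0 * mu, 0.0],
                     [0.0, 0.0, mu]], dtype=nm.float64)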
class SDLinearElasticTerm(Term):
r"""
Sensitivity analysis of the linear elastic term.
:Definition:
.. math::
\int_{\Omega} \hat{D}_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
.. math::
\hat{D}_{ijkl} = D_{ijkl}(\nabla \cdot \ul{\Vcal})
- D_{ijkq}{\partial \Vcal_l \over \partial x_q}
- D_{iqkl}{\partial \Vcal_j \over \partial x_q}
:Arguments:
- material : :math:`D_{ijkl}`
- parameter_w : :math:`\ul{w}`
- parameter_u : :math:`\ul{u}`
- parameter_mesh_velocity : :math:`\ul{\Vcal}`
"""
name = 'd_sd_lin_elastic'
arg_types = ('material', 'parameter_w', 'parameter_u',
'parameter_mesh_velocity')
arg_shapes = {'material' : 'S, S',
'parameter_w' : 'D', 'parameter_u' : 'D',
'parameter_mesh_velocity' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
function = terms.d_sd_lin_elastic
def get_fargs(self, mat, par_w, par_u, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
grad_w = self.get(par_w, 'grad')
grad_u = self.get(par_u, 'grad')
grad_mv = self.get(par_mv, 'grad')
return 1.0, grad_w, grad_u, grad_mv, mat, vg
def get_eval_shape(self, mat, par_w, par_u, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class LinearElasticTHTerm(THTerm):
r"""
Fading memory linear elastic (viscous) term. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} \left [\int_0^t
\Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{u}(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v})
:Arguments:
- ts : :class:`TimeStepper` instance
- material : :math:`\Hcal_{ijkl}(\tau)`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_th'
arg_types = ('ts', 'material', 'virtual', 'state')
arg_shapes = {'material' : '.: N, S, S',
'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.dw_lin_elastic)
def get_fargs(self, ts, mats, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
if mode == 'weak':
if diff_var is None:
def iter_kernel():
for ii, mat in enumerate(mats):
strain = self.get(state, 'cauchy_strain',
step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt, strain, mat, vg, 0)
fargs = iter_kernel
else:
strain = nm.array([0], ndmin=4, dtype=nm.float64)
mat = nm.tile(mats[0], (n_el, n_qp, 1, 1))
fargs = ts.dt, strain, mat, vg, 1
return fargs
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class LinearElasticETHTerm(ETHTerm):
r"""
This term has the same definition as dw_lin_elastic_th, but assumes an
exponential approximation of the convolution kernel resulting in much
higher efficiency. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} \left [\int_0^t
\Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{u}(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v})
:Arguments:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\Hcal_{ijkl}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_elastic_eth'
arg_types = ('ts', 'material_0', 'material_1', 'virtual', 'state')
arg_shapes = {'material_0' : 'S, S', 'material_1' : '1, 1',
'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.dw_lin_elastic)
def get_fargs(self, ts, mat0, mat1, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _, key = self.get_mapping(state, return_key=True)
if diff_var is None:
strain = self.get(state, 'cauchy_strain')
key += tuple(self.arg_names[ii] for ii in [1, 2, 4])
data = self.get_eth_data(key, state, mat1, strain)
fargs = (ts.dt, data.history + data.values, mat0, vg, 0)
else:
aux = nm.array([0], ndmin=4, dtype=nm.float64)
fargs = (ts.dt, aux, mat0, vg, 1)
return fargs
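# Why the ETH variant is fast (an illustrative sketch, not the actual
# sfepy data handling): for an exponential kernel
# Hcal(t) = Hcal(0) * exp(-lam * t), the history integral satisfies a
# one-step recurrence, so only the previous value needs to be stored
# instead of the full strain history required by dw_lin_elastic_th:
def _eth_recurrence_sketch(history, strain, mat0, decay, dt):
    # history : convolution value from the previous time step,
    # decay : exp(-lam * dt) (the material_1 argument above);
    # the quadrature weight dt of the new term is schematic here.
    return decay * history + dt * nm.dot(mat0, strain)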
class LinearPrestressTerm(Term):
r"""
Linear prestress term, with the prestress :math:`\sigma_{ij}` given in
the usual vector form exploiting symmetry: in 3D it has 6 components
with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it has
3 components with the indices ordered as :math:`[11, 22, 12]`. Can be
evaluated.
:Definition:
.. math::
\int_{\Omega} \sigma_{ij} e_{ij}(\ul{v})
:Arguments 1:
- material : :math:`\sigma_{ij}`
- virtual : :math:`\ul{v}`
:Arguments 2:
- material : :math:`\sigma_{ij}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_lin_prestress'
arg_types = (('material', 'virtual'),
('material', 'parameter'))
arg_shapes = {'material' : 'S, 1', 'virtual' : ('D', None),
'parameter' : 'D'}
modes = ('weak', 'eval')
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
if mode == 'weak':
return mat, vg
else:
strain = self.get(virtual, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return strain, mat, vg, fmode
def get_eval_shape(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(virtual)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), virtual.dtype
def d_lin_prestress(self, out, strain, mat, vg, fmode):
aux = dot_sequences(mat, strain, mode='ATB')
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_lin_prestress
else:
self.function = self.d_lin_prestress
class LinearStrainFiberTerm(Term):
r"""
Linear (pre)strain fiber term with the unit direction vector
:math:`\ul{d}`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl} e_{ij}(\ul{v}) \left(d_k d_l\right)
:Arguments:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{d}`
- virtual : :math:`\ul{v}`
"""
name = 'dw_lin_strain_fib'
arg_types = ('material_1', 'material_2', 'virtual')
arg_shapes = {'material_1' : 'S, S', 'material_2' : 'D, 1',
'virtual' : ('D', None)}
function = staticmethod(terms.dw_lin_strain_fib)
def get_fargs(self, mat1, mat2, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
omega = nm.empty(mat1.shape[:3] + (1,), dtype=nm.float64)
for ii, (ir, ic) in enumerate(iter_sym(mat2.shape[2])):
omega[..., ii, 0] = mat2[..., ir, 0] * mat2[..., ic, 0]
return mat1, omega, vg
class CauchyStrainTerm(Term):
r"""
Evaluate Cauchy strain tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22,
12]`. The last three (non-diagonal) components are doubled so that it is
energetically conjugate to the Cauchy stress tensor with the same storage.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \ull{e}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \ull{e}(\ul{w}) /
\int_{T_K} 1
.. math::
\ull{e}(\ul{w})|_{qp}
:Arguments:
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_strain'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, strain, vg, fmode):
if fmode == 2:
out[:] = strain
status = 0
else:
status = terms.de_cauchy_strain(out, strain, vg, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
strain = self.get(parameter, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return strain, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim * (dim + 1) // 2, 1), parameter.dtype
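# Illustrative helper (not in the module): the symmetric storage used by
# ev_cauchy_strain in 2D, with the off-diagonal component doubled so that
# dot(stress, strain) gives the strain energy density directly.
def _strain_to_sym_storage_2d(e):
    # e : a 2x2 symmetric strain tensor.
    return nm.array([e[0, 0], e[1, 1], 2.0 * e[0, 1]], dtype=nm.float64)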
class CauchyStrainSTerm(CauchyStrainTerm):
r"""
Evaluate Cauchy strain tensor on a surface region.
See :class:`CauchyStrainTerm`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Gamma} \ull{e}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \ull{e}(\ul{w}) /
\int_{T_K} 1
.. math::
\ull{e}(\ul{w})|_{qp}
:Arguments:
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_strain_s'
arg_types = ('parameter',)
integration = 'surface_extra'
class CauchyStressTerm(Term):
r"""
Evaluate Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} D_{ijkl} e_{kl}(\ul{w})
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} D_{ijkl} e_{kl}(\ul{w}) / \int_{T_K} 1
.. math::
D_{ijkl} e_{kl}(\ul{w})|_{qp}
:Arguments:
- material : :math:`D_{ijkl}`
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'S, S', 'parameter' : 'D'}
@staticmethod
def function(out, coef, strain, mat, vg, fmode):
if fmode == 2:
out[:] = dot_sequences(mat, strain)
status = 0
else:
status = terms.de_cauchy_stress(out, strain, mat, vg, fmode)
if coef is not None:
out *= coef
return status
def get_fargs(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
strain = self.get(parameter, 'cauchy_strain')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return None, strain, mat, vg, fmode
def get_eval_shape(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim * (dim + 1) // 2, 1), parameter.dtype
class CauchyStressTHTerm(CauchyStressTerm, THTerm):
r"""
Evaluate fading memory Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau} / \int_{T_K} 1
.. math::
\int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau)) \difd{\tau}|_{qp}
:Arguments:
- ts : :class:`TimeStepper` instance
- material : :math:`\Hcal_{ijkl}(\tau)`
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress_th'
arg_types = ('ts', 'material', 'parameter')
arg_shapes = {'material' : '.: N, S, S', 'parameter' : 'D'}
def get_fargs(self, ts, mats, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
def iter_kernel():
for ii, mat in enumerate(mats):
strain = self.get(state, 'cauchy_strain',
step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt, strain, mat, vg, fmode)
return iter_kernel
def get_eval_shape(self, ts, mats, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
out = CauchyStressTerm.get_eval_shape(self, mats, parameter, mode,
term_mode, diff_var, **kwargs)
return out
class CauchyStressETHTerm(CauchyStressTerm, ETHTerm):
r"""
Evaluate fading memory Cauchy stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Assumes an exponential approximation of the convolution kernel resulting in
much higher efficiency.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau))
\difd{\tau} / \int_{T_K} 1
.. math::
\int_0^t \Hcal_{ijkl}(t-\tau)\,e_{kl}(\ul{w}(\tau)) \difd{\tau}|_{qp}
:Arguments:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\Hcal_{ijkl}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- parameter : :math:`\ul{w}`
"""
name = 'ev_cauchy_stress_eth'
arg_types = ('ts', 'material_0', 'material_1', 'parameter')
arg_shapes = {'material_0' : 'S, S', 'material_1' : '1, 1',
'parameter' : 'D'}
def get_fargs(self, ts, mat0, mat1, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _, key = self.get_mapping(state, return_key=True)
strain = self.get(state, 'cauchy_strain')
key += tuple(self.arg_names[1:])
data = self.get_eth_data(key, state, mat1, strain)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return ts.dt, data.history + data.values, mat0, vg, fmode
def get_eval_shape(self, ts, mat0, mat1, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
out = CauchyStressTerm.get_eval_shape(self, mat0, parameter, mode,
term_mode, diff_var, **kwargs)
return out
class NonsymElasticTerm(Term):
r"""
Elasticity term with non-symmetric gradient. The indices of matrix
:math:`D_{ijkl}` are ordered as
:math:`[11, 12, 13, 21, 22, 23, 31, 32, 33]` in 3D and as
:math:`[11, 12, 21, 22]` in 2D.
:Definition:
.. math::
\int_{\Omega} \ull{D} \nabla\ul{u} : \nabla\ul{v}
:Arguments 1:
- material : :math:`\ull{D}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\ull{D}`
- parameter_1 : :math:`\ul{w}`
- parameter_2 : :math:`\ul{u}`
"""
name = 'dw_nonsym_elastic'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
arg_shapes = {'material' : 'D2, D2', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
geometries = ['2_3', '2_4', '3_4', '3_8']
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0,1,3,2))
nel, nqp, nr, nc = grad.shape
grad = grad.reshape((nel,nqp,nr*nc,1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad').transpose((0,1,3,2))
grad2 = self.get(state, 'grad').transpose((0,1,3,2))
nel, nqp, nr, nc = grad1.shape
            return (1.0,
                    grad1.reshape((nel, nqp, nr * nc, 1)),
                    grad2.reshape((nel, nqp, nr * nc, 1)),
                    mat, vg)
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_nonsym_elastic
else:
self.function = terms.d_lin_elastic
def _build_wave_strain_op(vec, bf):
dim = len(vec)
if dim == 2:
n0, n1 = vec
nmat = nm.array([[n0, 0],
[0, n1],
[n1, n0]], dtype=nm.float64)
else:
n0, n1, n2 = vec
nmat = nm.array([[n0, 0, 0],
[0, n1, 0],
[0, 0, n2],
[n1, n0, 0],
[n2, 0, n0],
[0, n2, n1]], dtype=nm.float64)
out = nm.einsum('ik,cqkj->cqij', nmat, bf)
return out
def _build_cauchy_strain_op(bfg):
dim = bfg.shape[2]
if dim == 2:
g1, g2 = bfg[..., 0:1, :], bfg[..., 1:2, :]
zz = nm.zeros_like(g1)
out = nm.block([[g1, zz],
[zz, g2],
[g2, g1]])
else:
g1, g2, g3 = bfg[..., 0:1, :], bfg[..., 1:2, :], bfg[..., 2:3, :]
zz = nm.zeros_like(g1)
out = nm.block([[g1, zz, zz],
[zz, g2, zz],
[zz, zz, g3],
[g2, g1, zz],
[g3, zz, g1],
[zz, g3, g2]])
return out
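def _check_strain_op_shapes():
    # Illustrative sanity check (not in the original module): in 2D with
    # one element, 4 QPs and 3 basis functions per component, both
    # operators map to the 3 symmetric strain components.
    ebf = nm.zeros((1, 4, 2, 6), dtype=nm.float64)  # expanded basis
    bfg = nm.zeros((1, 4, 2, 3), dtype=nm.float64)  # basis gradients
    assert _build_wave_strain_op((1.0, 0.0), ebf).shape == (1, 4, 3, 6)
    assert _build_cauchy_strain_op(bfg).shape == (1, 4, 3, 6)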
class ElasticWaveTerm(Term):
r"""
Elastic dispersion term involving the wave strain :math:`g_{ij}`,
:math:`g_{ij}(\ul{u}) = \frac{1}{2}(u_i \kappa_j + \kappa_i u_j)`, with the
wave vector :math:`\ul{\kappa}`. :math:`D_{ijkl}` is given in the usual
matrix form exploiting symmetry: in 3D it is :math:`6\times6` with the
indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it is
:math:`3\times3` with the indices ordered as :math:`[11, 22, 12]`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{v}) g_{kl}(\ul{u})
:Arguments:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_elastic_wave'
arg_types = ('material_1', 'material_2', 'virtual', 'state')
arg_shapes = {'material_1' : 'S, S', 'material_2' : '.: D',
'virtual' : ('D', 'state'), 'state' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, mat, kappa, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.discrete.variables import create_adof_conn
geo, _ = self.get_mapping(state)
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(virtual)
# Expand basis for all components.
bf = geo.bf
ebf = nm.zeros(bf.shape[:2] + (dim, n_fn * dim), dtype=nm.float64)
for ir in range(dim):
ebf[..., ir, ir*n_fn:(ir+1)*n_fn] = bf[..., 0, :]
gmat = _build_wave_strain_op(kappa, ebf)
if diff_var is None:
econn = state.field.get_econn('volume', self.region)
adc = create_adof_conn(nm.arange(state.n_dof, dtype=nm.int32),
econn, n_c, 0)
vals = state()[adc]
# Same as nm.einsum('qij,cj->cqi', gmat[0], vals)[..., None]
aux = dot_sequences(gmat, vals[:, None, :, None])
out_qp = dot_sequences(gmat, dot_sequences(mat, aux), 'ATB')
fmode = 0
else:
out_qp = dot_sequences(gmat, dot_sequences(mat, gmat), 'ATB')
fmode = 1
return out_qp, geo, fmode
class ElasticWaveCauchyTerm(Term):
r"""
Elastic dispersion term involving the wave strain :math:`g_{ij}`,
:math:`g_{ij}(\ul{u}) = \frac{1}{2}(u_i \kappa_j + \kappa_i u_j)`, with the
wave vector :math:`\ul{\kappa}` and the elastic strain :math:`e_{ij}`.
:math:`D_{ijkl}` is given in the usual matrix form exploiting symmetry: in
3D it is :math:`6\times6` with the indices ordered as :math:`[11, 22, 33,
12, 13, 23]`, in 2D it is :math:`3\times3` with the indices ordered as
:math:`[11, 22, 12]`.
:Definition:
.. math::
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{v}) e_{kl}(\ul{u}) \;,
\int_{\Omega} D_{ijkl}\ g_{ij}(\ul{u}) e_{kl}(\ul{v})
:Arguments 1:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material_1 : :math:`D_{ijkl}`
- material_2 : :math:`\ul{\kappa}`
- state : :math:`\ul{u}`
- virtual : :math:`\ul{v}`
"""
name = 'dw_elastic_wave_cauchy'
arg_types = (('material_1', 'material_2', 'virtual', 'state'),
('material_1', 'material_2', 'state', 'virtual'))
arg_shapes = {'material_1' : 'S, S', 'material_2' : '.: D',
'virtual' : ('D', 'state'), 'state' : 'D'}
geometries = ['2_3', '2_4', '3_4', '3_8']
modes = ('ge', 'eg')
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, mat, kappa, gvar, evar,
mode=None, term_mode=None, diff_var=None, **kwargs):
from sfepy.discrete.variables import create_adof_conn
geo, _ = self.get_mapping(evar)
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(gvar)
# Expand basis for all components.
bf = geo.bf
ebf = nm.zeros(bf.shape[:2] + (dim, n_fn * dim), dtype=nm.float64)
for ir in range(dim):
ebf[..., ir, ir*n_fn:(ir+1)*n_fn] = bf[..., 0, :]
gmat = _build_wave_strain_op(kappa, ebf)
emat = _build_cauchy_strain_op(geo.bfg)
if diff_var is None:
avar = evar if self.mode == 'ge' else gvar
econn = avar.field.get_econn('volume', self.region)
adc = create_adof_conn(nm.arange(avar.n_dof, dtype=nm.int32),
econn, n_c, 0)
vals = avar()[adc]
if self.mode == 'ge':
# Same as aux = self.get(avar, 'cauchy_strain'),
aux = dot_sequences(emat, vals[:, None, :, None])
out_qp = dot_sequences(gmat, dot_sequences(mat, aux), 'ATB')
else:
aux = dot_sequences(gmat, vals[:, None, :, None])
out_qp = dot_sequences(emat, dot_sequences(mat, aux), 'ATB')
fmode = 0
else:
if self.mode == 'ge':
out_qp = dot_sequences(gmat, dot_sequences(mat, emat), 'ATB')
else:
out_qp = dot_sequences(emat, dot_sequences(mat, gmat), 'ATB')
fmode = 1
return out_qp, geo, fmode
"""Routing utilities."""
from collections.abc import Mapping
from contextlib import contextmanager
import importlib
from aiohttp import web
__all__ = (
'ResourceRouter',
'add_route_context',
'add_resource_context',
)
class ResourceRouter(web.UrlDispatcher):
"""Router with an :meth:`add_resource` method for registering method-based handlers,
a.k.a "resources". Includes all the methods `aiohttp.web.UrlDispatcher` with the addition
of `add_resource`.
Example:
.. code-block:: python
from aiohttp import web
from aiohttp_utils.routing import ResourceRouter
app = web.Application(router=ResourceRouter())
class IndexResource:
async def get(self, request):
return web.Response(body=b'Got it', content_type='text/plain')
async def post(self, request):
return web.Response(body=b'Posted it', content_type='text/plain')
app.router.add_resource_object('/', IndexResource())
# Normal function-based handlers still work
async def handler(request):
return web.Response()
app.router.add_route('GET', '/simple', handler)
By default, handler names will be registered with the name ``<ClassName>:<method>``. ::
app.router['IndexResource:post'].url() == '/'
    You can override the default names by passing a ``names`` dict to `add_resource_object`. ::
app.router.add_resource_object('/', IndexResource(), names={'get': 'index_get'})
app.router['index_get'].url() == '/'
"""
HTTP_METHOD_NAMES = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def get_default_handler_name(self, resource, method_name: str):
return '{resource.__class__.__name__}:{method_name}'.format(**locals())
def add_resource_object(self, path: str, resource,
methods: tuple = tuple(), names: Mapping = None):
"""Add routes by an resource instance's methods.
:param path: route path. Should be started with slash (``'/'``).
:param resource: A "resource" instance. May be an instance of a plain object.
:param methods: Methods (strings) to register.
:param names: Dictionary of ``name`` overrides.
"""
names = names or {}
if methods:
method_names = methods
else:
method_names = self.HTTP_METHOD_NAMES
for method_name in method_names:
handler = getattr(resource, method_name, None)
if handler:
name = names.get(method_name, self.get_default_handler_name(resource, method_name))
self.add_route(method_name.upper(), path, handler, name=name)
def make_path(path, url_prefix=None):
return ('/'.join((url_prefix.rstrip('/'), path.lstrip('/')))
if url_prefix
else path)
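# Illustrative behaviour of make_path (not in the original module):
#   make_path('/articles/', url_prefix='/api')  -> '/api/articles/'
#   make_path('/articles/')                     -> '/articles/'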
@contextmanager
def add_route_context(
app: web.Application, module=None,
url_prefix: str = None, name_prefix: str = None):
"""Context manager which yields a function for adding multiple routes from a given module.
Example:
.. code-block:: python
# myapp/articles/views.py
        async def list_articles(request):
            return web.Response(body=b'article list...')
        async def create_article(request):
            return web.Response(body=b'created article...')
.. code-block:: python
# myapp/app.py
from myapp.articles import views
with add_route_context(app, url_prefix='/api/', name_prefix='articles') as route:
route('GET', '/articles/', views.list_articles)
route('POST', '/articles/', views.create_article)
app.router['articles.list_articles'].url() # /api/articles/
If you prefer, you can also pass module and handler names as strings.
.. code-block:: python
with add_route_context(app, module='myapp.articles.views',
url_prefix='/api/', name_prefix='articles') as route:
route('GET', '/articles/', 'list_articles')
route('POST', '/articles/', 'create_article')
:param app: Application to add routes to.
:param module: Import path to module (str) or module object which contains the handlers.
:param url_prefix: Prefix to prepend to all route paths.
:param name_prefix: Prefix to prepend to all route names.
"""
if isinstance(module, (str, bytes)):
module = importlib.import_module(module)
def add_route(method, path, handler, name=None):
"""
:param str method: HTTP method.
:param str path: Path for the route.
:param handler: A handler function or a name of a handler function contained
in `module`.
:param str name: Name for the route. If `None`, defaults to the handler's
function name.
"""
if isinstance(handler, (str, bytes)):
if not module:
raise ValueError(
'Must pass module to add_route_context if passing handler name strings.'
)
name = name or handler
handler = getattr(module, handler)
else:
name = name or handler.__name__
path = make_path(path, url_prefix)
name = '.'.join((name_prefix, name)) if name_prefix else name
return app.router.add_route(method, path, handler, name=name)
yield add_route
def get_supported_method_names(resource):
return [
method_name for method_name in ResourceRouter.HTTP_METHOD_NAMES
if hasattr(resource, method_name)
]
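# Illustrative (not in the original module): only the HTTP verb methods a
# resource actually defines are picked up, which drives the name-prefixing
# logic in add_resource_context below.
class _PingExample:
    async def get(self, request):  # hypothetical minimal resource
        return web.Response()
assert get_supported_method_names(_PingExample()) == ['get']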
@contextmanager
def add_resource_context(
app: web.Application, module=None,
url_prefix: str = None, name_prefix: str = None,
make_resource=lambda cls: cls()):
"""Context manager which yields a function for adding multiple resources from a given module
to an app using `ResourceRouter <aiohttp_utils.routing.ResourceRouter>`.
Example:
.. code-block:: python
# myapp/articles/views.py
        class ArticleList:
            async def get(self, request):
                return web.Response(body=b'article list...')
        class ArticleDetail:
            async def get(self, request):
                return web.Response(body=b'article detail...')
.. code-block:: python
# myapp/app.py
from myapp.articles import views
with add_resource_context(app, url_prefix='/api/') as route:
route('/articles/', views.ArticleList())
route('/articles/{pk}', views.ArticleDetail())
app.router['ArticleList:get'].url() # /api/articles/
app.router['ArticleDetail:get'].url(pk='42') # /api/articles/42
If you prefer, you can also pass module and class names as strings. ::
with add_resource_context(app, module='myapp.articles.views',
url_prefix='/api/') as route:
route('/articles/', 'ArticleList')
route('/articles/{pk}', 'ArticleDetail')
.. note::
If passing class names, the resource classes will be instantiated with no
arguments. You can change this behavior by overriding ``make_resource``.
.. code-block:: python
# myapp/authors/views.py
class AuthorList:
def __init__(self, db):
self.db = db
async def get(self, request):
# Fetch authors from self.db...
.. code-block:: python
# myapp/app.py
from myapp.database import db
with add_resource_context(app, module='myapp.authors.views',
url_prefix='/api/',
make_resource=lambda cls: cls(db=db)) as route:
route('/authors/', 'AuthorList')
:param app: Application to add routes to.
    :param module: Import path to module (str) or module object
        which contains the resource classes.
:param url_prefix: Prefix to prepend to all route paths.
:param name_prefix: Prefix to prepend to all route names.
:param make_resource: Function which receives a resource class and returns
a resource instance.
"""
assert isinstance(app.router, ResourceRouter), 'app must be using ResourceRouter'
if isinstance(module, (str, bytes)):
module = importlib.import_module(module)
def get_base_name(resource, method_name, names):
return names.get(method_name,
app.router.get_default_handler_name(resource, method_name))
default_make_resource = make_resource
def add_route(path: str, resource,
names: Mapping = None, make_resource=None):
make_resource = make_resource or default_make_resource
names = names or {}
if isinstance(resource, (str, bytes)):
if not module:
raise ValueError(
                'Must pass module to add_resource_context if passing resource name strings.'
)
resource_cls = getattr(module, resource)
resource = make_resource(resource_cls)
path = make_path(path, url_prefix)
if name_prefix:
supported_method_names = get_supported_method_names(resource)
names = {
method_name: '.'.join(
(name_prefix, get_base_name(resource, method_name, names=names))
)
for method_name in supported_method_names
}
return app.router.add_resource_object(path, resource, names=names)
yield add_route
import unittest
import os
import socket
import tempfile
from g1.asyncs.bases import adapters
from g1.asyncs.kernels import contexts
from g1.asyncs.kernels import errors
from g1.asyncs.kernels import kernels
try:
from g1.threads import futures
except ImportError:
futures = None
class FileAdapterTest(unittest.TestCase):
def setUp(self):
self.k = kernels.Kernel()
self.token = contexts.set_kernel(self.k)
r, w = os.pipe()
self.r = adapters.FileAdapter(os.fdopen(r, 'rb'))
self.w = adapters.FileAdapter(os.fdopen(w, 'wb'))
def tearDown(self):
self.r.close()
self.w.close()
contexts.KERNEL.reset(self.token)
self.k.close()
def assert_stats(self, **expect):
actual = self.k.get_stats()._asdict()
# Default ``expect`` entries to 0.
for name in actual:
if name not in expect:
expect[name] = 0
self.assertEqual(actual, expect)
def test_detach(self):
self.r.detach().close()
self.w.detach().close()
with self.assertRaisesRegex(
ValueError, r'raw stream has been detached'
):
self.r.closed # pylint: disable=pointless-statement
with self.assertRaisesRegex(
ValueError, r'raw stream has been detached'
):
self.w.closed # pylint: disable=pointless-statement
# These should not raise.
self.r.close()
self.w.close()
def test_disown(self):
self.r.disown().close()
self.w.disown().close()
self.assertIsNone(self.r.target)
self.assertIsNone(self.r._FileAdapter__file)
self.assertIsNone(self.w.target)
self.assertIsNone(self.w._FileAdapter__file)
# These should not raise.
self.r.close()
self.w.close()
def test_pipe(self):
num_chunks = 10
        # The chunk size of 65537 is deliberately chosen to be larger
        # than the pipe's internal buffer size.
        chunk_size = 65536 + 1
async def write():
num_written = 0
for i in range(num_chunks):
chunk = (b'%x' % (i + 1)) * chunk_size
while chunk:
num_bytes = await self.w.write(chunk)
self.assertGreater(num_bytes, 0)
chunk = chunk[num_bytes:]
num_written += num_bytes
await self.w.flush()
self.w.close()
self.w.close() # Safe to close repeatedly.
return num_written
async def read():
pieces = []
while True:
piece = await self.r.read()
if not piece:
break
pieces.append(piece)
self.r.close()
self.r.close() # Safe to close repeatedly.
return b''.join(pieces)
reader_task = self.k.spawn(read)
writer_task = self.k.spawn(write)
self.assertFalse(reader_task.is_completed())
self.assertFalse(writer_task.is_completed())
self.k.run(timeout=1)
self.assertTrue(reader_task.is_completed())
self.assertTrue(writer_task.is_completed())
expect_data = b''.join((b'%x' % (i + 1)) * chunk_size
for i in range(num_chunks))
self.assertEqual(reader_task.get_result_nonblocking(), expect_data)
self.assertEqual(
writer_task.get_result_nonblocking(), len(expect_data)
)
def test_close_read_pipe(self):
# This task is blocked before close.
reader_task = self.k.spawn(self.r.read)
with self.assertRaises(errors.KernelTimeout):
self.k.run(timeout=0)
self.assert_stats(num_ticks=1, num_tasks=1, num_poll=1)
self.r.close()
self.k.run(timeout=1)
self.assertTrue(reader_task.is_completed())
with self.assertRaisesRegex(ValueError, r'read of closed file'):
reader_task.get_result_nonblocking()
# This task accesses the file after close.
reader_task = self.k.spawn(self.r.read)
self.k.run(timeout=1)
with self.assertRaisesRegex(ValueError, r'read of closed file'):
reader_task.get_result_nonblocking()
def test_close_write_pipe(self):
async def writer_blocked():
chunk = b'\x00' * 65537
while True:
await self.w.write(chunk)
# This task is blocked before close.
writer_task = self.k.spawn(writer_blocked)
with self.assertRaises(errors.KernelTimeout):
self.k.run(timeout=0)
self.assert_stats(num_ticks=1, num_tasks=1, num_poll=1)
self.w.close()
self.k.run(timeout=1)
self.assertTrue(writer_task.is_completed())
with self.assertRaisesRegex(ValueError, r'write to closed file'):
writer_task.get_result_nonblocking()
# This task accesses the file after close.
writer_task = self.k.spawn(writer_blocked)
self.k.run(timeout=1)
self.assertTrue(writer_task.is_completed())
with self.assertRaisesRegex(ValueError, r'write to closed file'):
writer_task.get_result_nonblocking()
def test_close_blocked(self):
num_bytes = 65537
buffer = bytes(num_bytes)
# Let's check that ``close`` raises.
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
rr = os.fdopen(r, 'rb')
ww = os.fdopen(w, 'wb')
ww.write(buffer)
with self.assertRaises(BlockingIOError):
ww.close()
rr.close()
# But adapter's ``close`` won't raise.
async def writer():
await self.w.write(buffer)
self.w.close()
task = self.k.spawn(writer)
with self.assertLogs(adapters.__name__) as cm:
self.k.run(timeout=1)
self.assertRegex(
cm.output[0],
r'close error',
)
self.assertRegex(
cm.output[-1],
(
r'BlockingIOError: '
r'\[Errno 11\] write could not complete without blocking'
),
)
self.assertTrue(task.is_completed())
self.assertIsNone(task.get_exception_nonblocking())
        # Not all of the data has been flushed out.
self.assertLess(len(self.r.target.read()), num_bytes)
class SocketAdapterTest(unittest.TestCase):
def setUp(self):
self.k = kernels.Kernel()
self.token = contexts.set_kernel(self.k)
s0, s1 = socket.socketpair()
self.s0 = adapters.SocketAdapter(s0)
self.s1 = adapters.SocketAdapter(s1)
def tearDown(self):
self.s0.close()
self.s1.close()
contexts.KERNEL.reset(self.token)
self.k.close()
def test_detach(self):
os.close(self.s0.detach())
os.close(self.s1.detach())
self.assertLess(self.s0.fileno(), 0)
self.assertLess(self.s1.fileno(), 0)
# These should not raise.
self.s0.close()
self.s1.close()
def test_disown(self):
self.s0.disown().close()
self.s1.disown().close()
self.assertIsNone(self.s0.target)
self.assertIsNone(self.s0._SocketAdapter__sock)
self.assertIsNone(self.s1.target)
self.assertIsNone(self.s1._SocketAdapter__sock)
# These should not raise.
self.s0.close()
self.s1.close()
async def recv(self):
pieces = []
while True:
piece = await self.s1.recv(4096)
if not piece:
break
pieces.append(piece)
self.s1.close()
return b''.join(pieces)
async def recv_into(self):
pieces = []
buffer = bytearray(4096)
view = memoryview(buffer)
while True:
num_recv = await self.s1.recv_into(buffer)
if num_recv == 0:
break
self.assertGreater(num_recv, 0)
pieces.append(bytes(view[:num_recv]))
self.s1.close()
return b''.join(pieces)
async def call_read(self, read_func):
pieces = []
buffer = bytearray(4096)
view = memoryview(buffer)
fp = adapters.FileAdapter(self.s1.target.makefile('rb'))
try:
while True:
num_recv = await read_func(fp, buffer)
if num_recv == 0:
break
self.assertGreater(num_recv, 0)
pieces.append(bytes(view[:num_recv]))
self.s1.close()
return b''.join(pieces)
finally:
fp.disown()
async def send(self, num_chunks, chunk_size):
num_sent = 0
for i in range(num_chunks):
chunk = (b'%x' % (i + 1)) * chunk_size
while chunk:
num_bytes = await self.s0.send(chunk)
self.assertGreater(num_bytes, 0)
chunk = chunk[num_bytes:]
num_sent += num_bytes
self.s0.close()
return num_sent
async def sendmsg(self, num_chunks, chunk_size):
chunks = [(b'%x' % (i + 1)) * chunk_size for i in range(num_chunks)]
num_sent = 0
while chunks:
num_bytes = await self.s0.sendmsg(chunks)
self.assertGreater(num_bytes, 0)
num_sent += num_bytes
while chunks:
if len(chunks[0]) <= num_bytes:
num_bytes -= len(chunks.pop(0))
else:
chunks[0] = chunks[0][num_bytes:]
break
self.s0.close()
return num_sent
async def sendfile(self, num_chunks, chunk_size):
with tempfile.NamedTemporaryFile() as tmp:
with open(tmp.name, 'wb') as tmp_file:
for i in range(num_chunks):
tmp_file.write((b'%x' % (i + 1)) * chunk_size)
with open(tmp.name, 'rb') as tmp_file:
num_sent = await self.s0.sendfile(tmp_file)
self.s0.close()
return num_sent
def test_socket_recv_into(self):
self.do_test_socket(self.send, self.recv_into)
def test_socket_read(self):
async def read_func(fp, buffer):
data = await fp.read(len(buffer))
if data:
buffer[:len(data)] = data
return len(data)
self.do_test_socket(self.send, lambda: self.call_read(read_func))
def test_socket_readinto(self):
async def read_func(fp, buffer):
return await fp.readinto(buffer)
self.do_test_socket(self.send, lambda: self.call_read(read_func))
def test_socket_readinto1(self):
async def read_func(fp, buffer):
return await fp.readinto1(buffer)
self.do_test_socket(self.send, lambda: self.call_read(read_func))
def test_socket_send(self):
self.do_test_socket(self.send, self.recv)
def test_socket_sendmsg(self):
self.do_test_socket(self.sendmsg, self.recv)
def test_socket_sendfile(self):
self.do_test_socket(self.sendfile, self.recv)
def do_test_socket(self, send_corofunc, recv_corofunc):
num_chunks = 10
chunk_size = 65536
send_task = self.k.spawn(send_corofunc(num_chunks, chunk_size))
recv_task = self.k.spawn(recv_corofunc())
self.assertFalse(send_task.is_completed())
self.assertFalse(recv_task.is_completed())
self.k.run(timeout=1)
self.assertTrue(send_task.is_completed())
self.assertTrue(recv_task.is_completed())
expect_data = b''.join((b'%x' % (i + 1)) * chunk_size
for i in range(num_chunks))
self.assertEqual(recv_task.get_result_nonblocking(), expect_data)
self.assertEqual(send_task.get_result_nonblocking(), len(expect_data))
def test_close(self):
# Access before socket is closed.
task0 = self.k.spawn(self.s0.recv(1024))
with self.assertRaises(errors.KernelTimeout):
self.k.run(timeout=0)
self.assertEqual(self.k.get_stats().num_poll, 1)
self.s0.close()
# Access after socket is closed.
task1 = self.k.spawn(self.s0.recv(1024))
task2 = self.k.spawn(self.s0.send(b'\x00'))
self.k.run(timeout=1)
self.assertTrue(task0.is_completed())
self.assertTrue(task1.is_completed())
self.assertTrue(task2.is_completed())
with self.assertRaisesRegex(OSError, r'Bad file descriptor'):
task0.get_result_nonblocking()
with self.assertRaisesRegex(OSError, r'Bad file descriptor'):
task1.get_result_nonblocking()
with self.assertRaisesRegex(OSError, r'Bad file descriptor'):
task2.get_result_nonblocking()
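# The pattern the tests above exercise, in brief (grounded in this file,
# not a documented g1 API): spawn coroutines on the kernel, drive them
# with k.run(timeout=...), then collect results without blocking via
# task.get_result_nonblocking().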
@unittest.skipIf(futures is None, 'g1.threads.futures unavailable')
class FutureAdapterTest(unittest.TestCase):
def setUp(self):
self.k = kernels.Kernel()
self.token = contexts.set_kernel(self.k)
def tearDown(self):
contexts.KERNEL.reset(self.token)
self.k.close()
def test_future(self):
f = adapters.FutureAdapter(futures.Future())
task = self.k.spawn(f.get_result())
self.assertFalse(task.is_completed())
with self.assertRaises(errors.KernelTimeout):
self.k.run(timeout=0)
self.assertFalse(task.is_completed())
f.set_result(42)
self.assertFalse(task.is_completed())
self.k.run()
self.assertTrue(task.is_completed())
self.assertEqual(task.get_result_nonblocking(), 42)
def test_completed_future(self):
f = adapters.FutureAdapter(futures.Future())
f.set_result(42)
task = self.k.spawn(f.get_result())
self.assertFalse(task.is_completed())
self.k.run()
self.assertTrue(task.is_completed())
self.assertEqual(task.get_result_nonblocking(), 42)
def test_system_exit(self):
f = adapters.FutureAdapter(futures.Future())
f.set_exception(SystemExit())
task = self.k.spawn(f.get_result())
self.assertFalse(task.is_completed())
self.k.run()
self.assertTrue(task.is_completed())
with self.assertRaises(SystemExit):
task.get_result_nonblocking()
if __name__ == '__main__':
unittest.main()
#!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
from pytz import timezone
import authenticate
import context_card
def student_surveys ( config, session = False ):
url = "https://www.lectio.dk/lectio/%s/spoergeskema_rapport.aspx?type=mine&elevid=%s" % ( str(config["school_id"]), str(config["student_id"]) )
if session is False:
session = authenticate.authenticate(config)
if session == False:
return {"status" : "error", "type" : "authenticate"}
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("div", attrs={"id" : "s_m_Content_Content_answer_island_pa"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
surveys = []
ids = []
openForAnsweringProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema_besvar.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
ownProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema_rediger.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
openForReportingProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema\/spoergeskemarapportering.aspx\?id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
dateTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
if soup.find(attrs={"id" : "s_m_Content_Content_answer_island_pa"}).find("table").find(attrs={"class" : "noRecord"}) is None:
for row in soup.find(attrs={"id" : "s_m_Content_Content_answer_island_pa"}).findAll("tr")[1:]:
elements = row.findAll("td")
if not elements[3].find("span") is None:
dateGroups = dateTimeProg.match(elements[3].find("span").text.strip())
else:
dateGroups = dateTimeProg.match(elements[3].text)
date = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
idGroups = openForAnsweringProg.match(elements[0].find("a")["href"])
id = idGroups.group("survey_id") if not idGroups is None else ""
ids.append(id)
surveys.append({
"types" : ["open_for_answering"],
"survey_id" : id,
"anonymous" : True if elements[2].text == "Ja" else False,
"answer_date" : date,
"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
})
if soup.find(attrs={"id" : "s_m_Content_Content_report_island_pa"}).find(attrs={"class" : "noRecord"}) is None:
for row in soup.find(attrs={"id" : "s_m_Content_Content_report_island_pa"}).findAll("tr")[1:]:
elements = row.findAll("td")
if not elements[2].find("span") is None:
dateGroups = dateTimeProg.match(elements[2].find("span").text.strip())
else:
dateGroups = dateTimeProg.match(elements[2].text)
answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
dateGroups = dateTimeProg.match(elements[3].text)
reportDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
dateGroups = dateTimeProg.match(elements[4].text)
endDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
idGroups = openForReportingProg.match(elements[0].find("a")["href"])
id = idGroups.group("survey_id") if not idGroups is None else ""
			if id in ids:
for x in surveys:
if x["survey_id"] == id:
x["answer_date"] = answerDate
x["report_date"] = reportDate
x["end_date"] = endDate
x["types"].append("open_for_reporting")
			else:
				ids.append(id)
				surveys.append({
					"types" : ["open_for_reporting"],
"survey_id" : id,
"answer_date" : answerDate,
"report_date" : reportDate,
"end_date" : endDate,
"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
})
if soup.find(attrs={"id" : "s_m_Content_Content_own_island_pa"}).find(attrs={"class" : "noRecord"}) is None:
for row in soup.find(attrs={"id" : "s_m_Content_Content_own_island_pa"}).findAll("tr")[1:]:
elements = row.findAll("td")
if not elements[1].find("span") is None:
dateGroups = dateTimeProg.match(elements[1].find("span").text.strip())
else:
dateGroups = dateTimeProg.match(elements[1].text)
answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
dateGroups = dateTimeProg.match(elements[2].text)
reportDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
dateGroups = dateTimeProg.match(elements[3].text)
endDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
idGroups = ownProg.match(elements[0].find("a")["href"])
id = idGroups.group("survey_id") if not idGroups is None else ""
if id in ids:
for x in surveys:
if x["survey_id"] == id:
x["owner_id"] = str(config["student_id"])
x["answer_date"] = answerDate
x["report_date"] = reportDate
x["end_date"] = endDate
else:
ids.append(id)
surveys.append({
"types" : ["closed"],
"survey_id" : id,
"answer_date" : answerDate,
"report_date" : reportDate,
"end_date" : endDate,
"title" : elements[0].text.strip().replace("\r", "").replace("\n", "").replace("\t", "").encode("utf8")
})
return {
"status" : "ok",
"surveys" : surveys
}
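# Every endpoint in this module repeats the same cookie/header setup; a
# sketch of a shared helper they could delegate to (hypothetical, not part
# of the original module):
def build_lectio_headers ( session ):
	cookies = {
		"lecmobile" : "0",
		"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
		"LastLoginUserName" : session["LastLoginUserName"],
		"lectiogsc" : session["lectiogsc"],
		"LectioTicket" : session["LectioTicket"]
	}
	return {
		"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
		"Content-Type" : "application/x-www-form-urlencoded",
		"Host" : "www.lectio.dk",
		"Origin" : "https://www.lectio.dk",
		"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
	}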
def survey_answer_page ( config, session = False ):
url = "https://www.lectio.dk/lectio/%s/spoergeskema_besvar.aspx?id=%s" % ( str(config["school_id"]), str(config["survey_id"]) )
if session is False:
session = authenticate.authenticate(config)
if session == False:
return {"status" : "error", "type" : "authenticate"}
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "m_Content_InfoTable"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
elements = soup.find("table", attrs={"id" : "m_Content_InfoTable"}).findAll("td")
owner = context_card.user({
"context_card_id" : elements[1].find("span")["lectiocontextcard"],
"school_id" : str(config["school_id"])
}, session)["user"]
ownerUser = {
"context_cards" : [elements[1].find("span")["lectiocontextcard"], owner["context_card_id"]],
"picture_id" : owner["picture_id"],
"name" : owner["name"],
"type" : owner["type"]
}
if owner["type"] == "student":
ownerUser["student_id"] = owner["student_id"]
else:
ownerUser["teacher_id"] = owner["teacher_id"]
information = {
"title" : elements[0].text.encode("utf8"),
"owner" : ownerUser,
"anonymous" : True if elements[2].text == "Ja" else False,
"teachers" : elements[3].text.split(", "),
"teams" : elements[4].text.split(", ")
}
sections = []
section_number = None
section_title = None
section_elements = []
section_description = None
titleProg = re.compile(r"(?P<number>[\d]*) (?P<title>.*)")
subTitleProg = re.compile(r"(?P<number>[\d\.\d\S]*) (?P<title>.*)")
for row in soup.find(attrs={"id" : "m_Content_questionIsland2_pa"}).findAll("table"):
if row.find("h3") is None:
if not row.find(attrs={"type" : "RADIO"}) is None:
type = "radio"
elif not row.find(attrs={"type" : "CHECKBOX"}) is None:
type = "checkbox"
else:
type = "text"
lines = row.find("h4").text.replace("\t", "").replace("\r", "").strip().split("\n")
titleGroups = subTitleProg.match(str(lines[0]) + " " + str(lines[1]))
options = []
section_id = None
if type == "text":
section_id = row.find("textarea")["name"].replace("answer_", "")
options.append({
"type" : "text",
"name" : row.find("textarea")["name"]
})
else:
for element in row.findAll("div"):
section_id = element.find("input")["name"].replace("answer_", "")
options.append({
"title" : element.find("label").text.encode("utf8"),
"value" : element.find("input")["value"],
"name" : element.find("input")["name"],
"type" : type
})
section_elements.append({
"type" : type,
"title" : titleGroups.group("title") if not titleGroups is None else "",
"description" : row.find(attrs={"class" : "discreteCell"}).text.replace("\r", "").replace("\n", "").replace("\t", "").strip(),
"number" : titleGroups.group("number") if not titleGroups is None else "",
"options" : options,
"section_id" : section_id
})
else:
if not section_number is None:
sections.append({
"number" : section_number,
"title" : section_title,
"elements" : section_elements,
"description" : section_description
})
section_number = None
section_title = None
section_elements = []
section_description = None
lines = row.find("h3").text.replace("\t", "").replace("\r", "").strip().split("\n")
titleGroups = titleProg.match(str(lines[0]) + " " + str(lines[1]))
section_number = titleGroups.group("number") if not titleGroups is None else None
section_title = titleGroups.group("title") if not titleGroups is None else None
section_description = row.find(attrs={"class" : "discreteCell"}).text.replace("\r\n", "").replace("\t", "").strip()
if section_number == None:
section_number = 1
section_title = ""
section_description = ""
sections.append({
"number" : section_number,
"title" : section_title,
"elements" : section_elements,
"description" : section_description
})
return {
"status" : "ok",
"information" : information,
"sections" : sections
}
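# The "day/month-year hour:minute" parsing above is repeated for every
# table in this module; a hedged helper sketch (hypothetical, not in the
# original code) collapsing it into one place:
def parse_lectio_datetime ( text ):
	groups = re.match(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)", text)
	if groups is None:
		return ""
	return datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(groups.group("day")), functions.zeroPadding(groups.group("month")), groups.group("year"), groups.group("hour"), groups.group("minute")), "%d/%m-%Y %H:%M")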
def survey_report ( config, session = False ):
url = "https://www.lectio.dk/lectio/%s/spoergeskema/spoergeskemarapportering.aspx?id=%s" % ( str(config["school_id"]), str(config["survey_id"]) )
if session is False:
session = authenticate.authenticate(config)
if session == False:
return {"status" : "error", "type" : "authenticate"}
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("div", attrs={"id" : "m_Content_sdasd_pa"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
dateTimeProg = re.compile(r"(?P<day>.*)/(?P<month>.*)-(?P<year>.*) (?P<hour>.*):(?P<minute>.*)")
informationTables = soup.find("div", attrs={"id" : "m_Content_sdasd_pa"}).findAll("table")
infoElements = informationTables[0].findAll("td")
dateGroups = dateTimeProg.match(infoElements[2].text)
answerDate = datetime.strptime("%s/%s-%s %s:%s" % (functions.zeroPadding(dateGroups.group("day")), functions.zeroPadding(dateGroups.group("month")), dateGroups.group("year"), dateGroups.group("hour"), dateGroups.group("minute")), "%d/%m-%Y %H:%M") if not dateGroups is None else ""
owner = context_card.user({
"context_card_id" : infoElements[1].find("span")["lectiocontextcard"],
"school_id" : str(config["school_id"])
}, session)["user"]
ownerUser = {
"context_cards" : [infoElements[1].find("span")["lectiocontextcard"], owner["context_card_id"]],
"picture_id" : owner["picture_id"],
"name" : owner["name"],
"type" : owner["type"]
}
if owner["type"] == "student":
ownerUser["student_id"] = owner["student_id"]
else:
ownerUser["teacher_id"] = owner["teacher_id"]
information = {
"title" : infoElements[0].text.encode("utf8"),
"answer_date" : answerDate,
"owner" : ownerUser
}
statElements = informationTables[1].findAll("td")
stats = {
"teachers" : {
"registred" : statElements[1].text,
"submitted" : statElements[2].text,
"submitted_with_unsubscribed" : statElements[3].text,
"not_submitted" : statElements[4].text
},
"students" : {
"registred" : statElements[5].text,
"submitted" : statElements[6].text,
"submitted_with_unsubscribed" : statElements[7].text,
"not_submitted" : statElements[8].text
},
"total" : {
"registred" : statElements[9].text,
"submitted" : statElements[10].text,
"submitted_with_unsubscribed" : statElements[11].text,
"not_submitted" : statElements[12].text
}
}
sections = []
section_number = None
section_title = None
section_elements = []
section_description = None
current_question_title = None
current_question_number = None
current_question_description = None
titleProg = re.compile(r"(?P<number>[\d\.\d\S]*) (?P<title>.*)")
type = "text"
answerStats = []
unanswered = 0
unansweredPercent = 0
for row in soup.find(attrs={"id" : "m_Content_ctl00_pa"}).find("table").findAll("tr", recursive=False):
elements = row.findAll("td")
text = elements[0].text.strip().replace("\r", "").replace("\t", "")
if len(text) > 0:
if not elements[0].find("h3") is None:
titleGroups = titleProg.match(elements[0].find("h3").text)
if not "." in titleGroups.group("number"):
if not section_number is None:
sections.append({
"number" : section_number,
"title" : section_title,
"elements" : section_elements,
"description" : section_description
})
section_number = None
section_title = None
section_elements = []
section_description = None
section_number = titleGroups.group("number") if not titleGroups is None else None
section_title = titleGroups.group("title") if not titleGroups is None else None
elements[0].find("h3").decompose()
section_description = elements[0].text.replace("\r\n", "").replace("\t", "").strip().strip("\n")
else:
current_question_number = titleGroups.group("number") if not titleGroups is None else None
current_question_title = titleGroups.group("title") if not titleGroups is None else None
elements[0].find("h3").decompose()
current_question_description = elements[0].text.replace("\r\n", "").replace("\t", "").strip().strip("\n")
else:
tables = row.findAll("table")
answers = []
if tables[0].find("img") is None:
for x in tables[0].findAll("tr"):
xElements = x.findAll("td")
if type == "checkbox":
options = xElements[3].text.split(", ")
else:
options = [xElements[3].text]
if xElements[2].text == "anonym":
answers.append({
"anonymous" : True,
"respondent_id" : xElements[0].text,
"options" : options
})
else:
answers.append({
"anonymous" : False,
"options" : options,
"user_context_card_id" : xElements[0].find("span")["lectiocontextcard"],
"user_text_id" : xElements[1].text,
"user_team_text" : xElements[2].text
})
section_elements.append({
"number" : current_question_number.encode("utf8"),
"title" : current_question_title.encode("utf8"),
"description" : current_question_description.encode("utf8"),
"type" : type,
"answers" : answers,
"answer_stats" : answerStats,
"unanswered" : str(unanswered),
"unanswered_percent" : str(unansweredPercent)
})
type = "text"
answerStats = []
unanswered = 0
unansweredPercent = 0
else:
for x in tables[0].findAll("tr"):
xElements = x.findAll("td")
if x.find("th").text == "Ubesvaret":
type = "radio"
unanswered = xElements[1].text
unansweredPercent = xElements[2].text.replace(" %", "")
else:
type = "checkbox"
answerStats.append({
"text" : x.find("th").text.encode("utf8"),
"number" : xElements[1].text,
"percent" : xElements[2].text.replace(" %", "").replace(",", ".")
})
if section_number == None:
section_number = 1
section_title = ""
section_description = ""
sections.append({
"number" : section_number,
"title" : section_title,
"elements" : section_elements,
"description" : section_description
})
return {
"status" : "ok",
"information" : information,
"stats" : stats,
"sections" : sections
}
def templates ( config, session = False ):
url = "https://www.lectio.dk/lectio/%s/spoergeskema/skabeloner.aspx?elevid=%s" % ( str(config["school_id"]), str(config["student_id"]) )
if session is False:
session = authenticate.authenticate(config)
        if session is False:
return {"status" : "error", "type" : "authenticate"}
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("div", attrs={"id" : "s_m_Content_Content_createQueryIsland_pa"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
templates = []
ownSchoolProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/spoergeskema_besvar.aspx\?mode=display&id=(?P<survey_id>.*)&prevurl=(?P<prev_url>.*)")
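    # For illustration (made-up values): an href such as
    # "/lectio/94/spoergeskema_besvar.aspx?mode=display&id=123&prevurl=x"
    # yields school_id "94" and survey_id "123".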
if not soup.find("div", attrs={"id" : "s_m_Content_Content_createQueryIsland_pa"}).find(attrs={"class" : "noRecord"}):
for row in soup.find("div", attrs={"id" : "s_m_Content_Content_createQueryIsland_pa"}).findAll("tr")[1:]:
idGroups = ownSchoolProg.match(row.find("a")["href"])
templates.append({
"school_id" : str(config["school_id"]),
"branch_id" : str(config["school_id"]),
"title" : row.find("a").text.encode("utf8"),
"survey_id" : idGroups.group("survey_id") if not idGroups is None else "",
"template" : True
})
if not soup.find("div", attrs={"id" : "s_m_Content_Content_LectioDetailIsland1_pa"}).find(attrs={"class" : "noRecord"}):
for row in soup.find("div", attrs={"id" : "s_m_Content_Content_LectioDetailIsland1_pa"}).findAll("tr")[1:]:
idGroups = ownSchoolProg.match(row.find("a")["href"])
elements = row.findAll("td")
templates.append({
"title" : row.find("a").text.encode("utf8"),
"survey_id" : idGroups.group("survey_id") if not idGroups is None else "",
"school_name" : elements[1].text.encode("utf8"),
"owner_name" : elements[2].text.encode("utf8"),
"template" : True
})
return {
"status" : "ok",
"templates" : templates
}
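# A minimal usage sketch, assuming a config dict carrying the "school_id" and
# "student_id" keys read above (the values here are hypothetical):
if __name__ == "__main__":
    config = {"school_id": "94", "student_id": "1234567890"}
    result = templates(config)
    if result["status"] == "ok":
        for template in result["templates"]:
            print(template["title"])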
| |
# -*- coding: utf8 -*-
"""
.. module:: lesscpy.plib.block
:synopsis: Block parse node.
Copyright (c)
See LICENSE for details.
.. moduleauthor:: Johann T. Mariusson <jtm@robot.is>
"""
from .node import Node
from lesscpy.lessc import utility
from lesscpy.plib.identifier import Identifier
class Block(Node):
""" Block node. Represents one parse-block.
Can contain property nodes or other block nodes.
identifier {
        properties
inner blocks
}
"""
def parse(self, scope):
"""Parse block node.
args:
scope (Scope): Current scope
raises:
SyntaxError
returns:
self
"""
if not self.parsed:
scope.push()
self.name, inner = self.tokens
scope.current = self.name
scope.real.append(self.name)
if not self.name.parsed:
self.name.parse(scope)
if not inner:
inner = []
inner = list(utility.flatten([p.parse(scope) for p in inner if p]))
self.parsed = []
self.inner = []
if not hasattr(self, "inner_media_queries"):
self.inner_media_queries = []
for p in inner:
if p is not None:
if isinstance(p, Block):
if (len(scope) == 2 and p.tokens[1] is not None):
p_is_mediaquery = p.name.tokens[0] == '@media'
# Inner block @media ... { ... } is a nested media
# query. But double-nested media queries have to be
# removed and marked as well. While parsing ".foo",
# both nested "@media print" and double-nested
# "@media all" will be handled as we have to
# re-arrange the scope and block layout quite a bit:
#
# .foo {
# @media print {
# color: blue;
# @media screen { font-size: 12em; }
# }
# }
#
# Expected result:
#
# @media print {
# .foo { color: blue; }
# }
# @media print and screen {
                            #     .foo { font-size: 12em; }
# }
append_list = []
reparse_p = False
for child in p.tokens[1]:
if isinstance(child, Block) and child.name.raw().startswith("@media"):
# Remove child from the nested media query, it will be re-added to
# the parent with 'merged' media query (see above example).
p.tokens[1].remove(child)
if p_is_mediaquery: # Media query inside a & block
# Double-nested media query found. We remove it from 'p' and add
# it to this block with a new 'name'.
reparse_p = True
part_a = p.name.tokens[2:][0][0][0]
part_b = child.name.tokens[2:][0][0]
new_ident_tokens = ['@media', ' ', [part_a, (' ', 'and', ' '), part_b]]
# Parse child again with new @media $BLA {} part
child.tokens[0] = Identifier(new_ident_tokens)
child.parsed = None
child = child.parse(scope)
else:
child.block_name = p.name
append_list.append(child)
if reparse_p:
p.parsed = None
p = p.parse(scope)
if not p_is_mediaquery and not append_list:
self.inner.append(p)
else:
                                append_list.insert(0, p)  # This media query should occur before its children
for media_query in append_list:
self.inner_media_queries.append(media_query)
# NOTE(saschpe): The code is not recursive but we hope that people
                                # won't use triple-nested media queries.
else:
self.inner.append(p)
else:
self.parsed.append(p)
if self.inner_media_queries:
# Nested media queries, we have to remove self from scope and
# push all nested @media ... {} blocks.
scope.remove_block(self, index=-2)
for mb in self.inner_media_queries:
# New inner block with current name and media block contents
if hasattr(mb, 'block_name'):
cb_name = mb.block_name
else:
cb_name = self.tokens[0]
cb = Block([cb_name, mb.tokens[1]]).parse(scope)
# Replace inner block contents with new block
new_mb = Block([mb.tokens[0], [cb]]).parse(scope)
self.inner.append(new_mb)
scope.add_block(new_mb)
scope.real.pop()
scope.pop()
return self
def raw(self, clean=False):
"""Raw block name
args:
clean (bool): clean name
returns:
str
"""
try:
return self.tokens[0].raw(clean)
except (AttributeError, TypeError):
pass
def fmt(self, fills):
"""Format block (CSS)
args:
fills (dict): Fill elements
returns:
str (CSS)
"""
f = "%(identifier)s%(ws)s{%(nl)s%(proplist)s}%(eb)s"
out = []
name = self.name.fmt(fills)
if self.parsed and any(p for p in self.parsed if str(type(p)) != "<class 'lesscpy.plib.variable.Variable'>"):
fills.update({
'identifier': name,
'proplist': ''.join([p.fmt(fills) for p in self.parsed if p]),
})
out.append(f % fills)
if hasattr(self, 'inner'):
if self.name.subparse and len(self.inner) > 0: # @media
inner = ''.join([p.fmt(fills) for p in self.inner])
inner = inner.replace(fills['nl'],
fills['nl'] + fills['tab']).rstrip(fills['tab'])
if not fills['nl']:
inner = inner.strip()
fills.update({
'identifier': name,
'proplist': fills['tab'] + inner
})
out.append(f % fills)
else:
out.append(''.join([p.fmt(fills) for p in self.inner]))
return ''.join(out)
def copy(self):
""" Return a full copy of self
returns: Block object
"""
name, inner = self.tokens
if inner:
inner = [u.copy() if u else u
for u in inner]
if name:
name = name.copy()
return Block([name, inner], 0)
def copy_inner(self, scope):
"""Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents)
"""
if self.tokens[1]:
tokens = [u.copy() if u else u
for u in self.tokens[1]]
out = [p for p in tokens if p]
utility.rename(out, scope, Block)
return out
return None
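# A minimal sketch of driving this node through lesscpy's public compiler
# entry point (assumes lesscpy and six are installed; mirrors the nested
# media query example documented in Block.parse above):
if __name__ == '__main__':
    import lesscpy
    from six import StringIO
    less = u".foo { @media print { color: blue; @media screen { font-size: 12em; } } }"
    # Expected to emit an "@media print" block and a merged
    # "@media print and screen" block wrapping .foo.
    print(lesscpy.compile(StringIO(less), minify=True))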
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from mock import MagicMock, patch
# Bokeh imports
from bokeh.core.properties import Any, ColumnData, Instance
from bokeh.core.serialization import MapRep, ObjectRefRep, Serializer
from bokeh.document import Document
from bokeh.model import Model
# Module under test
import bokeh.document.events as bde # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class FakeEmptyDispatcher:
pass
class FakeFullDispatcher:
def __init__(self) -> None:
self.called = []
def _document_changed(self, event): self.called.append('_document_changed')
def _document_patched(self, event): self.called.append('_document_patched')
def _document_model_changed(self, event): self.called.append('_document_model_changed')
def _column_data_changed(self, event): self.called.append('_column_data_changed')
def _columns_streamed(self, event): self.called.append('_columns_streamed')
def _columns_patched(self, event): self.called.append('_columns_patched')
def _session_callback_added(self, event): self.called.append('_session_callback_added')
def _session_callback_removed(self, event): self.called.append('_session_callback_removed')
class SomeModel(Model):
data = ColumnData(Any, Any, default={})
ref1 = Instance(Model, default=lambda: SomeModel())
ref2 = Instance(Model, default=lambda: SomeModel())
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# DocumentChangedEvent --------------------------------------------------------
class TestDocumentChangedEvent:
def test_init(self) -> None:
doc = Document()
e = bde.DocumentChangedEvent(doc)
assert e.document == doc
assert e.setter == None
assert e.callback_invoker == None
doc = Document()
e = bde.DocumentChangedEvent(doc, "setter")
assert e.document == doc
assert e.setter == "setter"
assert e.callback_invoker == None
doc = Document()
e = bde.DocumentChangedEvent(doc, callback_invoker="invoker")
assert e.document == doc
assert e.setter == None
assert e.callback_invoker == "invoker"
doc = Document()
e = bde.DocumentChangedEvent(doc, "setter", "invoker")
assert e.document == doc
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_dispatch(self) -> None:
doc = Document()
e = bde.DocumentChangedEvent(doc, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed']
def test_combine_ignores_all(self) -> None:
doc = Document()
e = bde.DocumentChangedEvent(doc, "setter", "invoker")
e2 = bde.DocumentChangedEvent(doc, "setter", "invoker")
assert e.combine(e2) == False
# DocumentPatchedEvent --------------------------------------------------------
class TestDocumentPatchedEvent:
def test_init(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.document == doc
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_to_serializable(self) -> None:
doc = Document()
s = Serializer()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
with pytest.raises(NotImplementedError):
s.encode(e)
def test_dispatch(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
def test_combine_ignores_all(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) == False
# ModelChangedEvent -----------------------------------------------------------
class TestModelChangedEvent:
def test_init_defaults(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
assert e.document == doc
assert e.setter == None
assert e.callback_invoker == None
assert e.model == "model"
assert e.attr == "attr"
assert e.new == "new"
assert e.callback_invoker == None
def test_kind(self) -> None:
assert bde.ModelChangedEvent.kind == "ModelChanged"
# TODO (bev) tests for generate
def test_dispatch(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_document_model_changed']
    def test_combine_ignores_except_model_changed_event(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) == False
def test_combine_ignores_different_setter(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new", "setter")
e2 = bde.ModelChangedEvent(doc, "model", "attr", "new2", "setter2")
assert e.combine(e2) == False
def test_combine_ignores_different_doc(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent("doc2", "model", "attr", "new2")
assert e.combine(e2) == False
def test_combine_ignores_different_model(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent(doc, "model2", "attr", "new2")
assert e.combine(e2) == False
def test_combine_ignores_different_attr(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new")
e2 = bde.ModelChangedEvent(doc, "model", "attr2", "new2")
assert e.combine(e2) == False
def test_combine_with_matching_model_changed_event(self) -> None:
doc = Document()
e = bde.ModelChangedEvent(doc, "model", "attr", "new", callback_invoker="invoker")
e2 = bde.ModelChangedEvent(doc, "model", "attr", "new2", callback_invoker="invoker2")
assert e.combine(e2) == True
assert e.new == "new2"
assert e.callback_invoker == "invoker2"
@patch("bokeh.document.events.ColumnsStreamedEvent.combine")
def test_combine_with_defers(self, mock_combine: MagicMock) -> None:
mock_combine.return_value = False
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
e2 = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=2), 300, "setter", "invoker")
assert e.combine(e2) == False
assert mock_combine.call_count == 1
assert mock_combine.call_args[0] == (e2,)
assert mock_combine.call_args[1] == {}
# ColumnDataChangedEvent ------------------------------------------------------
class TestColumnDataChangedEvent:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnDataChangedEvent(doc, m, "data", None, [1,2], "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.attr == "data"
assert e.cols == [1,2]
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.ColumnDataChangedEvent.kind == "ColumnDataChanged"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel(data={"col0": [1], "col1": [1, 2], "col2": [1, 2, 3]})
e = bde.ColumnDataChangedEvent(doc, m, "data", None, ["col1", "col2"], "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(
kind=e.kind,
model=m.ref,
attr="data",
data=MapRep(type="map", entries=[("col1", [1, 2]), ("col2", [1, 2, 3])]),
cols=["col1", "col2"],
)
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnDataChangedEvent(doc, m, "data", None, [1,2], "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_column_data_changed']
def test_combine_ignores_all(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnDataChangedEvent(doc, m, "data", None, [1,2], "setter", "invoker")
e2 = bde.ColumnDataChangedEvent(doc, m, "data", None, [3,4], "setter", "invoker")
assert e.combine(e2) == False
assert e.cols == [1,2]
# ColumnsStreamedEvent --------------------------------------------------------
class TestColumnsStreamedEvent:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.attr == "data"
assert e.data == dict(foo=1)
assert e.rollover == 200
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.ColumnsStreamedEvent.kind == "ColumnsStreamed"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, model=m.ref, attr="data", data=MapRep(type="map", entries=[("foo", 1)]), rollover=200)
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_columns_streamed']
def test_combine_ignores_all(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker")
e2 = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=2), 300, "setter", "invoker")
assert e.combine(e2) == False
assert e.model is m
assert e.attr == "data"
assert e.data == dict(foo=1)
assert e.rollover == 200
def test_pandas_data(self, pd) -> None:
doc = Document()
m = SomeModel()
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
e = bde.ColumnsStreamedEvent(doc, m, "data", df, 200, "setter", "invoker")
assert isinstance(e.data, dict)
assert e.data == {c: df[c] for c in df.columns}
# ColumnsPatchedEvent ---------------------------------------------------------
class TestColumnsPatchedEvent:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.attr == "data"
assert e.patches == [1, 2]
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.ColumnsPatchedEvent.kind == "ColumnsPatched"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, model=m.ref, attr="data", patches=[1,2])
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1, 2], "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_columns_patched']
def test_combine_ignores_all(self) -> None:
doc = Document()
m = SomeModel()
e = bde.ColumnsPatchedEvent(doc, m, "data", [1,2], "setter", "invoker")
e2 = bde.ColumnsPatchedEvent(doc, m, "data", [3,4], "setter", "invoker")
assert e.combine(e2) == False
assert e.patches == [1,2]
# TitleChangedEvent -----------------------------------------------------------
class TestTitleChangedEvent:
def test_init(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
assert e.document == doc
assert e.title == "title"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.TitleChangedEvent.kind == "TitleChanged"
def test_to_serializable(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, title="title")
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
    def test_combine_ignores_except_title_changed_event(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_setter(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent(doc, "title2", "setter2", "invoker2")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_doc(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent("doc2", "title2", "setter2", "invoker2")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_with_title_changed_event(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent(doc, "title2", "setter", "invoker2")
assert e.combine(e2) == True
assert e.title == "title2"
assert e.callback_invoker == "invoker2"
# RootAddedEvent --------------------------------------------------------------
class TestRootAddedEvent:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootAddedEvent(doc, m, "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.RootAddedEvent.kind == "RootAdded"
def test_to_serializable(self) -> None:
doc = Document()
ref1 = SomeModel()
ref2 = SomeModel()
m = SomeModel(ref1=ref1, ref2=ref2)
e = bde.RootAddedEvent(doc, m, "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(
kind=e.kind,
model=ObjectRefRep(
type="object",
name="test_events__document.SomeModel",
id=m.id,
attributes=dict(
ref1=ObjectRefRep(
type="object",
name="test_events__document.SomeModel",
id=ref1.id,
),
ref2=ObjectRefRep(
type="object",
name="test_events__document.SomeModel",
id=ref2.id,
),
),
),
)
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootAddedEvent(doc, m, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
# RootRemovedEvent ------------------------------------------------------------
class TestRootRemovedEvent:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.RootRemovedEvent.kind == "RootRemoved"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, model=m.ref)
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
# SessionCallbackAdded --------------------------------------------------------
class TestSessionCallbackAdded:
def test_init(self) -> None:
doc = Document()
e = bde.SessionCallbackAdded(doc, "callback")
assert e.document == doc
assert e.callback == "callback"
assert e.setter == None
assert e.callback_invoker == None
def test_dispatch(self) -> None:
doc = Document()
e = bde.SessionCallbackAdded(doc, "callback")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_session_callback_added']
def test_combine_ignores_all(self) -> None:
doc = Document()
e = bde.SessionCallbackAdded(doc, "setter")
e2 = bde.SessionCallbackAdded(doc, "setter")
assert e.combine(e2) == False
# SessionCallbackRemoved ------------------------------------------------------
class TestSessionCallbackRemoved:
def test_init(self) -> None:
doc = Document()
e = bde.SessionCallbackRemoved(doc, "callback")
assert e.document == doc
assert e.callback == "callback"
assert e.setter == None
assert e.callback_invoker == None
def test_dispatch(self) -> None:
doc = Document()
e = bde.SessionCallbackRemoved(doc, "callback")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_session_callback_removed']
def test_combine_ignores_all(self) -> None:
doc = Document()
e = bde.SessionCallbackAdded(doc, "setter")
e2 = bde.SessionCallbackAdded(doc, "setter")
assert e.combine(e2) == False
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
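# A small illustrative sketch (not part of the suite) of the combine() contract
# exercised above: two title changes on the same document with the same setter
# collapse into the first event, which adopts the newer title and invoker.
if __name__ == "__main__":
    _doc = Document()
    _e1 = bde.TitleChangedEvent(_doc, "draft", "setter", "invoker")
    _e2 = bde.TitleChangedEvent(_doc, "final", "setter", "invoker2")
    assert _e1.combine(_e2)
    assert _e1.title == "final" and _e1.callback_invoker == "invoker2"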
| |
"""
Implementation of Density-Based Clustering Validation "DBCV"
Citation:
Moulavi, Davoud, et al. "Density-based clustering validation."
Proceedings of the 2014 SIAM International Conference on Data Mining.
Society for Industrial and Applied Mathematics, 2014.
"""
import numpy as np
from scipy.spatial.distance import euclidean, cdist
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse import csgraph
def DBCV(X, labels, dist_function=euclidean):
"""
Density Based clustering validation
Args:
X (np.ndarray): ndarray with dimensions [n_samples, n_features]
data to check validity of clustering
labels (np.array): clustering assignments for data X
        dist_function (func): function to determine distance between objects
func args must be [np.array, np.array] where each array is a point
Returns: cluster_validity (float)
        score in range [-1, 1] indicating validity of clustering assignments
"""
graph = _mutual_reach_dist_graph(X, labels, dist_function)
mst = _mutual_reach_dist_MST(graph)
cluster_validity = _clustering_validity_index(mst, labels)
return cluster_validity
def _core_dist(point, neighbors, dist_function):
"""
Computes the core distance of a point.
Core distance is the inverse density of an object.
Args:
point (np.array): array of dimensions (n_features,)
point to compute core distance of
neighbors (np.ndarray): array of dimensions (n_neighbors, n_features):
array of all other points in object class
        dist_function (func): function to determine distance between objects
func args must be [np.array, np.array] where each array is a point
Returns: core_dist (float)
inverse density of point
"""
n_features = np.shape(point)[0]
n_neighbors = np.shape(neighbors)[0]
distance_vector = cdist(point.reshape(1, -1), neighbors)
distance_vector = distance_vector[distance_vector != 0]
numerator = ((1/distance_vector)**n_features).sum()
core_dist = (numerator / (n_neighbors - 1)) ** (-1/n_features)
return core_dist
def _mutual_reachability_dist(point_i, point_j, neighbors_i,
neighbors_j, dist_function):
""".
Computes the mutual reachability distance between points
Args:
point_i (np.array): array of dimensions (n_features,)
point i to compare to point j
point_j (np.array): array of dimensions (n_features,)
            point j to compare to point i
neighbors_i (np.ndarray): array of dims (n_neighbors, n_features):
array of all other points in object class of point i
neighbors_j (np.ndarray): array of dims (n_neighbors, n_features):
array of all other points in object class of point j
        dist_function (func): function to determine distance between objects
func args must be [np.array, np.array] where each array is a point
Returns: mutual_reachability (float)
mutual reachability between points i and j
"""
core_dist_i = _core_dist(point_i, neighbors_i, dist_function)
core_dist_j = _core_dist(point_j, neighbors_j, dist_function)
dist = dist_function(point_i, point_j)
mutual_reachability = np.max([core_dist_i, core_dist_j, dist])
return mutual_reachability
def _mutual_reach_dist_graph(X, labels, dist_function):
"""
Computes the mutual reach distance complete graph.
Graph of all pair-wise mutual reachability distances between points
Args:
X (np.ndarray): ndarray with dimensions [n_samples, n_features]
data to check validity of clustering
labels (np.array): clustering assignments for data X
        dist_function (func): function to determine distance between objects
func args must be [np.array, np.array] where each array is a point
Returns: graph (np.ndarray)
array of dimensions (n_samples, n_samples)
Graph of all pair-wise mutual reachability distances between points.
"""
n_samples = np.shape(X)[0]
graph = []
for row in range(n_samples):
graph_row = []
for col in range(n_samples):
point_i = X[row]
point_j = X[col]
class_i = labels[row]
class_j = labels[col]
members_i = _get_label_members(X, labels, class_i)
members_j = _get_label_members(X, labels, class_j)
dist = _mutual_reachability_dist(point_i, point_j,
members_i, members_j,
dist_function)
graph_row.append(dist)
graph.append(graph_row)
graph = np.array(graph)
return graph
def _mutual_reach_dist_MST(dist_tree):
"""
Computes minimum spanning tree of the mutual reach distance complete graph
Args:
dist_tree (np.ndarray): array of dimensions (n_samples, n_samples)
Graph of all pair-wise mutual reachability distances
between points.
Returns: minimum_spanning_tree (np.ndarray)
array of dimensions (n_samples, n_samples)
minimum spanning tree of all pair-wise mutual reachability
distances between points.
"""
mst = minimum_spanning_tree(dist_tree).toarray()
return mst + np.transpose(mst)
def _cluster_density_sparseness(MST, labels, cluster):
"""
Computes the cluster density sparseness, the minimum density
within a cluster
Args:
MST (np.ndarray): minimum spanning tree of all pair-wise
mutual reachability distances between points.
labels (np.array): clustering assignments for data X
cluster (int): cluster of interest
Returns: cluster_density_sparseness (float)
value corresponding to the minimum density within a cluster
"""
indices = np.where(labels == cluster)[0]
cluster_MST = MST[indices][:, indices]
cluster_density_sparseness = np.max(cluster_MST)
return cluster_density_sparseness
def _cluster_density_separation(MST, labels, cluster_i, cluster_j):
"""
Computes the density separation between two clusters, the maximum
density between clusters.
Args:
MST (np.ndarray): minimum spanning tree of all pair-wise
mutual reachability distances between points.
labels (np.array): clustering assignments for data X
cluster_i (int): cluster i of interest
cluster_j (int): cluster j of interest
Returns: density_separation (float):
value corresponding to the maximum density between clusters
"""
indices_i = np.where(labels == cluster_i)[0]
indices_j = np.where(labels == cluster_j)[0]
shortest_paths = csgraph.dijkstra(MST, indices=indices_i)
relevant_paths = shortest_paths[:, indices_j]
density_separation = np.min(relevant_paths)
return density_separation
def _cluster_validity_index(MST, labels, cluster):
"""
    Computes the validity of a cluster (validity of assignments)
Args:
MST (np.ndarray): minimum spanning tree of all pair-wise
mutual reachability distances between points.
labels (np.array): clustering assignments for data X
cluster (int): cluster of interest
Returns: cluster_validity (float)
value corresponding to the validity of cluster assignments
"""
min_density_separation = np.inf
for cluster_j in np.unique(labels):
if cluster_j != cluster:
cluster_density_separation = _cluster_density_separation(MST,
labels,
cluster,
cluster_j)
if cluster_density_separation < min_density_separation:
min_density_separation = cluster_density_separation
cluster_density_sparseness = _cluster_density_sparseness(MST,
labels,
cluster)
numerator = min_density_separation - cluster_density_sparseness
denominator = np.max([min_density_separation, cluster_density_sparseness])
cluster_validity = numerator / denominator
return cluster_validity
def _clustering_validity_index(MST, labels):
"""
Computes the validity of all clustering assignments for a
clustering algorithm
Args:
MST (np.ndarray): minimum spanning tree of all pair-wise
mutual reachability distances between points.
labels (np.array): clustering assignments for data X
Returns: validity_index (float):
        score in range [-1, 1] indicating validity of clustering assignments
"""
n_samples = len(labels)
validity_index = 0
for label in np.unique(labels):
fraction = np.sum(labels == label) / float(n_samples)
cluster_validity = _cluster_validity_index(MST, labels, label)
validity_index += fraction * cluster_validity
return validity_index
def _get_label_members(X, labels, cluster):
"""
Helper function to get samples of a specified cluster.
Args:
X (np.ndarray): ndarray with dimensions [n_samples, n_features]
data to check validity of clustering
labels (np.array): clustering assignments for data X
cluster (int): cluster of interest
Returns: members (np.ndarray)
array of dimensions (n_samples, n_features) of samples of the
specified cluster.
"""
indices = np.where(labels == cluster)[0]
members = X[indices]
return members
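# A minimal usage sketch (assumes scikit-learn is available; the data set and
# cluster count below are illustrative only):
if __name__ == "__main__":
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
    labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
    print(DBCV(X, labels))  # closer to 1 means denser, better-separated clusters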
| |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
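# For illustration (made-up value): with config = {'TITANIUM_SDK_VERSION': '3.1.0.GA'},
# replace_vars(config, '$(TITANIUM_SDK_VERSION)/iphone') returns '3.1.0.GA/iphone';
# unknown $(...) keys are left untouched.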
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
		documentation.append({file: html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.liveperson.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','com.liveperson.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComLivepersonModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
		key,value = line.split(':',1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
binaryname = 'lib%s.a' % manifest['moduleid']
binarypath = os.path.join('build', binaryname)
manifestarch = set(manifest['architectures'].split(' '))
output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
builtarch = set(output.split(':')[-1].strip().split(' '))
if ('arm64' not in builtarch):
warn('built module is missing 64-bit support.')
if (manifestarch != builtarch):
		warn('there is a discrepancy between the architectures specified in the module manifest and the compiled binary.')
warn('architectures in manifest: %s' % ', '.join(manifestarch))
warn('compiled binary architectures: %s' % ', '.join(builtarch))
die('please update manifest to match module binary architectures.')
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
verify_build_arch(manifest, config)
package_module(manifest,mf,config)
sys.exit(0)
| |
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr2_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# Encode each entry as an integer call, with "?" standing in for missing values.
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
# Collapse the matrix into one concatenated character string per sample.
total_matrix = total_matrix.astype(str).apply(''.join)
# Prefix each string with its sample name and write one "name sequence" line
# per sample, PHYLIP-style.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str), sep=' '))
tott.to_csv("normal_chrom2.phy", header=False, index=False)
print(tott.shape)
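# --- Illustrative sketch (not part of the original script) ---
# A minimal toy example of the same matrix-to-PHYLIP-style transformation;
# the data and names below are hypothetical, and it assumes samples lie along
# the axis that gets joined (as the sample-name index of the output suggests).
toy = pd.DataFrame({"sampleA": [1, 0, None], "sampleB": [0, None, 1]})
toy = toy.applymap(lambda x: int(x) if pd.notnull(x) else "?")
joined = toy.astype(str).apply(''.join)  # one string per toy sample
toy_rows = [name + " " + seq for name, seq in joined.items()]
assert toy_rows == ["sampleA 10?", "sampleB 0?1"]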
| |
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at:
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
"""The type mappings for the ``simplejson``-like API.
In particular, this module provides the extension to native Python data types with
particulars of the Ion data model.
"""
# Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from decimal import Decimal
try:
    # Moved to ``collections.abc`` in Python 3.3 and removed from
    # ``collections`` in Python 3.10; fall back for Python 2.
    from collections.abc import MutableMapping
except ImportError:
    from collections import MutableMapping
import six
from amazon.ion.symbols import SymbolToken
from .core import TIMESTAMP_PRECISION_FIELD
from .core import Multimap, Timestamp, IonEvent, IonType, TIMESTAMP_FRACTION_PRECISION_FIELD, TimestampPrecision, \
MICROSECOND_PRECISION, TIMESTAMP_FRACTIONAL_SECONDS_FIELD
class _IonNature(object):
"""Mix-in for Ion related properties.
Attributes:
ion_event (Optional[IonEvent]): The event, if any, associated with the value.
ion_type (IonType): The Ion type for the value.
ion_annotations (Sequence[unicode]): The annotations associated with the value.
Notes:
There is no ``field_name`` attribute as that is generally modeled as a property of the
container.
The ``ion_event`` field is only provided if the value was derived from a low-level event.
User constructed values will generally not set this field.
"""
def __init__(self, *args, **kwargs):
self.ion_type = None
self.ion_annotations = ()
def _copy(self):
"""Copies this instance. Its IonEvent (if any) is not preserved.
Keeping this protected until/unless we decide there's use for it publicly.
"""
args, kwargs = self._to_constructor_args(self)
value = self.__class__(*args, **kwargs)
value.ion_type = self.ion_type
value.ion_annotations = self.ion_annotations
return value
@staticmethod
def _to_constructor_args(value):
return (value, ), {}
@classmethod
def from_event(cls, ion_event):
"""Constructs the given native extension from the properties of an event.
Args:
ion_event (IonEvent): The event to construct the native value from.
"""
if ion_event.value is not None:
args, kwargs = cls._to_constructor_args(ion_event.value)
else:
# if value is None (i.e. this is a container event), args must be empty or initialization of the
# underlying container will fail.
args, kwargs = (), {}
value = cls(*args, **kwargs)
value.ion_type = ion_event.ion_type
value.ion_annotations = ion_event.annotations
return value
@classmethod
def from_value(cls, ion_type, value, annotations=()):
"""Constructs a value as a copy with an associated Ion type and annotations.
Args:
ion_type (IonType): The associated Ion type.
value (Any): The value to construct from, generally of type ``cls``.
annotations (Sequence[unicode]): The sequence of Unicode strings decorating this value.
"""
if value is None:
value = IonPyNull()
else:
args, kwargs = cls._to_constructor_args(value)
value = cls(*args, **kwargs)
value.ion_type = ion_type
value.ion_annotations = annotations
return value
def to_event(self, event_type, field_name=None, in_struct=False, depth=None):
"""Constructs an IonEvent from this _IonNature value.
Args:
event_type (IonEventType): The type of the resulting event.
field_name (Optional[text]): The field name associated with this value, if any. When ``None``
is specified and ``in_struct`` is ``True``, the returned event's ``field_name`` will
represent symbol zero (a ``SymbolToken`` with text=None and sid=0).
in_struct (Optional[bool]): When ``True``, indicates the returned event ``field_name``
will be populated. When ``False``, ``field_name`` will be ``None``.
depth (Optional[int]): The depth of this value.
Returns:
An IonEvent with the properties from this value.
"""
value = self
if isinstance(self, IonPyNull) or self.ion_type.is_container:
value = None
if in_struct:
if not isinstance(field_name, SymbolToken):
field_name = SymbolToken(field_name, 0 if field_name is None else None)
else:
field_name = None
return IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name,
annotations=self.ion_annotations, depth=depth)
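# Illustrative sketch (comments only; not upstream code) of converting a
# value back to an event, e.g. before re-serialization. ``IonEventType.SCALAR``
# is assumed to be importable from ``.core``:
#
#     v = IonPyDecimal.from_value(IonType.DECIMAL, Decimal('1.5'))
#     e = v.to_event(IonEventType.SCALAR, field_name=u'price', in_struct=True, depth=1)
#     # e.field_name == SymbolToken(u'price', None); e.value == Decimal('1.5')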
def _ion_type_for(name, base_cls):
class IonPyValueType(base_cls, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPyValueType, self).__init__(*args, **kwargs)
IonPyValueType.__name__ = name
IonPyValueType.__qualname__ = name
return IonPyValueType
if six.PY2:
IonPyInt = _ion_type_for('IonPyInt', long)
else:
IonPyInt = _ion_type_for('IonPyInt', int)
IonPyBool = IonPyInt
IonPyFloat = _ion_type_for('IonPyFloat', float)
IonPyDecimal = _ion_type_for('IonPyDecimal', Decimal)
IonPyText = _ion_type_for('IonPyText', six.text_type)
IonPyBytes = _ion_type_for('IonPyBytes', six.binary_type)
class IonPySymbol(SymbolToken, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPySymbol, self).__init__(*args, **kwargs)
@staticmethod
def _to_constructor_args(st):
try:
args = (st.text, st.sid, st.location)
except AttributeError:
args = (st, None, None)
kwargs = {}
return args, kwargs
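# Illustrative sketch (comments only; hypothetical values): copying a token
# through ``from_value`` preserves its parts.
#
#     st = SymbolToken(u'name', 10, None)
#     sym = IonPySymbol.from_value(IonType.SYMBOL, st)
#     # sym.text == u'name'; sym.sid == 10; sym.ion_type is IonType.SYMBOL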
class IonPyTimestamp(Timestamp, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPyTimestamp, self).__init__(*args, **kwargs)
@staticmethod
def _to_constructor_args(ts):
if isinstance(ts, Timestamp):
args = (ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, None, ts.tzinfo)
fractional_seconds = getattr(ts, TIMESTAMP_FRACTIONAL_SECONDS_FIELD, None)
precision = getattr(ts, TIMESTAMP_PRECISION_FIELD, TimestampPrecision.SECOND)
kwargs = {TIMESTAMP_PRECISION_FIELD: precision, TIMESTAMP_FRACTIONAL_SECONDS_FIELD: fractional_seconds}
else:
args = (ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.microsecond, ts.tzinfo)
kwargs = {TIMESTAMP_PRECISION_FIELD: TimestampPrecision.SECOND}
return args, kwargs
class IonPyNull(_IonNature):
"""Representation of ``null``.
Notes:
``None`` is a singleton and cannot be sub-classed, so we have our
own value type for it. The function ``is_null`` is the best way
to test for ``null``-ness or ``None``-ness.
"""
def __init__(self, *args, **kwargs):
super(IonPyNull, self).__init__(*args, **kwargs)
def __nonzero__(self):
return False
def __bool__(self):
return False
@staticmethod
def _to_constructor_args(value):
return (), {}
def is_null(value):
"""A mechanism to determine if a value is ``None`` or an Ion ``null``."""
return value is None or isinstance(value, IonPyNull)
IonPyList = _ion_type_for('IonPyList', list)
IonPyDict = _ion_type_for('IonPyDict', Multimap)
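if __name__ == '__main__':
    # Hedged usage sketch (not part of the upstream module): exercises the
    # wrappers defined above.
    v = IonPyText.from_value(IonType.STRING, u'hello', annotations=(u'greeting',))
    assert v == u'hello' and v.ion_type is IonType.STRING
    assert v.ion_annotations == (u'greeting',)
    assert is_null(IonPyNull()) and not is_null(v)
    print('simpletypes usage sketch OK')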
|