from __future__ import print_function
from builtins import map
from builtins import object
import re
import socket
import time
import _thread
import queue
from ssl import wrap_socket, CERT_NONE, CERT_REQUIRED, SSLError
DEFAULT_NAME = 'skybot'
DEFAULT_REALNAME = 'Python bot - http://github.com/rmmh/skybot'
DEFAULT_NICKSERV_NAME = 'nickserv'
DEFAULT_NICKSERV_COMMAND = 'IDENTIFY %s'
def decode(txt):
for codec in ('utf-8', 'iso-8859-1', 'shift_jis', 'cp1252'):
try:
return txt.decode(codec)
except UnicodeDecodeError:
continue
return txt.decode('utf-8', 'ignore')
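# Example: b'caf\xe9' is not valid UTF-8, so decode() falls through to
# iso-8859-1 and returns u'café'.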
def censor(text, censored_strings=None):
text = re.sub("[\n\r]+", " ", text)
if not censored_strings:
return text
words = map(re.escape, censored_strings)
pattern = "(%s)" % "|".join(words)
text = re.sub(pattern, "[censored]", text)
return text
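# For example:
#   censor("my password is hunter2\r\n", ["hunter2"]) == "my password is [censored] "
# (the trailing CRLF is collapsed to a single space before the substitution).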
class crlf_tcp(object):
"Handles tcp connections that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, timeout=300):
self.ibuffer = b''
self.obuffer = b''
self.oqueue = queue.Queue() # lines to be sent out
self.iqueue = queue.Queue() # lines that were received
self.socket = self.create_socket()
self.host = host
self.port = port
self.timeout = timeout
def create_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
while True:
try:
self.socket.connect((self.host, self.port))
except socket.timeout:
print('timed out connecting to %s:%s' % (self.host, self.port))
time.sleep(60)
except socket.gaierror:
print('problem getting address info for %s' % (self.host))
time.sleep(60)
else:
break
_thread.start_new_thread(self.recv_loop, ())
_thread.start_new_thread(self.send_loop, ())
def recv_from_socket(self, nbytes):
return self.socket.recv(nbytes)
def get_timeout_exception_type(self):
return socket.timeout
def handle_receive_exception(self, error, last_timestamp):
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return True
return False
def recv_loop(self):
last_timestamp = time.time()
while True:
try:
data = self.recv_from_socket(4096)
self.ibuffer += data
if data:
last_timestamp = time.time()
else:
if time.time() - last_timestamp > self.timeout:
self.iqueue.put(StopIteration)
self.socket.close()
return
time.sleep(1)
except (self.get_timeout_exception_type(), socket.error) as e:
if self.handle_receive_exception(e, last_timestamp):
return
continue
while b'\r\n' in self.ibuffer:
line, self.ibuffer = self.ibuffer.split(b'\r\n', 1)
self.iqueue.put(decode(line))
def send_loop(self):
while True:
line = self.oqueue.get().splitlines()[0][:500]
line = line.encode('utf-8', 'replace')
print(">>> {}".format(line))
self.obuffer += line + b'\r\n'
while self.obuffer:
sent = self.socket.send(self.obuffer)
self.obuffer = self.obuffer[sent:]
class crlf_ssl_tcp(crlf_tcp):
"Handles ssl tcp connections that consist of utf-8 lines ending with crlf"
def __init__(self, host, port, ignore_cert_errors, timeout=300):
self.ignore_cert_errors = ignore_cert_errors
crlf_tcp.__init__(self, host, port, timeout)
def create_socket(self):
return wrap_socket(crlf_tcp.create_socket(self), server_side=False,
cert_reqs=CERT_NONE if self.ignore_cert_errors else
CERT_REQUIRED)
def recv_from_socket(self, nbytes):
return self.socket.read(nbytes)
def get_timeout_exception_type(self):
return SSLError
def handle_receive_exception(self, error, last_timestamp):
return crlf_tcp.handle_receive_exception(self, error, last_timestamp)
def zip_channels(channels):
channels.sort(key=lambda x: ' ' not in x) # keyed channels first
chans = []
keys = []
for channel in channels:
if ' ' in channel:
chan, key = channel.split(' ')
chans.append(chan)
keys.append(key)
else:
chans.append(channel)
chans = ','.join(chans)
if keys:
return [chans, ','.join(keys)]
else:
return [chans]
def test_zip_channels():
assert zip_channels(['#a', '#b c', '#d']) == ['#b,#a,#d', 'c']
assert zip_channels(['#a', '#b']) == ['#a,#b']
class IRC(object):
"handles the IRC protocol"
# see the docs/ folder for more information on the protocol
IRC_PREFIX_REM = re.compile(r'(.*?) (.*?) (.*)').match
IRC_NOPREFIX_REM = re.compile(r'()(.*?) (.*)').match
IRC_NETMASK_REM = re.compile(r':?([^!@]*)!?([^@]*)@?(.*)').match
IRC_PARAM_REF = re.compile(r'(?:^|(?<= ))(:.*|[^ ]+)').findall
def __init__(self, conf):
self.conn = None
self.nick = DEFAULT_NAME
self.user = DEFAULT_NAME
self.realname = DEFAULT_REALNAME
self.user_mode = None
self.server_host = None
self.server_port = 6667
self.server_password = None
self.nickserv_password = None
self.nickserv_name = DEFAULT_NICKSERV_NAME
self.nickserv_command = DEFAULT_NICKSERV_COMMAND
self.channels = []
self.admins = []
self.censored_strings = []
self.out = queue.Queue() # responses from the server are placed here
# format: [rawline, prefix, command, params,
# nick, user, host, paramlist, msg]
self.set_conf(conf)
self.connect()
_thread.start_new_thread(self.parse_loop, ())
def set_conf(self, conf):
self.nick = conf.get('nick', DEFAULT_NAME)
self.user = conf.get('user', DEFAULT_NAME)
self.realname = conf.get('realname', DEFAULT_REALNAME)
self.user_mode = conf.get('mode', None)
self.server_host = conf['server']
self.server_port = conf.get('port', 6667)
self.server_password = conf.get('server_password', None)
self.nickserv_password = conf.get('nickserv_password', None)
self.nickserv_name = conf.get('nickserv_name', DEFAULT_NICKSERV_NAME)
self.nickserv_command = conf.get('nickserv_command', DEFAULT_NICKSERV_COMMAND)
self.channels = conf.get('channels', [])
self.admins = conf.get('admins', [])
self.censored_strings = conf.get('censored_strings', [])
if self.conn is not None:
self.join_channels()
def create_connection(self):
return crlf_tcp(self.server_host, self.server_port)
def connect(self):
self.conn = self.create_connection()
_thread.start_new_thread(self.conn.run, ())
self.cmd("NICK", [self.nick])
self.cmd("USER", [self.user, "3", "*", self.realname])
if self.server_password:
self.cmd("PASS", [self.server_password])
def parse_loop(self):
while True:
msg = self.conn.iqueue.get()
if msg == StopIteration:
self.connect()
continue
if msg.startswith(":"): # has a prefix
prefix, command, params = self.IRC_PREFIX_REM(msg).groups()
else:
prefix, command, params = self.IRC_NOPREFIX_REM(msg).groups()
nick, user, host = self.IRC_NETMASK_REM(prefix).groups()
paramlist = self.IRC_PARAM_REF(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(':'):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put([msg, prefix, command, params, nick, user, host,
paramlist, lastparam])
if command == "PING":
self.cmd("PONG", paramlist)
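# Worked example for a typical server line:
#   msg      = ":alice!ali@example.com PRIVMSG #chan :hello world"
#   prefix   = ":alice!ali@example.com", command = "PRIVMSG"
#   params   = "#chan :hello world"
#   nick/user/host = "alice" / "ali" / "example.com"
#   paramlist = ["#chan", "hello world"], lastparam = "hello world"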
def join(self, channel):
self.cmd("JOIN", channel.split(" ")) # [chan, password]
def join_channels(self):
if self.channels:
# TODO: send multiple join commands for large channel lists
self.cmd("JOIN", zip_channels(self.channels))
def msg(self, target, text):
self.cmd("PRIVMSG", [target, text])
def cmd(self, command, params=None):
if params:
params[-1] = ':' + params[-1]
params = [censor(p, self.censored_strings) for p in params]
self.send(command + ' ' + ' '.join(params))
else:
self.send(command)
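# e.g. cmd("PRIVMSG", ["#chan", "hello world"]) queues the raw line
# "PRIVMSG #chan :hello world" (the last parameter gets a ':' prefix and every
# parameter is passed through censor()).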
def send(self, str):
self.conn.oqueue.put(str)
class FakeIRC(IRC):
def __init__(self, conf, fn):
self.set_conf(conf)
self.out = queue.Queue()  # responses from the server are placed here
self.f = open(fn, 'rb')  # fn: path to a raw IRC log to replay
_thread.start_new_thread(self.parse_loop, ())
def parse_loop(self):
while True:
msg = decode(self.f.readline()[9:])
if msg == '':
print("!!!!DONE READING FILE!!!!")
return
if msg.startswith(":"): # has a prefix
prefix, command, params = self.IRC_PREFIX_REM(msg).groups()
else:
prefix, command, params = self.IRC_NOPREFIX_REM(msg).groups()
nick, user, host = self.IRC_NETMASK_REM(prefix).groups()
paramlist = self.IRC_PARAM_REF(params)
lastparam = ""
if paramlist:
if paramlist[-1].startswith(':'):
paramlist[-1] = paramlist[-1][1:]
lastparam = paramlist[-1]
self.out.put([msg, prefix, command, params, nick, user, host,
paramlist, lastparam])
if command == "PING":
self.cmd("PONG", [params])
def cmd(self, command, params=None):
pass
class SSLIRC(IRC):
# IRC.__init__ calls set_conf() (overridden below, applying the SSL defaults)
# before connecting, so no separate __init__ is needed here.
def set_conf(self, conf):
super(SSLIRC, self).set_conf(conf)
self.server_port = conf.get('port', 6697)
self.server_ignore_cert = conf.get('ignore_cert', False)
def create_connection(self):
return crlf_ssl_tcp(self.server_host, self.server_port, self.server_ignore_cert)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for slice_key_extractor."""
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
def make_features_dict(features_dict):
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
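# e.g. make_features_dict({'age': [10]}) -> {'age': {'node': np.array([10])}}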
def create_fpls():
fpl1 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['f'],
'age': [13],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
fpl2 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
return [fpl1, fpl2]
def wrap_fpl(fpl):
return {
constants.INPUT_KEY: fpl,
constants.FEATURES_PREDICTIONS_LABELS_KEY: fpl
}
class SliceTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
@parameterized.named_parameters(
('features_only', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
})
}], [slicer.SingleSliceSpec(columns=['gender'])], [[(('gender', 'm'),)],
[(('gender', 'f'),)]]),
('duplicate_feature_keys', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
})
}], [
slicer.SingleSliceSpec(columns=['gender']),
slicer.SingleSliceSpec(columns=['gender'])
], [[(('gender', 'm'),)], [(('gender', 'f'),)]]),
('transformed_features', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['boats']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['planes']
})
}], [slicer.SingleSliceSpec(columns=['interest'])
], [[(('interest', 'boats'),)], [(('interest', 'planes'),)]]),
('missing_features', [''], [{
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['boats']
})
}, {
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['planes']
})
}], [slicer.SingleSliceSpec(columns=['interest'])
], [[(('interest', 'boats'),)], [(('interest', 'planes'),)]]),
('transformed_features_with_multiple_models', ['model1', 'model2'], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY: {
'model1': make_features_dict({'interest': ['boats']}),
'model2': make_features_dict({'interest': ['planes']})
}
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['planes']
}),
constants.TRANSFORMED_FEATURES_KEY: {
'model1': make_features_dict({'interest': ['trains']}),
'model2': make_features_dict({'interest': ['planes']})
}
}], [slicer.SingleSliceSpec(columns=['interest'])], [[
(('interest', 'boats'),), (('interest', 'planes'),)
], [(('interest', 'planes'),), (('interest', 'trains'),)]]),
('features_with_batched_slices_keys', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
constants.SLICE_KEY_TYPES_KEY: [(
('age', '10'),
('interest', 'cars'),
)]
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
}),
constants.SLICE_KEY_TYPES_KEY: [(
('age', '12'),
('interest', 'cars'),
)]
}], [slicer.SingleSliceSpec(columns=['gender'])], [[
(
('age', '10'),
('interest', 'cars'),
),
(('gender', 'm'),),
], [
(
('age', '12'),
('interest', 'cars'),
),
(('gender', 'f'),),
]]),
)
def testSliceKeys(self, model_names, extracts, slice_specs, expected_slices):
eval_config = config_pb2.EvalConfig(
model_specs=[config_pb2.ModelSpec(name=name) for name in model_names])
with beam.Pipeline() as pipeline:
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(extracts)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
slice_spec=slice_specs, eval_config=eval_config))
def check_result(got):
try:
self.assertLen(got, 2)
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
self.assertCountEqual(got_results, expected_slices)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
def testLegacySliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys([
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
]))
def check_result(got):
try:
self.assertLen(got, 2)
expected_results = sorted([[(), (('gender', 'f'),)],
[(), (('gender', 'm'),)]])
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
self.assertCountEqual(got_results, expected_results)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
def testMaterializedLegacySliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
[
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
],
materialize=True))
def check_result(got):
try:
self.assertLen(got, 2)
expected_results = [
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:f']),
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:m'])
]
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEYS_KEY, item)
got_result = item[constants.SLICE_KEYS_KEY]
got_results.append(
got_result._replace(value=sorted(got_result.value)))
self.assertCountEqual(got_results, expected_results)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
if __name__ == '__main__':
tf.test.main()
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from django.utils.functional import cached_property
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
from django.conf import settings
from django.db import utils
from django.db.backends import *
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
if settings.DEBUG:
warnings.filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("MySQL received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# numbers at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
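# e.g. server_version_re.match('5.5.40-0ubuntu0.14.04.1').groups() == ('5', '5', '40'),
# which DatabaseWrapper.mysql_version (below) turns into the tuple (5, 5, 40).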
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_long_model_names = False
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
uses_savepoints = True
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
# This command is MySQL specific; the second column
# will tell you the default table type of the created
# table. Since all Django's test tables will have the same
# table type, that's enough to evaluate the feature.
cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
result = cursor.fetchone()
cursor.execute('DROP TABLE INTROSPECT_TEST')
return result[1]
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def has_zoneinfo_database(self):
cursor = self.connection.cursor()
cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
return cursor.fetchone() is not None
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
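# For example, date_trunc_sql('month', 'created') returns
#   "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)"
# the doubled percents collapse to single '%' later, when the cursor
# interpolates its query parameters.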
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
sql = "DAYOFWEEK(%s)" % field_name
else:
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql, params
def date_interval_sql(self, sql, connector, timedelta):
return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
timedelta.days, timedelta.seconds, timedelta.microseconds)
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return force_text(cursor._last_executed, errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# Truncate already resets the AUTO_INCREMENT field from
# MySQL version 5.0.13 onwards. Refs #16961.
if self.connection.mysql_version < (5, 0, 13):
return ["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences]
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
# MySQL doesn't support microseconds
return six.text_type(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
# MySQL doesn't support microseconds
return six.text_type(value.replace(microsecond=0))
def year_lookup_bounds_for_datetime_field(self, value):
# Again, no microseconds
first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
return [first.replace(microsecond=0), second.replace(microsecond=0)]
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
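# e.g. with three fields and num_values=2 this returns
#   "VALUES (%s, %s, %s), (%s, %s, %s)"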
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
cursor = self.connection.cursor()
# SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
# on a recently-inserted row will return when the field is tested for
# NULL. Disabling this value brings this aspect of MySQL in line with
# SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
cursor.close()
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
self.cursor().execute('SET foreign_key_checks=1')
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
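# Intended usage pattern per the docstrings above (sketch; `connection` is a
# DatabaseWrapper instance):
#   connection.disable_constraint_checking()
#   ... insert rows containing forward references ...
#   connection.enable_constraint_checking()
#   connection.check_constraints()  # raises IntegrityError on bad references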
def is_usable(self):
try:
self.connection.ping()
except DatabaseError:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection():
server_info = self.connection.get_server_info()
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple([int(x) for x in match.groups()])
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
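# Quick illustration:
#   partition(lambda n: n % 2 == 0, [1, 2, 3, 4]) == ([2, 4], [1, 3])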
class dialogflowCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'analyze_content': ('participant', 'text_input', 'event_input', 'reply_audio_config', 'query_params', 'assist_query_params', 'message_send_time', 'request_id', ),
'batch_create_entities': ('parent', 'entities', 'language_code', ),
'batch_create_messages': ('parent', 'requests', ),
'batch_delete_entities': ('parent', 'entity_values', 'language_code', ),
'batch_delete_entity_types': ('parent', 'entity_type_names', ),
'batch_delete_intents': ('parent', 'intents', ),
'batch_update_entities': ('parent', 'entities', 'language_code', 'update_mask', ),
'batch_update_entity_types': ('parent', 'entity_type_batch_uri', 'entity_type_batch_inline', 'language_code', 'update_mask', ),
'batch_update_intents': ('parent', 'intent_batch_uri', 'intent_batch_inline', 'language_code', 'update_mask', 'intent_view', ),
'compile_suggestion': ('parent', 'latest_message', 'context_size', ),
'complete_conversation': ('name', ),
'create_context': ('parent', 'context', ),
'create_conversation': ('parent', 'conversation', 'conversation_id', ),
'create_conversation_profile': ('parent', 'conversation_profile', ),
'create_document': ('parent', 'document', 'import_gcs_custom_metadata', ),
'create_entity_type': ('parent', 'entity_type', 'language_code', ),
'create_environment': ('parent', 'environment', 'environment_id', ),
'create_intent': ('parent', 'intent', 'language_code', 'intent_view', ),
'create_knowledge_base': ('parent', 'knowledge_base', ),
'create_participant': ('parent', 'participant', ),
'create_session_entity_type': ('parent', 'session_entity_type', ),
'create_version': ('parent', 'version', ),
'delete_agent': ('parent', ),
'delete_all_contexts': ('parent', ),
'delete_context': ('name', ),
'delete_conversation_profile': ('name', ),
'delete_document': ('name', ),
'delete_entity_type': ('name', ),
'delete_environment': ('name', ),
'delete_intent': ('name', ),
'delete_knowledge_base': ('name', 'force', ),
'delete_session_entity_type': ('name', ),
'delete_version': ('name', ),
'detect_intent': ('session', 'query_input', 'query_params', 'output_audio_config', 'output_audio_config_mask', 'input_audio', ),
'export_agent': ('parent', 'agent_uri', ),
'get_agent': ('parent', ),
'get_answer_record': ('name', ),
'get_context': ('name', ),
'get_conversation': ('name', ),
'get_conversation_profile': ('name', ),
'get_document': ('name', ),
'get_entity_type': ('name', 'language_code', ),
'get_environment': ('name', ),
'get_environment_history': ('parent', 'page_size', 'page_token', ),
'get_fulfillment': ('name', ),
'get_intent': ('name', 'language_code', 'intent_view', ),
'get_knowledge_base': ('name', ),
'get_participant': ('name', ),
'get_session_entity_type': ('name', ),
'get_validation_result': ('parent', 'language_code', ),
'get_version': ('name', ),
'import_agent': ('parent', 'agent_uri', 'agent_content', ),
'import_documents': ('parent', 'document_template', 'gcs_source', 'import_gcs_custom_metadata', ),
'list_answer_records': ('parent', 'page_size', 'page_token', ),
'list_contexts': ('parent', 'page_size', 'page_token', ),
'list_conversation_profiles': ('parent', 'page_size', 'page_token', ),
'list_conversations': ('parent', 'page_size', 'page_token', 'filter', ),
'list_documents': ('parent', 'page_size', 'page_token', 'filter', ),
'list_entity_types': ('parent', 'language_code', 'page_size', 'page_token', ),
'list_environments': ('parent', 'page_size', 'page_token', ),
'list_intents': ('parent', 'language_code', 'intent_view', 'page_size', 'page_token', ),
'list_knowledge_bases': ('parent', 'page_size', 'page_token', 'filter', ),
'list_messages': ('parent', 'filter', 'page_size', 'page_token', ),
'list_participants': ('parent', 'page_size', 'page_token', ),
'list_session_entity_types': ('parent', 'page_size', 'page_token', ),
'list_suggestions': ('parent', 'page_size', 'page_token', 'filter', ),
'list_versions': ('parent', 'page_size', 'page_token', ),
'reload_document': ('name', 'gcs_source', 'import_gcs_custom_metadata', ),
'restore_agent': ('parent', 'agent_uri', 'agent_content', ),
'search_agents': ('parent', 'page_size', 'page_token', ),
'set_agent': ('agent', 'update_mask', ),
'streaming_detect_intent': ('session', 'query_input', 'query_params', 'single_utterance', 'output_audio_config', 'output_audio_config_mask', 'input_audio', ),
'suggest_articles': ('parent', 'latest_message', 'context_size', 'assist_query_params', ),
'suggest_faq_answers': ('parent', 'latest_message', 'context_size', 'assist_query_params', ),
'suggest_smart_replies': ('parent', 'current_text_input', 'latest_message', 'context_size', ),
'train_agent': ('parent', ),
'update_answer_record': ('answer_record', 'update_mask', ),
'update_context': ('context', 'update_mask', ),
'update_conversation_profile': ('conversation_profile', 'update_mask', ),
'update_document': ('document', 'update_mask', ),
'update_entity_type': ('entity_type', 'language_code', 'update_mask', ),
'update_environment': ('environment', 'update_mask', 'allow_load_to_draft_and_discard_changes', ),
'update_fulfillment': ('fulfillment', 'update_mask', ),
'update_intent': ('intent', 'language_code', 'update_mask', 'intent_view', ),
'update_knowledge_base': ('knowledge_base', 'update_mask', ),
'update_participant': ('participant', 'update_mask', ),
'update_session_entity_type': ('session_entity_type', 'update_mask', ),
'update_version': ('version', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
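# Illustrative before/after for a hypothetical client object `client`
# (get_intent's params are ('name', 'language_code', 'intent_view')):
#   before: client.get_intent('projects/p/agent/intents/i', 'en', timeout=5)
#   after:  client.get_intent(request={'name': 'projects/p/agent/intents/i',
#                                      'language_code': 'en'}, timeout=5)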
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=dialogflowCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the dialogflow client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
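# Example invocation (the script path shown here is illustrative):
#   python3 fix_dialogflow_keywords.py -d ./old_code -o ./fixed_code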
# -*- coding: utf-8 -*-
"""
formsubplottool.py
backend.qt4 (PyQt4|PySide) independent form of the subplot tool.
"""
from matplotlib.backends.qt_compat import QtCore, QtGui, QtWidgets
__author__ = 'rudolf.hoefler@gmail.com'
class UiSubplotTool(QtWidgets.QDialog):
def __init__(self, *args, **kwargs):
super(UiSubplotTool, self).__init__(*args, **kwargs)
self.setObjectName('SubplotTool')
self.resize(450, 265)
gbox = QtWidgets.QGridLayout(self)
self.setLayout(gbox)
# groupbox borders
groupbox = QtWidgets.QGroupBox('Borders', self)
gbox.addWidget(groupbox, 6, 0, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout(groupbox)
self.verticalLayout.setSpacing(0)
# slider top
self.hboxtop = QtWidgets.QHBoxLayout()
self.labeltop = QtWidgets.QLabel('top', self)
self.labeltop.setMinimumSize(QtCore.QSize(50, 0))
self.labeltop.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.slidertop = QtWidgets.QSlider(self)
self.slidertop.setMouseTracking(False)
self.slidertop.setProperty("value", 0)
self.slidertop.setOrientation(QtCore.Qt.Horizontal)
self.slidertop.setInvertedAppearance(False)
self.slidertop.setInvertedControls(False)
self.slidertop.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.slidertop.setTickInterval(100)
self.topvalue = QtWidgets.QLabel('0', self)
self.topvalue.setMinimumSize(QtCore.QSize(30, 0))
self.topvalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(self.hboxtop)
self.hboxtop.addWidget(self.labeltop)
self.hboxtop.addWidget(self.slidertop)
self.hboxtop.addWidget(self.topvalue)
# slider bottom
hboxbottom = QtWidgets.QHBoxLayout()
labelbottom = QtWidgets.QLabel('bottom', self)
labelbottom.setMinimumSize(QtCore.QSize(50, 0))
labelbottom.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.sliderbottom = QtWidgets.QSlider(self)
self.sliderbottom.setMouseTracking(False)
self.sliderbottom.setProperty("value", 0)
self.sliderbottom.setOrientation(QtCore.Qt.Horizontal)
self.sliderbottom.setInvertedAppearance(False)
self.sliderbottom.setInvertedControls(False)
self.sliderbottom.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.sliderbottom.setTickInterval(100)
self.bottomvalue = QtWidgets.QLabel('0', self)
self.bottomvalue.setMinimumSize(QtCore.QSize(30, 0))
self.bottomvalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(hboxbottom)
hboxbottom.addWidget(labelbottom)
hboxbottom.addWidget(self.sliderbottom)
hboxbottom.addWidget(self.bottomvalue)
# slider left
hboxleft = QtWidgets.QHBoxLayout()
labelleft = QtWidgets.QLabel('left', self)
labelleft.setMinimumSize(QtCore.QSize(50, 0))
labelleft.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.sliderleft = QtWidgets.QSlider(self)
self.sliderleft.setMouseTracking(False)
self.sliderleft.setProperty("value", 0)
self.sliderleft.setOrientation(QtCore.Qt.Horizontal)
self.sliderleft.setInvertedAppearance(False)
self.sliderleft.setInvertedControls(False)
self.sliderleft.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.sliderleft.setTickInterval(100)
self.leftvalue = QtWidgets.QLabel('0', self)
self.leftvalue.setMinimumSize(QtCore.QSize(30, 0))
self.leftvalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(hboxleft)
hboxleft.addWidget(labelleft)
hboxleft.addWidget(self.sliderleft)
hboxleft.addWidget(self.leftvalue)
# slider right
hboxright = QtWidgets.QHBoxLayout()
self.labelright = QtWidgets.QLabel('right', self)
self.labelright.setMinimumSize(QtCore.QSize(50, 0))
self.labelright.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.sliderright = QtWidgets.QSlider(self)
self.sliderright.setMouseTracking(False)
self.sliderright.setProperty("value", 0)
self.sliderright.setOrientation(QtCore.Qt.Horizontal)
self.sliderright.setInvertedAppearance(False)
self.sliderright.setInvertedControls(False)
self.sliderright.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.sliderright.setTickInterval(100)
self.rightvalue = QtWidgets.QLabel('0', self)
self.rightvalue.setMinimumSize(QtCore.QSize(30, 0))
self.rightvalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(hboxright)
hboxright.addWidget(self.labelright)
hboxright.addWidget(self.sliderright)
hboxright.addWidget(self.rightvalue)
# groupbox spacings
groupbox = QtWidgets.QGroupBox('Spacings', self)
gbox.addWidget(groupbox, 7, 0, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout(groupbox)
self.verticalLayout.setSpacing(0)
# slider hspace
hboxhspace = QtWidgets.QHBoxLayout()
self.labelhspace = QtWidgets.QLabel('hspace', self)
self.labelhspace.setMinimumSize(QtCore.QSize(50, 0))
self.labelhspace.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.sliderhspace = QtWidgets.QSlider(self)
self.sliderhspace.setMouseTracking(False)
self.sliderhspace.setProperty("value", 0)
self.sliderhspace.setOrientation(QtCore.Qt.Horizontal)
self.sliderhspace.setInvertedAppearance(False)
self.sliderhspace.setInvertedControls(False)
self.sliderhspace.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.sliderhspace.setTickInterval(100)
self.hspacevalue = QtWidgets.QLabel('0', self)
self.hspacevalue.setMinimumSize(QtCore.QSize(30, 0))
self.hspacevalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(hboxhspace)
hboxhspace.addWidget(self.labelhspace)
hboxhspace.addWidget(self.sliderhspace)
hboxhspace.addWidget(self.hspacevalue) # slider hspace
# slider wspace
hboxwspace = QtWidgets.QHBoxLayout()
self.labelwspace = QtWidgets.QLabel('wspace', self)
self.labelwspace.setMinimumSize(QtCore.QSize(50, 0))
self.labelwspace.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.sliderwspace = QtWidgets.QSlider(self)
self.sliderwspace.setMouseTracking(False)
self.sliderwspace.setProperty("value", 0)
self.sliderwspace.setOrientation(QtCore.Qt.Horizontal)
self.sliderwspace.setInvertedAppearance(False)
self.sliderwspace.setInvertedControls(False)
self.sliderwspace.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.sliderwspace.setTickInterval(100)
self.wspacevalue = QtWidgets.QLabel('0', self)
self.wspacevalue.setMinimumSize(QtCore.QSize(30, 0))
self.wspacevalue.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.verticalLayout.addLayout(hboxwspace)
hboxwspace.addWidget(self.labelwspace)
hboxwspace.addWidget(self.sliderwspace)
hboxwspace.addWidget(self.wspacevalue)
# button bar
hbox2 = QtWidgets.QHBoxLayout()
gbox.addLayout(hbox2, 8, 0, 1, 1)
self.tightlayout = QtWidgets.QPushButton('Tight Layout', self)
spacer = QtWidgets.QSpacerItem(
5, 20, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Minimum)
self.resetbutton = QtWidgets.QPushButton('Reset', self)
self.donebutton = QtWidgets.QPushButton('Close', self)
self.donebutton.setFocus()
hbox2.addWidget(self.tightlayout)
hbox2.addItem(spacer)
hbox2.addWidget(self.resetbutton)
hbox2.addWidget(self.donebutton)
self.donebutton.clicked.connect(self.accept)
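# Minimal standalone usage sketch (the matplotlib Qt backend normally creates
# and wires this dialog itself; this is only for illustration):
#   app = QtWidgets.QApplication([])
#   dialog = UiSubplotTool()
#   dialog.show()
#   app.exec_()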
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the RetentionRulesEngine."""
import copy
import itertools
import json
import unittest.mock as mock
import tempfile
import unittest
import yaml
from datetime import datetime, timedelta
from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors
from google.cloud.forseti.scanner.audit import retention_rules_engine as rre
from google.cloud.forseti.scanner.audit import rules as scanner_rules
from tests.scanner.test_data import fake_retention_scanner_data as frsd
from tests.unittest_utils import get_datafile_path
from tests.unittest_utils import ForsetiTestCase
import collections
from google.cloud.forseti.scanner.scanners import retention_scanner
def get_rules_engine_with_rule(rule):
"""Create a rule engine based on a yaml file string"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(rule.encode())
f.flush()
rules_engine = rre.RetentionRulesEngine(
rules_file_path=f.name)
rules_engine.build_rule_book()
return rules_engine
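# Typical use in the tests below, e.g.:
#   rules_engine = get_rules_engine_with_rule(
#       RetentionRulesEngineTest.yaml_str_only_max_retention)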
def get_expect_violation_item(res_map, bucket_id, rule_name, rule_index):
RuleViolation = collections.namedtuple(
'RuleViolation',
['resource_name', 'resource_type', 'full_name', 'rule_name',
'rule_index', 'violation_type', 'violation_data', 'resource_data'])
lifecycle_str = json.dumps(res_map.get(bucket_id).get_lifecycle_rule())
return RuleViolation(
resource_name=bucket_id,
resource_type=res_map.get(bucket_id).type,
full_name=res_map.get(bucket_id).full_name,
rule_name=rule_name,
rule_index=rule_index,
violation_type=rre.VIOLATION_TYPE,
violation_data=lifecycle_str,
resource_data=res_map.get(bucket_id).data)
class RetentionRulesEngineTest(ForsetiTestCase):
"""Tests for the RetentionRulesEngine."""
def setUp(self):
"""Set up."""
def test_invalid_rule_with_no_applies_to(self):
"""Test that a rule without applies_to cannot be created"""
yaml_str_no_applies_to="""
rules:
- name: No applies_to
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_applies_to.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_lack_of_min_max(self):
"""Test that a rule with neither minimum_retention nor maximum_retention
cannot be created"""
yaml_str_lack_min_max="""
rules:
- name: Lack of min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_lack_min_max.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_min_lgr_max(self):
"""Test that a rule whose minimum_retention is larger than
maximum_retention cannot be created"""
yaml_str_min_lgr_max="""
rules:
- name: min larger than max
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 366
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_min_lgr_max.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_duplicate_applies_to(self):
"""Test that a rule with duplicate applies_to cannot be created"""
yaml_str_duplicate_applies_to="""
rules:
- name: Duplicate applies_to
applies_to:
- bucket
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_duplicate_applies_to.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource(self):
"""Test that a rule without resource cannot be created"""
yaml_str_no_resource="""
rules:
- name: No resource
applies_to:
- bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_resource.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_type(self):
"""Test that a rule without resource.type cannot be created"""
yaml_str_no_res_type="""
rules:
- name: No resource type
applies_to:
- bucket
resource:
- resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_type.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_id(self):
"""Test that a rule without resource.resource_ids cannot be created"""
yaml_str_no_res_id="""
rules:
- name: No resource ids
applies_to:
- bucket
resource:
- type: bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_id.encode())
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
yaml_str_only_max_retention = """
rules:
- name: only max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
maximum_retention: 365
"""
def test_number_of_bucket_rules(self):
"""The number of rules should be exactly the same as the length of SUPPORTED_RETENTION_RES_TYPES."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map['bucket']))
self.assertEqual(0, len(rules_engine.rule_book.resource_rules_map['bigquery_table']))
def test_only_max_normal_delete(self):
"""Test that a bucket's rule can guarantee the maximum_retention if its
action is 'Delete' and the only condition is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_nodelete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_larger_delete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age condition is larger than maximum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_anynormal_del(self):
"""Test that a bucket's rules can guarantee the maximum_retention
if they include a rule whose action is 'Delete' and the only condition
is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_lgr_del_anynormal_del(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age comes along with any other conditions"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_lgr_del_normal_else(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_any_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", is_live=False)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_del_lgr_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_no_condition(self):
"""Test that a rule with maximum_retention produces a violation,
if a bucket has no condition at all."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_anynormal_del(self):
"""Test that a rule with maximum_retention produces a violation.
If a condition whose age comes along with any other conditions, it cannot
guarantee the maximum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365, num_newer_versions=5)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_only_min_retention = """
rules:
- name: only min retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
"""
def test_only_min_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_normal_else(self):
"""Test that a rule whose action is not 'Delete' should not break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_else(self):
"""Test that a rule whose action is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_no_condition(self):
"""Test that a rule with minimum_retention does not produce violations.
The minimum_retention is guaranteed when there is no condition at all"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_ver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver0_else(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessold_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention if its created_before date
is earlier than today minus minimum_retention."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=89, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessnew_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention if its created_before date
is later than today minus minimum_retention."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=88)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=88, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_normalnew_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its age is larger
than or equal to minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=90, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del_normal_del(self):
"""Test that a rule with minimum_retention produces violations.
A rule that does not produce violations cannot prevent another rule from
producing violations"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_less_else_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule breaks minimum_retention, if its age is smaller than
minimum_retention and its action is 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_old_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention if its created_before date
is earlier than today minus minimum_retention."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_new_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention if its created_before date
is later than today minus minimum_retention."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=88)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_both_min_and_max_retention = """
rules:
- name: both min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
maximum_retention: 365
"""
def test_both_min_max_no_condition(self):
"""Test that a rule with both minimum_retention and maximum_retention
produces violations. A bucket's rule break it, if the bucket breakes the
maximum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del_any_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
produces violations. A bucket's rule break it, if the bucket breakes the
minimum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="Delete", is_live=True)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_both_min_max_3_conditions(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations when there are more than one conditions."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=500)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_correct_project = """
rules:
- name: bucket retention on correct project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
minimum_retention: 90
"""
def test_bucket_on_correct_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_correct_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on correct project')
self.assertEqual(got_violations, expected_violations)
yaml_str_bucket_retention_on_wrong_project = """
rules:
- name: bucket retention on wrong project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-wrong
minimum_retention: 90
"""
def test_bucket_on_incorrect_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations because the project ID does not match"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_wrong_project)
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_multi_projects = """
rules:
- name: bucket retention on multi projects
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
- def-project-2
minimum_retention: 90
"""
def test_bucket_on_multi_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_multi_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces
violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
yaml_str_bigquery_retention_on_projects = """
rules:
- name: bigquery retention on projects
applies_to:
- bigquery_table
resource:
- type: project
resource_ids:
- def-project-5
minimum_retention: 90
maximum_retention: 90
"""
def test_number_of_bigquery_rules(self):
"""The number of rules should be exactly the same as the length of SUPPORTED_RETENTION_RES_TYPES."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bigquery_retention_on_projects)
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
self.assertEqual(0, len(rules_engine.rule_book.resource_rules_map['bucket']))
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map['bigquery_table']))
def test_bigquery_retention_on_project_no_expiration_time(self):
"""Test a bigquery table without expiration time."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bigquery_retention_on_projects)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = frsd.build_table_violations(
fake_table, 'bigquery retention on projects')
self.assertEqual(got_violations, expected_violations)
def test_bigquery_retention_on_project_too_big(self):
"""Test that a rule with a resource.type equal to 'project'.
The retention is larger than the maximum limit."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bigquery_retention_on_projects)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+91 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = frsd.build_table_violations(
fake_table, 'bigquery retention on projects')
self.assertEqual(got_violations, expected_violations)
def test_bigquery_retention_on_project_too_small(self):
"""Test that a rule with a resource.type equal to 'project'.
The retention is smaller than the minimum limit."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bigquery_retention_on_projects)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+89 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = frsd.build_table_violations(
fake_table, 'bigquery retention on projects')
self.assertEqual(got_violations, expected_violations)
def test_bigquery_retention_on_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bigquery_retention_on_projects)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+90 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = []
self.assertEqual(got_violations, expected_violations)
yaml_str_bigquery_retention_on_bigquery_table = """
rules:
- name: bigquery retention on tables
applies_to:
- bigquery_table
resource:
- type: bigquery_table
resource_ids:
- def-project-5:ds01.fake_bqtable
minimum_retention: 90
maximum_retention: 92
"""
def test_bigquery_retention_on_table_too_big(self):
"""Test that a rule with a resource.type equal to 'bigquery_table'.
The retention is larger than the maximum limit."""
rules_engine = get_rules_engine_with_rule(
RetentionRulesEngineTest.yaml_str_bigquery_retention_on_bigquery_table)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+93 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = frsd.build_table_violations(
fake_table, 'bigquery retention on tables')
self.assertEqual(got_violations, expected_violations)
def test_bigquery_retention_on_table_too_small(self):
"""Test that a rule with a resource.type equal to 'bigquery_table'.
The retention is smaller than the minimum limit."""
rules_engine = get_rules_engine_with_rule(
RetentionRulesEngineTest.yaml_str_bigquery_retention_on_bigquery_table)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+89 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = frsd.build_table_violations(
fake_table, 'bigquery retention on tables')
self.assertEqual(got_violations, expected_violations)
def test_bigquery_retention_on_table_no_vio(self):
"""Test that a rule with a resource.type equal to 'project'"""
rules_engine = get_rules_engine_with_rule(
RetentionRulesEngineTest.yaml_str_bigquery_retention_on_bigquery_table)
data_creater = frsd.FakeTableDataCreater('fake_bqtable', frsd.DATASET1)
data_creater.SetExpirationTime(frsd.DEFAULT_TABLE_CREATE_TIME+91 * rre._MS_PER_DAY)
fake_table = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_table))
expected_violations = []
self.assertEqual(got_violations, expected_violations)
yaml_str_number_of_rules = """
rules:
- name: only max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
maximum_retention: 365
- name: bigquery retention on projects
applies_to:
- bigquery_table
resource:
- type: project
resource_ids:
- def-project-5
minimum_retention: 90
maximum_retention: 90
"""
def test_number_of_rules(self):
"""The number of rules should be exactly the same as the length of SUPPORTED_RETENTION_RES_TYPES."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_number_of_rules)
self.assertEqual(len(rre.SUPPORTED_RETENTION_RES_TYPES), 2)
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map['bucket']))
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map['bigquery_table']))
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from os.path import abspath, join, dirname
from preggy import expect
import mock
import tornado.web
from tests.base import PythonTestCase, TestCase
from tornado.concurrent import Future
import thumbor.loaders.https_loader as loader
from thumbor.context import Context
from thumbor.config import Config
from thumbor.loaders import LoaderResult
fixture_for = lambda filename: abspath(join(dirname(__file__), 'fixtures', filename))
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello')
class EchoUserAgentHandler(tornado.web.RequestHandler):
def get(self):
self.write(self.request.headers['User-Agent'])
class HandlerMock(object):
def __init__(self, headers):
self.request = RequestMock(headers)
class RequestMock(object):
def __init__(self, headers):
self.headers = headers
class ResponseMock:
def __init__(self, error=None, content_type=None, body=None, code=None):
self.error = error
self.code = code
self.time_info = None
self.headers = {
'Content-Type': 'image/jpeg'
}
if content_type:
self.headers['Content-Type'] = content_type
self.body = body
class ReturnContentTestCase(PythonTestCase):
def test_return_none_on_error(self):
response_mock = ResponseMock(error='Error', code=599)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
def test_return_body_if_valid(self):
response_mock = ResponseMock(body='body', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('body')
def test_return_upstream_error_on_body_none(self):
response_mock = ResponseMock(body=None, code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
def test_return_upstream_error_on_body_empty(self):
response_mock = ResponseMock(body='', code=200)
callback_mock = mock.Mock()
ctx = Context(None, None, None)
loader.return_contents(response_mock, 'some-url', callback_mock, ctx)
result = callback_mock.call_args[0][0]
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_be_null()
expect(result.successful).to_be_false()
expect(result.error).to_equal(LoaderResult.ERROR_UPSTREAM)
class ValidateUrlTestCase(PythonTestCase):
def test_with_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = ['s.glbimg.com']
ctx = Context(None, config, None)
expect(
loader.validate(
ctx,
'http://www.google.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'http://s2.glbimg.com/logo.jpg'
)
).to_be_false()
expect(
loader.validate(
ctx,
'/glob=:sfoir%20%20%3Co-pmb%20%20%20%20_%20%20%20%200%20%20g.-%3E%3Ca%20hplass='
)
).to_be_false()
expect(
loader.validate(ctx, 'http://s.glbimg.com/logo.jpg')).to_be_true()
def test_without_allowed_sources(self):
config = Config()
config.ALLOWED_SOURCES = []
ctx = Context(None, config, None)
is_valid = loader.validate(ctx, 'http://www.google.com/logo.jpg')
expect(is_valid).to_be_true()
class NormalizeUrlTestCase(PythonTestCase):
def test_should_normalize_url(self):
expect(loader._normalize_url('http://some.url')).to_equal('http://some.url')
expect(loader._normalize_url('some.url')).to_equal('https://some.url')
def test_should_normalize_url_but_keep_quotes_after_the_domain(self):
for url in ['https://some.url/my image', 'some.url/my%20image']:
expect(loader._normalize_url(url)).to_equal('https://some.url/my%20image')
def test_should_normalize_quoted_url(self):
url = 'https%3A//www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
expected = 'https://www.google.ca/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png'
result = loader._normalize_url(url)
expect(result).to_equal(expected)
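# Inferred from the expectations above (not from the loader implementation):
# loader._normalize_url appears to prepend 'https://' when no scheme is given,
# percent-encode spaces in the path, and unquote an encoded scheme such as
# 'https%3A//'.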
class HttpsLoaderTestCase(TestCase):
def get_app(self):
application = tornado.web.Application([
(r"/", MainHandler),
])
return application
def test_load_with_callback(self):
url = self.get_url('/')
config = Config()
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('Hello')
expect(result.successful).to_be_true()
def test_load_with_curl(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_CURL_ASYNC_HTTP_CLIENT = True
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('Hello')
expect(result.successful).to_be_true()
def test_should_return_a_future(self):
url = self.get_url('/')
config = Config()
ctx = Context(None, config, None)
future = loader.load(ctx, url)
expect(isinstance(future, Future)).to_be_true()
class HttpLoaderWithUserAgentForwardingTestCase(TestCase):
def get_app(self):
application = tornado.web.Application([
(r"/", EchoUserAgentHandler),
])
return application
def test_load_with_user_agent(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_FORWARD_USER_AGENT = True
ctx = Context(None, config, None, HandlerMock({"User-Agent": "test-user-agent"}))
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('test-user-agent')
def test_load_with_default_user_agent(self):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_FORWARD_USER_AGENT = True
config.HTTP_LOADER_DEFAULT_USER_AGENT = "DEFAULT_USER_AGENT"
ctx = Context(None, config, None, HandlerMock({}))
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('DEFAULT_USER_AGENT')
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import image_ops_impl as image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs.update({'height': expected_height, 'width': expected_width})
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.Resizing,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2),
('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2),
('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2),
('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2),
('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2),
('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2))
def test_down_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12),
('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12),
('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12),
('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12),
('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12),
('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12))
def test_up_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4))
def test_reshaping(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
image_preprocessing.Resizing(5, 5, 'invalid_interpolation')
def test_config_with_custom_name(self):
layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def get_numpy_center_crop(images, expected_height, expected_width):
orig_height = images.shape[1]
orig_width = images.shape[2]
height_start = int((orig_height - expected_height) / 2)
width_start = int((orig_width - expected_width) / 2)
height_end = height_start + expected_height
width_end = width_start + expected_width
return images[:, height_start:height_end, width_start:width_end, :]
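# Illustrative example: for images of shape (2, 5, 8, 3) and a requested 3x4
# crop, height_start = (5 - 3) // 2 = 1 and width_start = (8 - 4) // 2 = 2,
# so the helper returns images[:, 1:4, 2:6, :].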
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(keras_parameterized.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
input_images = np.random.random(
(num_samples, orig_height, orig_width, channels)).astype(np.float32)
expected_output = get_numpy_center_crop(
input_images, expected_height, expected_width)
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.CenterCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
input_data=input_images,
expected_output=expected_output,
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('center_crop_3_by_4', 3, 4),
('center_crop_3_by_2', 3, 2))
def test_center_crop_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_4_by_5', 4, 5),
('center_crop_4_by_3', 4, 3))
def test_center_crop_mis_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_4_by_6', 4, 6),
('center_crop_3_by_2', 3, 2))
def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_5_by_12', 5, 12),
('center_crop_10_by_8', 10, 8),
('center_crop_10_by_12', 10, 12))
def test_invalid_center_crop(self, expected_height, expected_width):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'assertion failed'):
self._run_test(expected_height, expected_width)
def test_config_with_custom_name(self):
layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.CenterCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(keras_parameterized.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('random_crop_5_by_12', 5, 12),
('random_crop_10_by_8', 10, 8),
('random_crop_10_by_12', 10, 12))
def test_invalid_random_crop(self, expected_height, expected_width):
with self.assertRaises(errors.InvalidArgumentError):
with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
self._run_test(expected_height, expected_width)
def test_training_with_mock(self):
if test.is_built_with_rocm():
# TODO(rocm):
# re-enable this test once ROCm adds support for
# the StatefulUniformFullInt Op (on the GPU)
self.skipTest('Feature not supported on ROCm')
np.random.seed(1337)
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
mock_offset = [0, height_offset, width_offset, 0]
with test.mock.patch.object(
stateless_random_ops, 'stateless_random_uniform',
return_value=mock_offset):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=1)
expected_output = inp[:, height_offset:(height_offset + height),
width_offset:(width_offset + width), :]
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_crop_4_by_6', 4, 6),
('random_crop_3_by_2', 3, 2))
def test_random_crop_output_shape(self, expected_height, expected_width):
if test.is_built_with_rocm():
# TODO(rocm):
# re-enable this test once ROCm adds support for
# the StatefulUniformFullInt Op (on the GPU)
self.skipTest('Feature not supported on ROCm')
with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
self._run_test(expected_height, expected_width)
def test_predicting_with_mock_longer_height(self):
np.random.seed(1337)
height, width = 3, 3
inp = np.random.random((12, 10, 6, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=0)
resized_inp = image_ops.resize_images_v2(
inp, size=[5, 3])
expected_output = resized_inp[:, 1:4, :, :]
self.assertAllClose(expected_output, actual_output)
def test_predicting_with_mock_longer_width(self):
np.random.seed(1337)
height, width = 4, 6
inp = np.random.random((12, 8, 16, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=0)
resized_inp = image_ops.resize_images_v2(
inp, size=[4, 8])
expected_output = resized_inp[:, :, 1:7, :]
self.assertAllClose(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
class RescalingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_rescaling_base(self):
kwargs = {'scale': 0.004}
testing_utils.layer_test(
image_preprocessing.Rescaling,
kwargs=kwargs,
input_shape=(2, 5, 6, 3),
expected_output_shape=(None, 5, 6, 3))
@tf_test_util.run_v2_only
def test_rescaling_correctness_float(self):
layer = image_preprocessing.Rescaling(0.004)
inputs = random_ops.random_uniform((2, 4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004)
@tf_test_util.run_v2_only
def test_rescaling_correctness_int(self):
layer = image_preprocessing.Rescaling(0.004)
inputs = random_ops.random_uniform((2, 4, 5, 3), 0, 100, dtype='int32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype.name, 'float32')
self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004)
def test_config_with_custom_name(self):
layer = image_preprocessing.Rescaling(0.5, name='rescaling')
config = layer.get_config()
layer_1 = image_preprocessing.Rescaling.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(keras_parameterized.TestCase):
def _run_test(self, mode, expected_output=None, mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = [1 for _ in range(num_samples)]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
expected_output = inp
if mode == 'horizontal' or mode == 'horizontal_and_vertical':
expected_output = np.flip(expected_output, axis=1)
if mode == 'vertical' or mode == 'horizontal_and_vertical':
expected_output = np.flip(expected_output, axis=2)
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomFlip(mode)
actual_output = layer(inp, training=1)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_flip_horizontal', 'horizontal'),
('random_flip_vertical', 'vertical'),
('random_flip_both', 'horizontal_and_vertical'))
def test_random_flip(self, mode):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
self._run_test(mode)
def test_random_flip_horizontal_half(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
self._run_test('horizontal', expected_output, mock_random)
def test_random_flip_vertical_half(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
self._run_test('vertical', expected_output, mock_random)
def test_random_flip_inference(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_default(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
mock_random = [1, 1]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with self.cached_session(use_gpu=True):
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=1)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomFlip(name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomFlip.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(keras_parameterized.TestCase):
def _run_test(self,
lower,
upper,
expected_output=None,
mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = 0.2
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
# reduce mean on height.
inp_mean = np.mean(inp, axis=1, keepdims=True)
# reduce mean on width.
inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((lower, upper))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_contrast_2_by_5', 0.2, 0.5),
('random_contrast_2_by_13', 0.2, 1.3),
('random_contrast_5_by_2', 0.5, 0.2))
def test_random_contrast(self, lower, upper):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
self._run_test(lower, upper)
@parameterized.named_parameters(
('random_contrast_amplitude_2', 0.2),
('random_contrast_amplitude_5', 0.5))
def test_random_contrast_amplitude(self, amplitude):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.random((2, 5, 8, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast(amplitude)
layer(input_images)
def test_random_contrast_inference(self):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_contrast_int_dtype(self):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
layer(input_images)
def test_random_contrast_invalid_bounds(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((-0.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((1.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((0.1, -0.2))
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomContrast.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2),
('random_translate_tuple_factor', (.5, .4), (.2, .3)))
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_negative_lower(self):
mock_offset = np.random.random((12, 1))
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_offset):
with self.cached_session(use_gpu=True):
layer = image_preprocessing.RandomTranslation((-0.2, .3), .4)
layer_2 = image_preprocessing.RandomTranslation((0.2, .3), .4)
inp = np.random.random((12, 5, 8, 3)).astype(np.float32)
actual_output = layer(inp, training=1)
actual_output_2 = layer_2(inp, training=1)
self.assertAllClose(actual_output, actual_output_2)
def test_random_translation_inference(self):
with CustomObjectScope(
{'RandomTranslation': image_preprocessing.RandomTranslation}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomTranslation(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'factor': factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomRotation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(('random_rotate_4', .4),
('random_rotate_3', .3),
('random_rotate_tuple_factor', (.5, .4)))
def test_random_rotation(self, factor):
self._run_test(factor)
def test_random_rotation_inference(self):
with CustomObjectScope(
{'RandomTranslation': image_preprocessing.RandomRotation}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomRotation(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomRotation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomZoom,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
def test_random_zoom_in(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
@parameterized.named_parameters(
('random_zoom_4_by_6', 1.4, 1.6), ('random_zoom_2_by_3', 1.2, 1.3),
('random_zoom_tuple_factor', (1.4, 1.5), (1.2, 1.3)))
def test_random_zoom_out(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_zoom_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomZoom((.5, .4), .2)
with self.assertRaises(ValueError):
image_preprocessing.RandomZoom(.2, (.5, .4))
def test_random_zoom_inference(self):
with CustomObjectScope(
{'RandomZoom': image_preprocessing.RandomZoom}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomZoom(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with tf_test_util.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomHeight(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[2], 8)
self.assertEqual(img_out.shape[3], 3)
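      # The height (axis 1) is resized by a random factor, so only the batch,
      # width and channel dimensions are asserted here.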
@parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
('random_height_3_by_2', (.3, 1.2)),
('random_height_3', .3))
def test_random_height_basic(self, factor):
self._run_test(factor)
def test_valid_random_height(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf_test_util.use_gpu():
img = np.random.random((12, 5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
def test_random_height_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomHeight((-1.5, .4))
def test_random_height_inference(self):
with CustomObjectScope({'RandomHeight': image_preprocessing.RandomHeight}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomHeight(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomHeight.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with tf_test_util.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomWidth(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[1], 5)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
('random_width_3_by_2', (.3, 1.2)),
('random_width_3', .3))
def test_random_width_basic(self, factor):
self._run_test(factor)
def test_valid_random_width(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf_test_util.use_gpu():
img = np.random.random((12, 8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[2], 3)
def test_random_width_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomWidth((-1.5, .4))
def test_random_width_inference(self):
with CustomObjectScope({'RandomWidth': image_preprocessing.RandomWidth}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomWidth(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomWidth.from_config(config)
self.assertEqual(layer_1.name, layer.name)
if __name__ == '__main__':
test.main()
|
|
import itertools
import random
from raiden.constants import MAXIMUM_PENDING_TRANSFERS
from raiden.transfer import channel, secret_registry
from raiden.transfer.architecture import Event, StateChange, TransitionResult
from raiden.transfer.events import SendProcessed
from raiden.transfer.mediated_transfer.events import (
CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
EventUnexpectedSecretReveal,
EventUnlockClaimFailed,
EventUnlockClaimSuccess,
EventUnlockFailed,
EventUnlockSuccess,
SendSecretReveal,
)
from raiden.transfer.mediated_transfer.state import (
HashTimeLockState,
LockedTransferSignedState,
LockedTransferUnsignedState,
MediationPairState,
MediatorTransferState,
WaitingTransferState,
)
from raiden.transfer.mediated_transfer.state_change import (
ActionInitMediator,
ReceiveLockExpired,
ReceiveSecretReveal,
ReceiveTransferRefund,
)
from raiden.transfer.state import (
CHANNEL_STATE_CLOSED,
CHANNEL_STATE_OPENED,
NODE_NETWORK_REACHABLE,
NODE_NETWORK_UNREACHABLE,
NettingChannelState,
RouteState,
message_identifier_from_prng,
)
from raiden.transfer.state_change import (
ActionChangeNodeNetworkState,
Block,
ContractReceiveSecretReveal,
ReceiveUnlock,
)
from raiden.transfer.utils import is_valid_secret_reveal
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
BlockExpiration,
BlockHash,
BlockNumber,
BlockTimeout,
ChannelMap,
Dict,
List,
LockType,
NodeNetworkStateMap,
Optional,
PaymentWithFeeAmount,
Secret,
SecretHash,
SuccessOrError,
Tuple,
Union,
cast,
)
STATE_SECRET_KNOWN = (
"payee_secret_revealed",
"payee_contract_unlock",
"payee_balance_proof",
"payer_secret_revealed",
"payer_waiting_unlock",
"payer_balance_proof",
)
STATE_TRANSFER_PAID = ("payee_contract_unlock", "payee_balance_proof", "payer_balance_proof")
# TODO: fix expired state, it is not final
STATE_TRANSFER_FINAL = (
"payee_contract_unlock",
"payee_balance_proof",
"payee_expired",
"payer_balance_proof",
"payer_expired",
)
def is_lock_valid(expiration: BlockExpiration, block_number: BlockNumber) -> bool:
""" True if the lock has not expired. """
return block_number <= BlockNumber(expiration)
def is_safe_to_wait(
lock_expiration: BlockExpiration, reveal_timeout: BlockTimeout, block_number: BlockNumber
) -> SuccessOrError:
""" True if waiting is safe, i.e. there are more than enough blocks to safely
unlock on chain.
"""
    # The reveal timeout must never be larger than the lock expiration,
    # otherwise the expected block_number would be negative.
assert block_number > 0
assert reveal_timeout > 0
assert lock_expiration > reveal_timeout
lock_timeout = lock_expiration - block_number
# A node may wait for a new balance proof while there are reveal_timeout
# blocks left, at that block and onwards it is not safe to wait.
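    # Worked example (hypothetical numbers): lock_expiration=110 and
    # block_number=100 leave a lock_timeout of 10 blocks. With reveal_timeout=7
    # waiting is still safe (10 > 7); with reveal_timeout=10 it is not, and the
    # unsafe message below is returned.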
if lock_timeout > reveal_timeout:
return True, None
msg = (
f"lock timeout is unsafe."
f" timeout must be larger than {reveal_timeout}, but it is {lock_timeout}."
f" expiration: {lock_expiration} block_number: {block_number}"
)
return False, msg
def is_channel_usable(
candidate_channel_state: NettingChannelState,
transfer_amount: PaymentWithFeeAmount,
lock_timeout: BlockTimeout,
) -> bool:
pending_transfers = channel.get_number_of_pending_transfers(candidate_channel_state.our_state)
distributable = channel.get_distributable(
candidate_channel_state.our_state, candidate_channel_state.partner_state
)
return (
lock_timeout > 0
and channel.get_status(candidate_channel_state) == CHANNEL_STATE_OPENED
and candidate_channel_state.settle_timeout >= lock_timeout
and candidate_channel_state.reveal_timeout < lock_timeout
and pending_transfers < MAXIMUM_PENDING_TRANSFERS
and transfer_amount <= distributable
and channel.is_valid_amount(candidate_channel_state.our_state, transfer_amount)
)
def is_send_transfer_almost_equal(
send_channel: NettingChannelState,
send: LockedTransferUnsignedState,
received: LockedTransferSignedState,
) -> bool:
""" True if both transfers are for the same mediated transfer. """
# The only thing that may change is the direction of the transfer
return (
isinstance(send, LockedTransferUnsignedState)
and isinstance(received, LockedTransferSignedState)
and send.payment_identifier == received.payment_identifier
and send.token == received.token
and send.lock.amount == received.lock.amount - send_channel.mediation_fee
and send.lock.expiration == received.lock.expiration
and send.lock.secrethash == received.lock.secrethash
and send.initiator == received.initiator
and send.target == received.target
)
def has_secret_registration_started(
channel_states: List[NettingChannelState],
transfers_pair: List[MediationPairState],
secrethash: SecretHash,
) -> bool:
# If it's known the secret is registered on-chain, the node should not send
# a new transaction. Note there is a race condition:
#
# - Node B learns the secret on-chain, sends a secret reveal to A
# - Node A receives the secret reveal off-chain prior to the event for the
# secret registration, if the lock is in the danger zone A will try to
# register the secret on-chain, because from its perspective the secret
# is not there yet.
is_secret_registered_onchain = any(
channel.is_secret_known_onchain(payer_channel.partner_state, secrethash)
for payer_channel in channel_states
)
has_pending_transaction = any(
pair.payer_state == "payer_waiting_secret_reveal" for pair in transfers_pair
)
return is_secret_registered_onchain or has_pending_transaction
def filter_reachable_routes(
routes: List[RouteState], nodeaddresses_to_networkstates: NodeNetworkStateMap
) -> List[RouteState]:
"""This function makes sure we use reachable routes only."""
reachable_routes = []
for route in routes:
node_network_state = nodeaddresses_to_networkstates.get(
route.node_address, NODE_NETWORK_UNREACHABLE
)
if node_network_state == NODE_NETWORK_REACHABLE:
reachable_routes.append(route)
return reachable_routes
def filter_used_routes(
transfers_pair: List[MediationPairState], routes: List[RouteState]
) -> List[RouteState]:
"""This function makes sure we filter routes that have already been used.
So in a setup like this, we want to make sure that node 2, having tried to
route the transfer through 3 will also try 5 before sending it backwards to 1
1 -> 2 -> 3 -> 4
v ^
5 -> 6 -> 7
This function will return routes as provided in their original order.
"""
channelid_to_route = {r.channel_identifier: r for r in routes}
routes_order = {route.node_address: index for index, route in enumerate(routes)}
for pair in transfers_pair:
channelid = pair.payer_transfer.balance_proof.channel_identifier
if channelid in channelid_to_route:
del channelid_to_route[channelid]
channelid = pair.payee_transfer.balance_proof.channel_identifier
if channelid in channelid_to_route:
del channelid_to_route[channelid]
return sorted(channelid_to_route.values(), key=lambda route: routes_order[route.node_address])
def get_payee_channel(
channelidentifiers_to_channels: ChannelMap, transfer_pair: MediationPairState
) -> Optional[NettingChannelState]:
""" Returns the payee channel of a given transfer pair or None if it's not found """
payee_channel_identifier = transfer_pair.payee_transfer.balance_proof.channel_identifier
return channelidentifiers_to_channels.get(payee_channel_identifier)
def get_payer_channel(
channelidentifiers_to_channels: ChannelMap, transfer_pair: MediationPairState
) -> Optional[NettingChannelState]:
""" Returns the payer channel of a given transfer pair or None if it's not found """
payer_channel_identifier = transfer_pair.payer_transfer.balance_proof.channel_identifier
return channelidentifiers_to_channels.get(payer_channel_identifier)
def get_pending_transfer_pairs(
transfers_pair: List[MediationPairState],
) -> List[MediationPairState]:
""" Return the transfer pairs that are not at a final state. """
pending_pairs = list(
pair
for pair in transfers_pair
if pair.payee_state not in STATE_TRANSFER_FINAL
or pair.payer_state not in STATE_TRANSFER_FINAL
)
return pending_pairs
def get_lock_amount_after_fees(
lock: HashTimeLockState, payee_channel: NettingChannelState
) -> PaymentWithFeeAmount:
"""
Return the lock.amount after fees are taken.
Fees are taken only for the outgoing channel, which is the one with
collateral locked from this node.
"""
return PaymentWithFeeAmount(lock.amount - payee_channel.mediation_fee)
def sanity_check(state: MediatorTransferState, channelidentifiers_to_channels: ChannelMap) -> None:
""" Check invariants that must hold. """
# if a transfer is paid we must know the secret
all_transfers_states = itertools.chain(
(pair.payee_state for pair in state.transfers_pair),
(pair.payer_state for pair in state.transfers_pair),
)
if any(state in STATE_TRANSFER_PAID for state in all_transfers_states):
assert state.secret is not None
# the "transitivity" for these values is checked below as part of
# almost_equal check
if state.transfers_pair:
first_pair = state.transfers_pair[0]
assert state.secrethash == first_pair.payer_transfer.lock.secrethash
for pair in state.transfers_pair:
payee_channel = get_payee_channel(
channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=pair
)
# Channel could have been removed
if not payee_channel:
continue
assert is_send_transfer_almost_equal(
send_channel=payee_channel, send=pair.payee_transfer, received=pair.payer_transfer
)
assert pair.payer_state in pair.valid_payer_states
assert pair.payee_state in pair.valid_payee_states
for original, refund in zip(state.transfers_pair[:-1], state.transfers_pair[1:]):
assert original.payee_address == refund.payer_address
payer_channel = get_payer_channel(
channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=refund
)
# Channel could have been removed
if not payer_channel:
continue
transfer_sent = original.payee_transfer
transfer_received = refund.payer_transfer
assert is_send_transfer_almost_equal(
send_channel=payer_channel, send=transfer_sent, received=transfer_received
)
if state.waiting_transfer and state.transfers_pair:
last_transfer_pair = state.transfers_pair[-1]
payee_channel = get_payee_channel(
channelidentifiers_to_channels=channelidentifiers_to_channels,
transfer_pair=last_transfer_pair,
)
# Channel could have been removed
if payee_channel:
transfer_sent = last_transfer_pair.payee_transfer
transfer_received = state.waiting_transfer.transfer
assert is_send_transfer_almost_equal(
send_channel=payee_channel, send=transfer_sent, received=transfer_received
)
def clear_if_finalized(
iteration: TransitionResult, channelidentifiers_to_channels: ChannelMap
) -> TransitionResult[MediatorTransferState]:
"""Clear the mediator task if all the locks have been finalized.
A lock is considered finalized if it has been removed from the merkle tree
offchain, either because the transfer was unlocked or expired, or because the
channel was settled on chain and therefore the channel is removed."""
state = cast(MediatorTransferState, iteration.new_state)
if state is None:
return iteration
# Only clear the task if all channels have the lock cleared.
secrethash = state.secrethash
for pair in state.transfers_pair:
payer_channel = get_payer_channel(channelidentifiers_to_channels, pair)
if payer_channel and channel.is_lock_pending(payer_channel.partner_state, secrethash):
return iteration
payee_channel = get_payee_channel(channelidentifiers_to_channels, pair)
if payee_channel and channel.is_lock_pending(payee_channel.our_state, secrethash):
return iteration
if state.waiting_transfer:
waiting_transfer = state.waiting_transfer.transfer
waiting_channel_identifier = waiting_transfer.balance_proof.channel_identifier
waiting_channel = channelidentifiers_to_channels.get(waiting_channel_identifier)
if waiting_channel and channel.is_lock_pending(waiting_channel.partner_state, secrethash):
return iteration
return TransitionResult(None, iteration.events)
def next_channel_from_routes(
available_routes: List["RouteState"],
channelidentifiers_to_channels: Dict,
transfer_amount: PaymentWithFeeAmount,
lock_timeout: BlockTimeout,
) -> Optional[NettingChannelState]:
""" Returns the first route that may be used to mediated the transfer.
The routing service can race with local changes, so the recommended routes
must be validated.
Args:
available_routes: Current available routes that may be used, it's
assumed that the available_routes list is ordered from best to
worst.
channelidentifiers_to_channels: Mapping from channel identifier
to NettingChannelState.
transfer_amount: The amount of tokens that will be transferred
through the given route.
lock_timeout: Number of blocks until the lock expires, used to filter
out channels that have a smaller settlement window.
Returns:
        The channel state of the first usable route, or None if no route can be used.
"""
for route in available_routes:
channel_state = channelidentifiers_to_channels.get(route.channel_identifier)
if not channel_state:
continue
if is_channel_usable(channel_state, transfer_amount, lock_timeout):
return channel_state
return None
def forward_transfer_pair(
payer_transfer: LockedTransferSignedState,
available_routes: List["RouteState"],
channelidentifiers_to_channels: Dict,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> Tuple[Optional[MediationPairState], List[Event]]:
""" Given a payer transfer tries a new route to proceed with the mediation.
Args:
payer_transfer: The transfer received from the payer_channel.
available_routes: Current available routes that may be used, it's
assumed that the routes list is ordered from best to worst.
channelidentifiers_to_channels: All the channels available for this
transfer.
pseudo_random_generator: Number generator to generate a message id.
block_number: The current block number.
"""
transfer_pair = None
mediated_events: List[Event] = list()
lock_timeout = BlockTimeout(payer_transfer.lock.expiration - block_number)
payee_channel = next_channel_from_routes(
available_routes=available_routes,
channelidentifiers_to_channels=channelidentifiers_to_channels,
transfer_amount=payer_transfer.lock.amount,
lock_timeout=lock_timeout,
)
if payee_channel:
assert payee_channel.settle_timeout >= lock_timeout
assert payee_channel.token_address == payer_transfer.token
message_identifier = message_identifier_from_prng(pseudo_random_generator)
lock = payer_transfer.lock
lockedtransfer_event = channel.send_lockedtransfer(
channel_state=payee_channel,
initiator=payer_transfer.initiator,
target=payer_transfer.target,
amount=get_lock_amount_after_fees(lock, payee_channel),
message_identifier=message_identifier,
payment_identifier=payer_transfer.payment_identifier,
expiration=lock.expiration,
secrethash=lock.secrethash,
)
assert lockedtransfer_event
transfer_pair = MediationPairState(
payer_transfer, payee_channel.partner_state.address, lockedtransfer_event.transfer
)
mediated_events = [lockedtransfer_event]
return (transfer_pair, mediated_events)
def backward_transfer_pair(
backward_channel: NettingChannelState,
payer_transfer: LockedTransferSignedState,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> Tuple[Optional[MediationPairState], List[Event]]:
""" Sends a transfer backwards, allowing the previous hop to try a new
route.
When all the routes available for this node failed, send a transfer
backwards with the same amount and secrethash, allowing the previous hop to
do a retry.
Args:
backward_channel: The original channel which sent the mediated transfer
to this node.
payer_transfer: The *latest* payer transfer which is backing the
mediation.
block_number: The current block number.
Returns:
        The mediator pair and the corresponding refund event.
"""
transfer_pair = None
events: List[Event] = list()
lock = payer_transfer.lock
lock_timeout = BlockTimeout(lock.expiration - block_number)
# Ensure the refund transfer's lock has a safe expiration, otherwise don't
# do anything and wait for the received lock to expire.
if is_channel_usable(backward_channel, lock.amount, lock_timeout):
message_identifier = message_identifier_from_prng(pseudo_random_generator)
refund_transfer = channel.send_refundtransfer(
channel_state=backward_channel,
initiator=payer_transfer.initiator,
target=payer_transfer.target,
amount=get_lock_amount_after_fees(lock, backward_channel),
message_identifier=message_identifier,
payment_identifier=payer_transfer.payment_identifier,
expiration=lock.expiration,
secrethash=lock.secrethash,
)
transfer_pair = MediationPairState(
payer_transfer, backward_channel.partner_state.address, refund_transfer.transfer
)
events.append(refund_transfer)
return (transfer_pair, events)
def set_offchain_secret(
state: MediatorTransferState,
channelidentifiers_to_channels: ChannelMap,
secret: Secret,
secrethash: SecretHash,
) -> List[Event]:
""" Set the secret to all mediated transfers. """
state.secret = secret
for pair in state.transfers_pair:
payer_channel = channelidentifiers_to_channels.get(
pair.payer_transfer.balance_proof.channel_identifier
)
if payer_channel:
channel.register_offchain_secret(payer_channel, secret, secrethash)
payee_channel = channelidentifiers_to_channels.get(
pair.payee_transfer.balance_proof.channel_identifier
)
if payee_channel:
channel.register_offchain_secret(payee_channel, secret, secrethash)
# The secret should never be revealed if `waiting_transfer` is not None.
    # For this to happen, this node must have received a transfer which it did
    # *not* mediate, and nevertheless the secret was revealed.
#
# This can only be possible if the initiator reveals the secret without the
# target's secret request, or if the node which sent the `waiting_transfer`
# has sent another transfer which reached the target (meaning someone along
# the path will lose tokens).
if state.waiting_transfer:
payer_channel = channelidentifiers_to_channels.get(
state.waiting_transfer.transfer.balance_proof.channel_identifier
)
if payer_channel:
channel.register_offchain_secret(payer_channel, secret, secrethash)
unexpected_reveal = EventUnexpectedSecretReveal(
secrethash=secrethash, reason="The mediator has a waiting transfer."
)
return [unexpected_reveal]
return list()
def set_onchain_secret(
state: MediatorTransferState,
channelidentifiers_to_channels: ChannelMap,
secret: Secret,
secrethash: SecretHash,
block_number: BlockNumber,
) -> List[Event]:
""" Set the secret to all mediated transfers.
The secret should have been learned from the secret registry.
"""
state.secret = secret
for pair in state.transfers_pair:
payer_channel = channelidentifiers_to_channels.get(
pair.payer_transfer.balance_proof.channel_identifier
)
if payer_channel:
channel.register_onchain_secret(payer_channel, secret, secrethash, block_number)
payee_channel = channelidentifiers_to_channels.get(
pair.payee_transfer.balance_proof.channel_identifier
)
if payee_channel:
channel.register_onchain_secret(
channel_state=payee_channel,
secret=secret,
secrethash=secrethash,
secret_reveal_block_number=block_number,
)
# Like the off-chain secret reveal, the secret should never be revealed
# on-chain if there is a waiting transfer.
if state.waiting_transfer:
payer_channel = channelidentifiers_to_channels.get(
state.waiting_transfer.transfer.balance_proof.channel_identifier
)
if payer_channel:
channel.register_onchain_secret(
channel_state=payer_channel,
secret=secret,
secrethash=secrethash,
secret_reveal_block_number=block_number,
)
unexpected_reveal = EventUnexpectedSecretReveal(
secrethash=secrethash, reason="The mediator has a waiting transfer."
)
return [unexpected_reveal]
return list()
def set_offchain_reveal_state(
transfers_pair: List[MediationPairState], payee_address: Address
) -> None:
""" Set the state of a transfer *sent* to a payee. """
for pair in transfers_pair:
if pair.payee_address == payee_address:
pair.payee_state = "payee_secret_revealed"
def events_for_expired_pairs(
channelidentifiers_to_channels: ChannelMap,
transfers_pair: List[MediationPairState],
waiting_transfer: Optional[WaitingTransferState],
block_number: BlockNumber,
) -> List[Event]:
""" Informational events for expired locks. """
pending_transfers_pairs = get_pending_transfer_pairs(transfers_pair)
events: List[Event] = list()
for pair in pending_transfers_pairs:
payer_balance_proof = pair.payer_transfer.balance_proof
payer_channel = channelidentifiers_to_channels.get(payer_balance_proof.channel_identifier)
if not payer_channel:
continue
has_payer_transfer_expired = channel.is_transfer_expired(
transfer=pair.payer_transfer, affected_channel=payer_channel, block_number=block_number
)
if has_payer_transfer_expired:
# For safety, the correct behavior is:
#
# - If the payee has been paid, then the payer must pay too.
#
# And the corollary:
#
# - If the payer transfer has expired, then the payee transfer must
# have expired too.
#
# The problem is that this corollary cannot be asserted. If a user
# is running Raiden without a monitoring service, then it may go
# offline after having paid a transfer to a payee, but without
# getting a balance proof of the payer, and once it comes back
# online the transfer may have expired.
#
# assert pair.payee_state == 'payee_expired'
pair.payer_state = "payer_expired"
unlock_claim_failed = EventUnlockClaimFailed(
pair.payer_transfer.payment_identifier,
pair.payer_transfer.lock.secrethash,
"lock expired",
)
events.append(unlock_claim_failed)
if waiting_transfer and waiting_transfer.state != "expired":
waiting_transfer.state = "expired"
unlock_claim_failed = EventUnlockClaimFailed(
waiting_transfer.transfer.payment_identifier,
waiting_transfer.transfer.lock.secrethash,
"lock expired",
)
events.append(unlock_claim_failed)
return events
def events_for_secretreveal(
transfers_pair: List[MediationPairState],
secret: Secret,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Reveal the secret off-chain.
The secret is revealed off-chain even if there is a pending transaction to
reveal it on-chain, this allows the unlock to happen off-chain, which is
faster.
This node is named N, suppose there is a mediated transfer with two refund
transfers, one from B and one from C:
A-N-B...B-N-C..C-N-D
Under normal operation N will first learn the secret from D, then reveal to
    C, wait for C to confirm that the secret is known before revealing it to B, and
again wait for B before revealing the secret to A.
If B somehow sent a reveal secret before C and D, then the secret will be
revealed to A, but not C and D, meaning the secret won't be propagated
forward. Even if D sent a reveal secret at about the same time, the secret
will only be revealed to B upon confirmation from C.
If the proof doesn't arrive in time and the lock's expiration is at risk, N
won't lose tokens since it knows the secret can go on-chain at any time.
"""
events: List[Event] = list()
for pair in reversed(transfers_pair):
payee_knows_secret = pair.payee_state in STATE_SECRET_KNOWN
payer_knows_secret = pair.payer_state in STATE_SECRET_KNOWN
is_transfer_pending = pair.payer_state == "payer_pending"
should_send_secret = payee_knows_secret and not payer_knows_secret and is_transfer_pending
if should_send_secret:
message_identifier = message_identifier_from_prng(pseudo_random_generator)
pair.payer_state = "payer_secret_revealed"
payer_transfer = pair.payer_transfer
revealsecret = SendSecretReveal(
recipient=payer_transfer.balance_proof.sender,
channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
message_identifier=message_identifier,
secret=secret,
)
events.append(revealsecret)
return events
def events_for_balanceproof(
channelidentifiers_to_channels: ChannelMap,
transfers_pair: List[MediationPairState],
pseudo_random_generator: random.Random,
block_number: BlockNumber,
secret: Secret,
secrethash: SecretHash,
) -> List[Event]:
""" While it's safe do the off-chain unlock. """
events: List[Event] = list()
for pair in reversed(transfers_pair):
payee_knows_secret = pair.payee_state in STATE_SECRET_KNOWN
payee_payed = pair.payee_state in STATE_TRANSFER_PAID
payee_channel = get_payee_channel(channelidentifiers_to_channels, pair)
payee_channel_open = (
payee_channel and channel.get_status(payee_channel) == CHANNEL_STATE_OPENED
)
payer_channel = get_payer_channel(channelidentifiers_to_channels, pair)
        # The mediator must not send a balance proof to the payee if the lock
        # is in the danger zone, because the payer may not do the same and the
        # on-chain unlock may fail. If the lock is nearing its expiration
        # block, then the on-chain unlock should be done, and if successful it
        # can then be unlocked off-chain.
is_safe_to_send_balanceproof = False
if payer_channel:
is_safe_to_send_balanceproof, _ = is_safe_to_wait(
pair.payer_transfer.lock.expiration, payer_channel.reveal_timeout, block_number
)
should_send_balanceproof_to_payee = (
payee_channel_open
and payee_knows_secret
and not payee_payed
and is_safe_to_send_balanceproof
)
if should_send_balanceproof_to_payee:
# At this point we are sure that payee_channel exists due to the
# payee_channel_open check above. So let mypy know about this
assert payee_channel
payee_channel = cast(NettingChannelState, payee_channel)
pair.payee_state = "payee_balance_proof"
message_identifier = message_identifier_from_prng(pseudo_random_generator)
unlock_lock = channel.send_unlock(
channel_state=payee_channel,
message_identifier=message_identifier,
payment_identifier=pair.payee_transfer.payment_identifier,
secret=secret,
secrethash=secrethash,
)
unlock_success = EventUnlockSuccess(
pair.payer_transfer.payment_identifier, pair.payer_transfer.lock.secrethash
)
events.append(unlock_lock)
events.append(unlock_success)
return events
def events_for_onchain_secretreveal_if_dangerzone(
channelmap: ChannelMap,
secrethash: SecretHash,
transfers_pair: List[MediationPairState],
block_number: BlockNumber,
block_hash: BlockHash,
) -> List[Event]:
""" Reveal the secret on-chain if the lock enters the unsafe region and the
secret is not yet on-chain.
"""
events: List[Event] = list()
all_payer_channels = []
for pair in transfers_pair:
channel_state = get_payer_channel(channelmap, pair)
if channel_state:
all_payer_channels.append(channel_state)
transaction_sent = has_secret_registration_started(
all_payer_channels, transfers_pair, secrethash
)
# Only consider the transfers which have a pair. This means if we have a
# waiting transfer and for some reason the node knows the secret, it will
# not try to register it. Otherwise it would be possible for an attacker to
    # reveal the secret late, just to force the node to send an unnecessary
# transaction.
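    # E.g. (hypothetical numbers) with reveal_timeout=7 and lock.expiration=110,
    # waiting stops being safe at block 103 (the remaining lock_timeout drops to
    # 7), which is when the secret is registered on-chain if it is known.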
for pair in get_pending_transfer_pairs(transfers_pair):
payer_channel = get_payer_channel(channelmap, pair)
if not payer_channel:
continue
lock = pair.payer_transfer.lock
safe_to_wait, _ = is_safe_to_wait(
lock.expiration, payer_channel.reveal_timeout, block_number
)
secret_known = channel.is_secret_known(
payer_channel.partner_state, pair.payer_transfer.lock.secrethash
)
if not safe_to_wait and secret_known:
pair.payer_state = "payer_waiting_secret_reveal"
if not transaction_sent:
secret = channel.get_secret(payer_channel.partner_state, lock.secrethash)
assert secret, "the secret should be known at this point"
reveal_events = secret_registry.events_for_onchain_secretreveal(
channel_state=payer_channel,
secret=secret,
expiration=lock.expiration,
block_hash=block_hash,
)
events.extend(reveal_events)
transaction_sent = True
return events
def events_for_onchain_secretreveal_if_closed(
channelmap: ChannelMap,
transfers_pair: List[MediationPairState],
secret: Secret,
secrethash: SecretHash,
block_hash: BlockHash,
) -> List[Event]:
""" Register the secret on-chain if the payer channel is already closed and
the mediator learned the secret off-chain.
Balance proofs are not exchanged for closed channels, so there is no reason
    to wait for the unsafe region before registering the secret.
Note:
If the secret is learned before the channel is closed, then the channel
will register the secrets in bulk, not the transfer.
"""
events: List[Event] = list()
all_payer_channels = []
for pair in transfers_pair:
channel_state = get_payer_channel(channelmap, pair)
if channel_state:
all_payer_channels.append(channel_state)
transaction_sent = has_secret_registration_started(
all_payer_channels, transfers_pair, secrethash
)
# Just like the case for entering the danger zone, this will only consider
# the transfers which have a pair.
for pending_pair in get_pending_transfer_pairs(transfers_pair):
payer_channel = get_payer_channel(channelmap, pending_pair)
# Don't register the secret on-chain if the channel is open or settled
if payer_channel and channel.get_status(payer_channel) == CHANNEL_STATE_CLOSED:
pending_pair.payer_state = "payer_waiting_secret_reveal"
if not transaction_sent:
partner_state = payer_channel.partner_state
lock = channel.get_lock(partner_state, secrethash)
# The mediator task lives as long as there are any pending
# locks, it may be the case that some of the transfer_pairs got
# resolved off-chain, but others didn't. For this reason we
# must check if the lock is still part of the channel
if lock:
reveal_events = secret_registry.events_for_onchain_secretreveal(
channel_state=payer_channel,
secret=secret,
expiration=lock.expiration,
block_hash=block_hash,
)
events.extend(reveal_events)
transaction_sent = True
return events
def events_to_remove_expired_locks(
mediator_state: MediatorTransferState,
channelidentifiers_to_channels: ChannelMap,
block_number: BlockNumber,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Clear the channels which have expired locks.
This only considers the *sent* transfers, received transfers can only be
updated by the partner.
"""
events: List[Event] = list()
for transfer_pair in mediator_state.transfers_pair:
balance_proof = transfer_pair.payee_transfer.balance_proof
channel_identifier = balance_proof.channel_identifier
channel_state = channelidentifiers_to_channels.get(channel_identifier)
if not channel_state:
continue
secrethash = mediator_state.secrethash
lock: Union[None, LockType] = None
if secrethash in channel_state.our_state.secrethashes_to_lockedlocks:
assert secrethash not in channel_state.our_state.secrethashes_to_unlockedlocks
lock = channel_state.our_state.secrethashes_to_lockedlocks.get(secrethash)
elif secrethash in channel_state.our_state.secrethashes_to_unlockedlocks:
lock = channel_state.our_state.secrethashes_to_unlockedlocks.get(secrethash)
if lock:
lock_expiration_threshold = channel.get_sender_expiration_threshold(lock)
has_lock_expired, _ = channel.is_lock_expired(
end_state=channel_state.our_state,
lock=lock,
block_number=block_number,
lock_expiration_threshold=lock_expiration_threshold,
)
is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED
if has_lock_expired and is_channel_open:
transfer_pair.payee_state = "payee_expired"
expired_lock_events = channel.events_for_expired_lock(
channel_state=channel_state,
locked_lock=lock,
pseudo_random_generator=pseudo_random_generator,
)
events.extend(expired_lock_events)
unlock_failed = EventUnlockFailed(
transfer_pair.payee_transfer.payment_identifier,
transfer_pair.payee_transfer.lock.secrethash,
"lock expired",
)
events.append(unlock_failed)
return events
def secret_learned(
state: MediatorTransferState,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
block_hash: BlockHash,
secret: Secret,
secrethash: SecretHash,
payee_address: Address,
) -> TransitionResult[MediatorTransferState]:
""" Unlock the payee lock, reveal the lock to the payer, and if necessary
register the secret on-chain.
"""
secret_reveal_events = set_offchain_secret(
state, channelidentifiers_to_channels, secret, secrethash
)
set_offchain_reveal_state(state.transfers_pair, payee_address)
onchain_secret_reveal = events_for_onchain_secretreveal_if_closed(
channelmap=channelidentifiers_to_channels,
transfers_pair=state.transfers_pair,
secret=secret,
secrethash=secrethash,
block_hash=block_hash,
)
offchain_secret_reveal = events_for_secretreveal(
state.transfers_pair, secret, pseudo_random_generator
)
balance_proof = events_for_balanceproof(
channelidentifiers_to_channels,
state.transfers_pair,
pseudo_random_generator,
block_number,
secret,
secrethash,
)
events = secret_reveal_events + offchain_secret_reveal + balance_proof + onchain_secret_reveal
iteration = TransitionResult(state, events)
return iteration
def mediate_transfer(
state: MediatorTransferState,
possible_routes: List["RouteState"],
payer_channel: NettingChannelState,
channelidentifiers_to_channels: ChannelMap,
nodeaddresses_to_networkstates: NodeNetworkStateMap,
pseudo_random_generator: random.Random,
payer_transfer: LockedTransferSignedState,
block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
""" Try a new route or fail back to a refund.
The mediator can safely try a new route knowing that the tokens from
payer_transfer will cover the expenses of the mediation. If there is no
route available that may be used at the moment of the call the mediator may
send a refund back to the payer, allowing the payer to try a different
route.
"""
reachable_routes = filter_reachable_routes(possible_routes, nodeaddresses_to_networkstates)
available_routes = filter_used_routes(state.transfers_pair, reachable_routes)
assert payer_channel.partner_state.address == payer_transfer.balance_proof.sender
transfer_pair, mediated_events = forward_transfer_pair(
payer_transfer,
available_routes,
channelidentifiers_to_channels,
pseudo_random_generator,
block_number,
)
if transfer_pair is None:
assert not mediated_events
if state.transfers_pair:
original_pair = state.transfers_pair[0]
original_channel = get_payer_channel(channelidentifiers_to_channels, original_pair)
else:
original_channel = payer_channel
if original_channel:
transfer_pair, mediated_events = backward_transfer_pair(
original_channel, payer_transfer, pseudo_random_generator, block_number
)
else:
transfer_pair = None
mediated_events = list()
if transfer_pair is None:
assert not mediated_events
mediated_events = list()
state.waiting_transfer = WaitingTransferState(payer_transfer)
else:
# the list must be ordered from high to low expiration, expiration
# handling depends on it
state.transfers_pair.append(transfer_pair)
return TransitionResult(state, mediated_events)
def handle_init(
state_change: ActionInitMediator,
channelidentifiers_to_channels: ChannelMap,
nodeaddresses_to_networkstates: NodeNetworkStateMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
routes = state_change.routes
from_route = state_change.from_route
from_transfer = state_change.from_transfer
payer_channel = channelidentifiers_to_channels.get(from_route.channel_identifier)
# There is no corresponding channel for the message, ignore it
if not payer_channel:
return TransitionResult(None, [])
mediator_state = MediatorTransferState(secrethash=from_transfer.lock.secrethash, routes=routes)
is_valid, events, _ = channel.handle_receive_lockedtransfer(payer_channel, from_transfer)
if not is_valid:
# If the balance proof is not valid, do *not* create a task. Otherwise it's
# possible for an attacker to send multiple invalid transfers, and increase
# the memory usage of this Node.
return TransitionResult(None, events)
iteration = mediate_transfer(
mediator_state,
routes,
payer_channel,
channelidentifiers_to_channels,
nodeaddresses_to_networkstates,
pseudo_random_generator,
from_transfer,
block_number,
)
events.extend(iteration.events)
return TransitionResult(iteration.new_state, events)
def handle_block(
mediator_state: MediatorTransferState,
state_change: Block,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
) -> TransitionResult[MediatorTransferState]:
""" After Raiden learns about a new block this function must be called to
handle expiration of the hash time locks.
Args:
        mediator_state: The current mediator task state.
        state_change: The Block state change.
Return:
TransitionResult: The resulting iteration
"""
expired_locks_events = events_to_remove_expired_locks(
mediator_state,
channelidentifiers_to_channels,
state_change.block_number,
pseudo_random_generator,
)
secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone(
channelmap=channelidentifiers_to_channels,
secrethash=mediator_state.secrethash,
transfers_pair=mediator_state.transfers_pair,
block_number=state_change.block_number,
block_hash=state_change.block_hash,
)
unlock_fail_events = events_for_expired_pairs(
channelidentifiers_to_channels=channelidentifiers_to_channels,
transfers_pair=mediator_state.transfers_pair,
waiting_transfer=mediator_state.waiting_transfer,
block_number=state_change.block_number,
)
iteration = TransitionResult(
mediator_state, unlock_fail_events + secret_reveal_events + expired_locks_events
)
return iteration
def handle_refundtransfer(
mediator_state: MediatorTransferState,
mediator_state_change: ReceiveTransferRefund,
channelidentifiers_to_channels: ChannelMap,
nodeaddresses_to_networkstates: NodeNetworkStateMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
""" Validate and handle a ReceiveTransferRefund mediator_state change.
    A node might participate in a mediated transfer more than once because of
    refund transfers, e.g. A-B-C-B-D-T: B tried to mediate the transfer through
    C, which didn't have an available route to proceed and refunded B. At this
    point B is part of the path again and will try a new partner to proceed
    with the mediation through D, which finally reaches the target T.
In the above scenario B has two pairs of payer and payee transfers:
payer:A payee:C from the first SendLockedTransfer
payer:C payee:D from the following SendRefundTransfer
Args:
mediator_state (MediatorTransferState): Current mediator_state.
mediator_state_change (ReceiveTransferRefund): The mediator_state change.
Returns:
TransitionResult: The resulting iteration.
"""
events: List[Event] = list()
if mediator_state.secret is None:
# The last sent transfer is the only one that may be refunded, all the
# previous ones are refunded already.
transfer_pair = mediator_state.transfers_pair[-1]
payee_transfer = transfer_pair.payee_transfer
payer_transfer = mediator_state_change.transfer
channel_identifier = payer_transfer.balance_proof.channel_identifier
payer_channel = channelidentifiers_to_channels.get(channel_identifier)
if not payer_channel:
return TransitionResult(mediator_state, list())
is_valid, channel_events, _ = channel.handle_refundtransfer(
received_transfer=payee_transfer,
channel_state=payer_channel,
refund=mediator_state_change,
)
if not is_valid:
return TransitionResult(mediator_state, channel_events)
iteration = mediate_transfer(
mediator_state,
mediator_state_change.routes,
payer_channel,
channelidentifiers_to_channels,
nodeaddresses_to_networkstates,
pseudo_random_generator,
payer_transfer,
block_number,
)
events.extend(channel_events)
events.extend(iteration.events)
iteration = TransitionResult(mediator_state, events)
return iteration
def handle_offchain_secretreveal(
mediator_state: MediatorTransferState,
mediator_state_change: ReceiveSecretReveal,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
block_hash: BlockHash,
) -> TransitionResult[MediatorTransferState]:
""" Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. """
is_valid_reveal = is_valid_secret_reveal(
state_change=mediator_state_change,
transfer_secrethash=mediator_state.secrethash,
secret=mediator_state_change.secret,
)
is_secret_unknown = mediator_state.secret is None
# a SecretReveal should be rejected if the payer transfer
# has expired. To check for this, we use the last
# transfer pair.
transfer_pair = mediator_state.transfers_pair[-1]
payer_transfer = transfer_pair.payer_transfer
channel_identifier = payer_transfer.balance_proof.channel_identifier
payer_channel = channelidentifiers_to_channels.get(channel_identifier)
if not payer_channel:
return TransitionResult(mediator_state, list())
has_payer_transfer_expired = channel.is_transfer_expired(
transfer=transfer_pair.payer_transfer,
affected_channel=payer_channel,
block_number=block_number,
)
if is_secret_unknown and is_valid_reveal and not has_payer_transfer_expired:
iteration = secret_learned(
state=mediator_state,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
block_hash=block_hash,
secret=mediator_state_change.secret,
secrethash=mediator_state_change.secrethash,
payee_address=mediator_state_change.sender,
)
else:
iteration = TransitionResult(mediator_state, list())
return iteration
def handle_onchain_secretreveal(
mediator_state: MediatorTransferState,
onchain_secret_reveal: ContractReceiveSecretReveal,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
""" The secret was revealed on-chain, set the state of all transfers to
secret known.
"""
secrethash = onchain_secret_reveal.secrethash
is_valid_reveal = is_valid_secret_reveal(
state_change=onchain_secret_reveal,
transfer_secrethash=mediator_state.secrethash,
secret=onchain_secret_reveal.secret,
)
if is_valid_reveal:
secret = onchain_secret_reveal.secret
# Compare against the block number at which the event was emitted.
block_number = onchain_secret_reveal.block_number
secret_reveal = set_onchain_secret(
state=mediator_state,
channelidentifiers_to_channels=channelidentifiers_to_channels,
secret=secret,
secrethash=secrethash,
block_number=block_number,
)
balance_proof = events_for_balanceproof(
channelidentifiers_to_channels=channelidentifiers_to_channels,
transfers_pair=mediator_state.transfers_pair,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
secret=secret,
secrethash=secrethash,
)
iteration = TransitionResult(mediator_state, secret_reveal + balance_proof)
else:
iteration = TransitionResult(mediator_state, list())
return iteration
def handle_unlock(
mediator_state: MediatorTransferState,
state_change: ReceiveUnlock,
channelidentifiers_to_channels: ChannelMap,
) -> TransitionResult[MediatorTransferState]:
""" Handle a ReceiveUnlock state change. """
events = list()
balance_proof_sender = state_change.balance_proof.sender
channel_identifier = state_change.balance_proof.channel_identifier
for pair in mediator_state.transfers_pair:
if pair.payer_transfer.balance_proof.sender == balance_proof_sender:
channel_state = channelidentifiers_to_channels.get(channel_identifier)
if channel_state:
is_valid, channel_events, _ = channel.handle_unlock(channel_state, state_change)
events.extend(channel_events)
if is_valid:
unlock = EventUnlockClaimSuccess(
pair.payee_transfer.payment_identifier, pair.payee_transfer.lock.secrethash
)
events.append(unlock)
send_processed = SendProcessed(
recipient=balance_proof_sender,
channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
message_identifier=state_change.message_identifier,
)
events.append(send_processed)
pair.payer_state = "payer_balance_proof"
iteration = TransitionResult(mediator_state, events)
return iteration
def handle_lock_expired(
mediator_state: MediatorTransferState,
state_change: ReceiveLockExpired,
channelidentifiers_to_channels: ChannelMap,
block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
events = list()
for transfer_pair in mediator_state.transfers_pair:
balance_proof = transfer_pair.payer_transfer.balance_proof
channel_state = channelidentifiers_to_channels.get(balance_proof.channel_identifier)
if not channel_state:
return TransitionResult(mediator_state, list())
result = channel.handle_receive_lock_expired(
channel_state=channel_state, state_change=state_change, block_number=block_number
)
        assert result.new_state and isinstance(result.new_state, NettingChannelState), (
            "Handling a receive_lock_expire should never delete the channel task"
        )
events.extend(result.events)
if not channel.get_lock(result.new_state.partner_state, mediator_state.secrethash):
transfer_pair.payer_state = "payer_expired"
if mediator_state.waiting_transfer:
waiting_channel = channelidentifiers_to_channels.get(
mediator_state.waiting_transfer.transfer.balance_proof.channel_identifier
)
if waiting_channel:
result = channel.handle_receive_lock_expired(
channel_state=waiting_channel, state_change=state_change, block_number=block_number
)
events.extend(result.events)
return TransitionResult(mediator_state, events)
def handle_node_change_network_state(
mediator_state: MediatorTransferState,
state_change: ActionChangeNodeNetworkState,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult:
""" If a certain node comes online:
1. Check if a channel exists with that node
    2. Check that this channel is one of the known routes and that the route is valid.
3. Check that the transfer was stuck because there was no route available.
4. Send the transfer again to this now-available route.
"""
if state_change.network_state != NODE_NETWORK_REACHABLE:
return TransitionResult(mediator_state, list())
try:
route = next(
route
for route in mediator_state.routes
if route.node_address == state_change.node_address
)
except StopIteration:
return TransitionResult(mediator_state, list())
if mediator_state.waiting_transfer is None:
return TransitionResult(mediator_state, list())
transfer = mediator_state.waiting_transfer.transfer
payer_channel_identifier = transfer.balance_proof.channel_identifier
payer_channel = channelidentifiers_to_channels.get(payer_channel_identifier)
payee_channel = channelidentifiers_to_channels.get(route.channel_identifier)
if not payee_channel or not payer_channel:
return TransitionResult(mediator_state, list())
payee_channel_open = channel.get_status(payee_channel) == CHANNEL_STATE_OPENED
if not payee_channel_open:
return TransitionResult(mediator_state, list())
return mediate_transfer(
state=mediator_state,
possible_routes=[route],
payer_channel=payer_channel,
channelidentifiers_to_channels=channelidentifiers_to_channels,
nodeaddresses_to_networkstates={state_change.node_address: state_change.network_state},
pseudo_random_generator=pseudo_random_generator,
payer_transfer=mediator_state.waiting_transfer.transfer,
block_number=block_number,
)
def state_transition(
mediator_state: Optional[MediatorTransferState],
state_change: StateChange,
channelidentifiers_to_channels: ChannelMap,
nodeaddresses_to_networkstates: NodeNetworkStateMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
block_hash: BlockHash,
) -> TransitionResult[MediatorTransferState]:
""" State machine for a node mediating a transfer. """
# pylint: disable=too-many-branches
# Notes:
    # - A user cannot cancel a mediated transfer after it was initiated; they
    #   may only refuse to mediate beforehand. This is because the mediator
# doesn't control the secret reveal and needs to wait for the lock
# expiration before safely discarding the transfer.
iteration = TransitionResult(mediator_state, list())
if type(state_change) == ActionInitMediator:
assert isinstance(state_change, ActionInitMediator), MYPY_ANNOTATION
if mediator_state is None:
iteration = handle_init(
state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
nodeaddresses_to_networkstates=nodeaddresses_to_networkstates,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
)
elif type(state_change) == Block:
assert isinstance(state_change, Block), MYPY_ANNOTATION
assert mediator_state, "Block should be accompanied by a valid mediator state"
iteration = handle_block(
mediator_state=mediator_state,
state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
)
elif type(state_change) == ReceiveTransferRefund:
assert isinstance(state_change, ReceiveTransferRefund), MYPY_ANNOTATION
msg = "ReceiveTransferRefund should be accompanied by a valid mediator state"
assert mediator_state, msg
iteration = handle_refundtransfer(
mediator_state=mediator_state,
mediator_state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
nodeaddresses_to_networkstates=nodeaddresses_to_networkstates,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
)
elif type(state_change) == ReceiveSecretReveal:
assert isinstance(state_change, ReceiveSecretReveal), MYPY_ANNOTATION
msg = "ReceiveSecretReveal should be accompanied by a valid mediator state"
assert mediator_state, msg
iteration = handle_offchain_secretreveal(
mediator_state=mediator_state,
mediator_state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
block_hash=block_hash,
)
elif type(state_change) == ContractReceiveSecretReveal:
assert isinstance(state_change, ContractReceiveSecretReveal), MYPY_ANNOTATION
msg = "ContractReceiveSecretReveal should be accompanied by a valid mediator state"
assert mediator_state, msg
iteration = handle_onchain_secretreveal(
mediator_state=mediator_state,
onchain_secret_reveal=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
)
elif type(state_change) == ReceiveUnlock:
assert isinstance(state_change, ReceiveUnlock), MYPY_ANNOTATION
assert mediator_state, "ReceiveUnlock should be accompanied by a valid mediator state"
iteration = handle_unlock(
mediator_state=mediator_state,
state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
)
elif type(state_change) == ReceiveLockExpired:
assert isinstance(state_change, ReceiveLockExpired), MYPY_ANNOTATION
assert mediator_state, "ReceiveLockExpired should be accompanied by a valid mediator state"
iteration = handle_lock_expired(
mediator_state=mediator_state,
state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
block_number=block_number,
)
elif type(state_change) == ActionChangeNodeNetworkState:
assert isinstance(state_change, ActionChangeNodeNetworkState), MYPY_ANNOTATION
msg = "ActionChangeNodeNetworkState should be accompanied by a valid mediator state"
assert mediator_state, msg
iteration = handle_node_change_network_state(
mediator_state=mediator_state,
state_change=state_change,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
)
# this is the place for paranoia
if iteration.new_state is not None:
assert isinstance(iteration.new_state, MediatorTransferState)
sanity_check(iteration.new_state, channelidentifiers_to_channels)
return clear_if_finalized(iteration, channelidentifiers_to_channels)
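# Note on the dispatch pattern above: `type(state_change) == SomeStateChange`
# matches the exact class (unlike isinstance, it does not accept subclasses),
# while the seemingly redundant `assert isinstance(...)` exists so that mypy
# narrows the type inside each branch. A minimal sketch of the same idiom,
# using hypothetical names for illustration only:
#
#     if type(event) == PaymentReceived:
#         assert isinstance(event, PaymentReceived), MYPY_ANNOTATION
#         handle_payment(event)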
|
|
from plotters import plot_bax_oligo, plot_dist_rate, plot_compiled_dist_rate, plot_agg_initiation, plot_oligo_rate_histo, plot_cytc, plot_scatter, plot_oligo_double_axis
import analysis_tools as tools
from scipy.spatial import distance
import matplotlib.pyplot as plt
import numpy as np
import data
import math
from itertools import cycle
from scipy import stats
from scipy.stats import norm
from collections import defaultdict
import os
import pandas as pd
def bax_oligo_fit():
prefix="./data/"
suffix=".csv"
data_files = data.bax_oligo_data #Import data files from data.py
bax_files = data_files.keys()
ids = data_files.values()
all_data = []
bax_data = {}
for i, f in enumerate(bax_files):
fname = prefix + f + suffix
print f
rows = tools.get_rows(fname, 4)
cell_data = tools.get_val_time(rows, ids[i])
tools.filter_1(cell_data)
tools.normalize_val_time(cell_data)
bax_data[f] = cell_data
all_data.append(cell_data)
print cell_data
nfiles = len(bax_files)
color = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
colorcycle = cycle(color)
for i in range(nfiles):
bax_file = bax_files[i]
print bax_files[i]
ids_list = ids[i]
bdata = bax_data[bax_file]
print bdata
plot_bax_oligo(bdata, fit=True, verbose=True)
def cytc_release_fit():
data_ch2 = data.bax_cytc_datach2
cytc_files = data_ch2.keys()
ids = data_ch2.values()
prefix = "../cyt_c/channel2/"
suffix = ".csv"
cytc_data = {}
for i, f in enumerate(cytc_files):
fname = prefix + f + suffix
rows = tools.get_rows(fname, 4)
cell_data = tools.get_val_time(rows, ids[i])
tools.filter_1(cell_data)
tools.normalize_val_time(cell_data)
cytc_data[f] = cell_data
nfiles = len(cytc_files)
color = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
colorcycle = cycle(color)
for i in range(nfiles):
cytc_file = cytc_files[i]
ids_list = ids[i]
cdata = cytc_data[cytc_file]
print cytc_file
plot_cytc(cdata, fit=True, verbose=True)
def oligo_rates():
prefix="../bax_agg/"
suffix=".csv"
data_files = data.bax_oligo_data #Import data files from data.py
names = []
ids_list = []
all_slopes = []
log_slopes = []
indiv_rates = {}
cell_stds = []
scatterx = []
scattery = []
popt_k = []
popt_a =[]
popt_b = []
param_list =[]
for name, ids in data_files.items(): #Create lists of file names and corresponding ids
names.append(name)
ids_list.append(ids)
for i in range(len(names)):
fname = prefix+names[i]+suffix
print names[i]
rows = tools.get_rows(fname, 4)
val_time = tools.get_val_time(rows, ids_list[i])
tools.filter_1(val_time)
tools.normalize_val_time(val_time)
scatterxval = i + 1
cell_rate = []
for id, timeseries in val_time.items():
x, y = tools.get_xy(timeseries)
try:
xfit, yfit, popt, pcov = tools.bax_fit(timeseries, id)
                if popt is not None and not isinstance(pcov, float):
print yfit[-10], yfit[-1], popt
i = tools.sigmoid_inflection(xfit, popt[0], popt[1], popt[2], popt[3], popt[4])
# print i, popt[1], len(xfit)
if popt[1] > 0 and i != len(xfit) - 1:
slope = tools.sigmoid_d1(xfit[i], popt[0], popt[1], popt[2], popt[3], popt[4])
print slope, "= slope", id
                        if slope > 0 and tools.filter_2(xfit, yfit) is None:
print fname, id
# plt.ylim(0,ymax=5)
# plt.plot(x, y, xfit, yfit)
# plt.show()
cell_rate.append(slope)
all_slopes.append(slope)
scatterx.append(scatterxval)
scattery.append(slope)
popt_k.append(popt[1])
popt_a.append(popt[3])
popt_b.append(popt[4])
x0, k, y0, a, b = popt[0], popt[1], popt[2], popt[3], popt[4]
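                            # Assuming tools.bax_fit wraps scipy.optimize.curve_fit, the square
                            # roots of the covariance-matrix diagonal taken below are the
                            # 1-sigma uncertainties of the fitted sigmoid parameters.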
x0_cov, k_cov, y0_cov, a_cov, b_cov = pcov[0,0]**0.5, pcov[1,1]**0.5, pcov[2,2]**0.5, pcov[3,3]**0.5, pcov[4,4]**0.5
param_data = {'id': id,
'a': a, 'b': b, 'k': k, 'x0': x0, 'y0': y0,
'x0_cov': x0_cov, 'k_cov': k_cov, 'y0_cov': y0_cov, 'a_cov': a_cov, 'b_cov': b_cov}
param_list.append(param_data)
except RuntimeError:
continue
if sum(cell_rate) != 0:
cell_avg = sum(cell_rate)/len(cell_rate)
cell_std = np.std(cell_rate)
# print len(cell_rate), cell_avg, "=cellavg"
cell_stds.append(cell_std)
indiv_rates[fname] = [cell_std]
for i in range(len(all_slopes)): # log transformation for comparison between cell types
log = math.log(all_slopes[i], 2)
print log, all_slopes[i]
if log < (-5.2 + (2*29)) and log > (-5.2 - (2*29)):
log_slopes.append(log)
else:
continue
avg_rate = sum(log_slopes)/len(log_slopes)
std = np.std(log_slopes)
print log_slopes
print "Avg=", avg_rate, "Std=", std, "n=", len(log_slopes), len(cell_rate), len(all_slopes)
norm_data = []
bins_bound = np.linspace(-10, 10, 51)
n, bins, patches = plt.hist(log_slopes, bins_bound, color= 'white', linewidth = 0)
for i in range(len(n)):
norm_log = (n[i] / sum(n))
norm_data.append(norm_log)
norm_data.append(0)
print norm_data, sum(norm_data), len(norm_data)
width = bins_bound[1]- bins_bound[0]
plt.bar(bins_bound, norm_data, width, color= "gray")
plt.ylim(ymax=0.4)
plt.xlabel("Log(Rate of Oligomerization) ")
plt.ylabel("Percent Frequency within bin *100")
# plt.scatter(scatterx, scattery)
plt.show()
df = pd.DataFrame(param_list)
# df.to_csv('RGC_ONC.csv')
avg_k = sum(popt_k) / len(popt_k)
avg_a = sum(popt_a) / len(popt_a)
avg_b = sum(popt_b) / len(popt_b)
print 'k=', avg_k, 'a=', avg_a, 'b=', avg_b
#For yMax Values of sigmoid curves.
#plot_oligo_rate_histo(norm_data)
# cell1 = []
# cell2 = []
# cell3 = []
# for i in range(len(scatterx)):
# if scatterx[i] == 1:
# cell1.append(scattery[i])
# if scatterx[i] == 2:
# cell2.append(scattery[i])
# if scatterx[i] == 3:
# cell3.append(scattery[i])
# mean1 = np.mean(cell1)
# mean2 = np.mean(cell2)
# mean3 = np.mean(cell3)
# std1 = np.std(cell1)
# std2 = np.std(cell2)
# std3 = np.std(cell3)
# print "cell1=", len(cell1), mean1, std1, "cell2=", len(cell2), mean2, std2, "cell3=", len(cell3), mean3, std3
##For Manhattan plots
# HCTDKO = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0086206896551724137, 0.0, 0.025862068965517241, 0.0, 0.034482758620689655, 0.077586206896551727, 0.077586206896551727, 0.13793103448275862, 0.20689655172413793, 0.17241379310344829, 0.094827586206896547, 0.051724137931034482, 0.043103448275862072, 0.034482758620689655, 0.017241379310344827, 0.0086206896551724137, 0.0086206896551724137, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
# HCTBAXKO = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.012195121951219513, 0.0, 0.024390243902439025, 0.036585365853658534, 0.024390243902439025, 0.14634146341463414, 0.073170731707317069, 0.18292682926829268, 0.12195121951219512, 0.12195121951219512, 0.06097560975609756, 0.04878048780487805, 0.073170731707317069, 0.012195121951219513, 0.036585365853658534, 0.012195121951219513, 0.012195121951219513, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
# rgc = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01048951048951049, 0.034965034965034968, 0.048951048951048952, 0.080419580419580416, 0.11888111888111888, 0.12237762237762238, 0.15034965034965034, 0.13636363636363635, 0.097902097902097904, 0.062937062937062943, 0.1048951048951049, 0.031468531468531472, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
# hct = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0029498525073746312, 0.017699115044247787, 0.041297935103244837, 0.041297935103244837, 0.067846607669616518, 0.091445427728613568, 0.15929203539823009, 0.16224188790560473, 0.15339233038348082, 0.13569321533923304, 0.076696165191740412, 0.038348082595870206, 0.011799410029498525, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,0]
# d407 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0012804097311139564, 0.0, 0.0012804097311139564, 0.0, 0.0025608194622279128, 0.0012804097311139564, 0.0025608194622279128, 0.0064020486555697821, 0.017925736235595392, 0.014084507042253521, 0.040973111395646605, 0.060179257362355951, 0.10755441741357234, 0.17029449423815621, 0.17285531370038412, 0.16133162612035851, 0.14084507042253522, 0.04353393085787452, 0.024327784891165175, 0.012804097311139564, 0.0051216389244558257, 0.0038412291933418692, 0.0038412291933418692, 0.0025608194622279128, 0.0025608194622279128, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
# mCherry = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0027548209366391185, 0.0013774104683195593, 0.011019283746556474, 0.0082644628099173556, 0.022038567493112948, 0.033057851239669422, 0.078512396694214878, 0.14325068870523416, 0.14462809917355371, 0.16115702479338842, 0.13085399449035812, 0.0743801652892562, 0.071625344352617082, 0.048209366391184574, 0.033057851239669422, 0.01790633608815427, 0.0055096418732782371, 0.0041322314049586778, 0.0041322314049586778, 0.0027548209366391185, 0.0, 0.0013774104683195593, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0]
# # samp = norm.rvs(loc=0,scale=1,size=len(HCTDKO))
# # print log_slopes, norm_data
# mean, std_n = norm.fit(log_slopes)
# print mean, std_n
# plt.plot(bins_bound, norm.pdf(bins_bound, loc=mean, scale=std_n))
# # bins_bound = np.linspace(-10, 10, 51)
# # pdf_fitted = norm.pdf(bins_bound,loc=avg_rate,scale=std)
# # plt.plot(bins_bound, rgc, ls = 'steps', label = 'RGC', linewidth = 2, color = 'red')
# plt.plot(bins_bound, HCTDKO, ls= 'steps', label = "HCT116 DKO", linewidth = 2, color = 'red')
# # plt.plot(bins_bound, HCTBAXKO, ls = 'steps', label = 'HCT116 BAX KO', linewidth = 2, color = 'blue')
# plt.plot(bins_bound, hct, ls = 'steps', label = 'HCT116 WT', linewidth = 2, color = 'green')
# plt.axhline(y=0, linewidth=2, color= 'black')
# plt.axvline(x=-10, linewidth=2, color= 'black')
# plt.legend(loc=2, shadow=False, prop={'size':12}, bbox_transform=plt.gcf().transFigure)
# plt.xlabel('Rate of oligomerization \n log(2)RFU/min')
# plt.ylabel('Frequency \n Percent of Total')
# plt.ylim(ymax= 0.5)
# plt.title('Rate of BAX Oligomerization')
# plt.show()
def initiation_time(): # Per Mitochondria And Per Cell
prefix="../bax_agg/"
suffix=".csv"
data_files = data.bax_oligo_data #Import data files from data.py
names = []
ids_list = []
time_oligo = [] #initiation time.
time_complete = [] #completion time.
scattery = []
log_y = []
avg_ymax =[]
cell_rates = defaultdict(list) ## key: [vals] = fname: [rates of ea. mito]
cell_inits = defaultdict(list)
cell_compls = defaultdict(list) ###fname: [total completion time of mito]
log_inits = defaultdict(list)
cell_pcntrates = defaultdict(list) ## fname: [%rate or total cellrates of ea. mito]
cell_pcntinits = defaultdict(list)
avg_cell_rates = [] #list of avg rates(mito)/cell for all cells
avg_cell_inits = []
avg_cell_completions = []
std_pcntof_avgrates = [] #list of std as percent over average per cell
std_pcntof_avginits = [] #list of std as percent over average per cell
std_cell_rates = [] #list of stdevs(mito)/cell for all cells
std_cell_inits = []
cell_number = []
cell_number2 = []
duration = []
plos_list = []
for name, ids in data_files.items(): #Create lists of file names and corresponding ids
names.append(name)
ids_list.append(ids)
for i in range(len(names)):
print (i, names[i])
label = (names[i])
rows1 = tools.get_rows("../bax_agg/Image Acquisition Times.csv", 4)
lag_name, lag_time = tools.get_lagtime(rows1, names[i])
        #print lag_name, lag_time
fname = prefix+names[i]+suffix
rows = tools.get_rows(fname, 4)
val_time = tools.get_val_time(rows, ids_list[i])
tools.filter_1(val_time)
tools.normalize_val_time(val_time)
for lagger in range(len(lag_name)):
#print lag_name[lagger], names[i]
if names[i] == lag_name[lagger]:
cell_lag = lag_time[lagger]
print lag_name[lagger], cell_lag
else:
continue
# cell_lag = 50
for id, timeseries in val_time.items():
x, y = tools.get_xy(timeseries)
# plt.plot(x, y, color = 'blue')
xfit, yfit, popt, pcov = tools.bax_fit(val_time[id], id)
            if xfit is None:
print "No fit"
else:
i = tools.sigmoid_inflection(xfit, *popt) #i = xfit where crosses zero. xfit[i] = inflection point.
# print "SFIts", yfit[0], yfit[-1]
# plt.plot(xfit, yfit, x, y)
# plt.show()
if popt[1] > 0 and i != len(xfit) - 1 and tools.close(yfit[0], yfit[-1]) == False:
# plt.plot(xfit, yfit, x, y)
# plt.show()
slope = tools.sigmoid_d1(xfit[i], *popt)
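                    # Tangent-line construction (my reading of the block below): extend the
                    # tangent at the sigmoid inflection point, take the fitted curve's upper
                    # plateau as the max line and its mirror about the inflection value as the
                    # min line, and treat the points where the tangent crosses those lines as
                    # the initiation and saturation times.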
y = []
ymin = []
b = yfit[i] - (xfit[i]*slope) #creating separate curve to determine max/min lines.
xx=range(len(xfit))
for value in range(len(xx)):
ys = (slope*value) + b
y.append(ys)
xmin = np.arange(-500, 500, 5)
for value in range(len(xmin)):
ymins = tools.sigmoid(value, popt[0], popt[1], popt[2], popt[3], popt[4])
ymin.append(ymins)
max_line = (sum(ymin[180:]) / len(ymin[180:]))
minimumval = sum(ymin[:20]) / len(ymin[:20]) #to calc max Y value
delta_y = (max_line - minimumval) + 1 #change in y plus norm value 1
min_line= max_line - (2 * (max_line - yfit[i]))
init_ymxb = tools.crosses(y, min_line)
sat = tools.crosses(y, max_line)
print max_line, '=maxline', min_line, minimumval, delta_y, '=deltay'
# changey = min_line + ((max_line- min_line) * 0.05)
# init_chan = tools.crosses(yfit, changey)
# init_chanfix = (init_chan / 500) * len(x) + (x[0] - 1)
# print init_chan, '=initchangey'
# max_thry = (max_line - 0.05 * max_line) # max threshold of extended curve. y value.
total_time = init_ymxb + cell_lag
oligo_dur = int(sat-init_ymxb)
if slope > 0 and math.isnan(slope) == False and init_ymxb != 499 and tools.close(slope, 0) == False and tools.close(oligo_dur, 0) == False:
# print oligo_dur, sat, init_ymxb
# log_dur = math.log(oligo_dur, 2)
time_oligo.append(total_time) # for population data
cell_inits[label].append(total_time) #for per cell data
scattery.append(slope) # for population data
log_num = math.log(slope, 2)
#log_init = math.log(total_time, 2)
# if log_num < (-4 + (2*15)) and log_num > (-4 - (2*15)):
cell_rates[label].append(log_num) # for per cell data
plos_data1 = {'Cell': label, 'id': id, "Mito_Rate": log_num, 'Mito_Initiation': total_time}
plos_list.append(plos_data1)
# else:
# continue
time_complete.append(sat + cell_lag) #population data
#log_inits[label].append(log_init)
cell_compls[label].append(oligo_dur)
# duration.append(log_dur) #log2 of duration of oligomerization creates normal distribution
avg_ymax.append(delta_y) # for ymax values avg next to plot compiled curves
# print init_ymxb, oligo_dur, avg_ymax, 'avg ymax'
else:
continue
print slope, '=slope'
# # print 'id=', id, 'init_ymxb', init_ymxb, 'total_time=', total_time, cell_lag, 'completion==', sat
# plt.plot(xfit, yfit, linewidth=2, color='blue') #x, y gives slope line
# plt.plot(xmin, ymin, linewidth=2, color= 'green')
# plt.scatter(xfit[i], yfit[i], color='red')
# plt.axhline(y = max_line, color='black', linewidth=1)
# plt.axhline(y = minimumval, color = 'black', linewidth=1)
# # plt.axvline(x= init_chanfix, color= 'red', linewidth=1)
# plt.axvline(x = init_ymxb, color = 'black', linewidth=1)
# plt.show()
avg_oligo = sum(time_oligo) / len(time_oligo)
std_oligo = np.std(time_oligo)
# print 'intiation time=', time_oligo, avg_oligo, std_oligo, len(time_oligo), cell_lag
# cell_rate_std = np.std(cell_rates)
# cell_init_std = np.std(cell_inits)
# print names[i], "rate std", cell_rate_std, "initiation std", cell_init_std
for item in range(len(names)): # gets values for individual cells
# print item, "fname, len of mitos, rates", names[item], len(cell_rates[names[item]])
if len(cell_rates[names[item]]) >1 and len(cell_inits[names[item]]) >1 and len(cell_compls[names[item]]) >1:
avg_ratepc = sum(cell_rates[names[item]]) / len(cell_rates[names[item]]) # avg of mitochondria in one cell
std_ratepc = np.std(cell_rates[names[item]]) #std of mito in one cell
# print avg_ratepc, '=avg_key', names[item], std_ratepc
avg_initpc = sum(cell_inits[names[item]]) / len(cell_inits[names[item]])
std_initpc = np.std(cell_inits[names[item]])
# print avg_initpc, std_initpc, names[item], #cell_rates[names[item]]
avg_complpc = sum(cell_compls[names[item]]) / len(cell_compls[names[item]])
std_complpc = np.std(cell_compls[names[item]])
# print avg_complpc, std_complpc, "compeltions per cell"
# plos_data2 = {'Cell': names[item], 'Cell_rate': avg_ratepc, 'Std_rate': std_ratepc, 'Cell_initiation': avg_initpc, 'Std_initiation': std_initpc}
# plos_list.append(plos_data2)
avg_cell_rates.append(avg_ratepc) # list of avg rate within a single cell
avg_cell_inits.append(avg_initpc)
avg_cell_completions.append(avg_complpc)
std_cell_rates.append(std_ratepc) #list of std of rates within a single cell
std_cell_inits.append(std_initpc)
qs3, qs4, qs5, qs6 = avg_ratepc, std_ratepc, avg_initpc, std_initpc
std_pcntof_avgrates.append(abs((std_ratepc) / (avg_ratepc))) # COEFF OF VARIANCE =stdevs as a percentage of the cell average rate.
std_pcntof_avginits.append(abs((std_initpc) / (avg_initpc)))
# print std_pcntof_avgrates, fname, avg_cell_rates
avg_pcrates = sum(avg_cell_rates)/ len(avg_cell_rates) #total avg of rates/cells for avg line in graph
avg_pcinits = sum(avg_cell_inits)/ len(avg_cell_inits)
std_pcrates = np.std(avg_cell_rates) #std of total avgs. dont really need this
std_pcinits = np.std(avg_cell_inits)
avg_std_pcrates = sum(std_cell_rates) / len(std_cell_rates) #avg of stdevs
avg_std_pcinits = sum(std_cell_inits) / len(std_cell_inits)
ss_rates = np.std(std_cell_rates) #std of stdevs.
ss_inits = np.std(std_cell_inits)
avg_pcnt_pcrates = sum(std_pcntof_avgrates) / len(std_pcntof_avgrates) #total avg of Variance(%cell rates/cell)
avg_pcnt_pcinits = sum(std_pcntof_avginits) / len(std_pcntof_avginits)
std_pcnt_pcrates = np.std(std_pcntof_avgrates) #std of stdevs.
std_pcnt_pcinits = np.std(std_pcntof_avginits)
cell_number.append(range(len(std_pcntof_avgrates)))
# print "list of completions", avg_cell_completions, (sum(avg_cell_completions)/len(avg_cell_completions)), np.std(avg_cell_completions)
print len(cell_number), len(std_pcntof_avgrates), std_pcntof_avgrates, avg_pcnt_pcrates
plt.scatter(cell_number, std_pcntof_avgrates, label= "Rate Std/Avg (as percent)", color = 'blue', marker = 'o')
plt.scatter(cell_number, std_pcntof_avginits, label= "Initiation Std/Avg (as percent)", color = 'red', marker = 'o')
plt.axhline(y=avg_pcnt_pcrates, label="Avg cell rate", color = 'blue')
plt.axhline(y=avg_pcnt_pcinits, label= "avg cell Initiation", color = 'red')
plt.title("Rate/Init as Percent std/avg per cell")
plt.legend(loc=1, shadow=False, prop={'size':8}, bbox_transform=plt.gcf().transFigure)
plt.xlim(-5, 75)
plt.ylim(-1, 2)
plt.show()
avg_ymaxs = sum(avg_ymax)/len(avg_ymax)
std_ymax = np.std(avg_ymax)
print "values needed", avg_pcrates, std_pcrates, avg_pcinits, std_pcinits, "avg,std of varianece",avg_pcnt_pcrates, std_pcnt_pcrates, avg_pcnt_pcinits, std_pcnt_pcinits
# print "per cell info", avg_cell_rates, avg_cell_inits, std_cell_inits, avg_ymaxs, std_ymax
for it in range(len(scattery)): # log transformation for comparison between cell types
log = math.log(scattery[it], 2)
# if log < (-5 + 2*29) and log > (-5 - 2*29):
log_y.append(log)
# else:
# time_oligo.remove(time_oligo[it])
plt.plot(time_oligo, log_y, 'o', color= 'blue')
# plt.axhline(y = 0, linewidth=3, color='black')
# plt.axvline(x=0, linewidth=3, color="black")
plt.ylim(-10, ymax = 5)
plt.xlim(0, xmax = 1000)
plt.xlabel("Initiation Time (min)")
plt.ylabel("Rate of Oligomerization (RFU/min)")
plt.show()
plot_agg_initiation(time_oligo)
# outliers_rem = [] #creating list of completions without the outlier values
# for t in range(len(avg_cell_completions)):
# print avg_cell_completions[t]
# if 3.6< avg_cell_completions[t] < 17.4:
# outliers_rem.append(avg_cell_completions[t])
# print 'add'
# else:
# continue
# new_avg = (sum(outliers_rem))/(len(outliers_rem))
# new_std = np.std(outliers_rem)
# print new_avg, new_std, len(outliers_rem), outliers_rem
# dur_avg = (sum(duration))/(len(duration))
# dur_std = np.std(duration)
# print 'a', dur_avg, dur_std, len(duration)
# plt.hist(duration)
# plt.show()
df = pd.DataFrame(plos_list)
print plos_list
df.to_csv('HCTKO_all.csv')
def bax_cytc_orig():
data_ch1 = data.bax_cytc_datach1 #Import data files from data.py
data_ch2 = data.bax_cytc_datach2
bax_files = data_ch1.keys()
cytc_files = data_ch2.keys()
ids = data_ch1.values()
ids2 = data_ch2.values()
ch1_prefix="../cyt_c/channel1/"
ch2_prefix="../cyt_c/channel2/"
suffix=".csv"
bax_data = {}
cytc_data = {}
filt_ids = {} #ids filtered out for >5 data points.
time_re_bax_initiation = []
release_rate = []
bax_rate = []
inf_slopes = []
slope_d1s = []
for i, f in enumerate(bax_files):
fname = ch1_prefix + f + suffix
rows = tools.get_rows(fname, 4)
cell_data = tools.get_val_time(rows, ids[i])
tools.normalize_val_time(cell_data)
tools.filter_1(cell_data)
filt_ids[f] = cell_data.keys()
bax_data[f] = cell_data
for i, f in enumerate(cytc_files):
fname = ch2_prefix + f + suffix
rows = tools.get_rows(fname, 4)
cell_data = tools.get_val_time(rows, ids2[i])
tools.normalize_val_time(cell_data)
tools.filter_1(cell_data)
for id, timeseries in cell_data.items():
cell_data[id] = [[entry[0] / timeseries[-1][0], entry[1]] for entry in timeseries]
cytc_data[f] = cell_data
#print "aa", filt_ids, "old", ids
for i in range(len(bax_files)):
bax_file = bax_files[i]
cytc_file = bax_file.replace("ch1", "ch2")
print "file=", bax_file, cytc_file
ids_list = filt_ids[bax_file]
bdata = bax_data[bax_file]
cdata = cytc_data[cytc_file]
bax_initiate = float('NaN')
#diff = init_diff(bdata, cdata)
#print(diff)
for id in ids_list:
bx, by = tools.get_xy(bdata[id])
cx, cy = tools.get_xy(cdata[id])
bgood = True
cgood= True
if bgood:
bxfit, byfit, popt, pcov = tools.bax_fit(bdata[id], id)
                if bxfit is None:
bgood = False
print "bgood = False"
else:
i = tools.sigmoid_inflection(bxfit, *popt) #inflection point of fit_curve
print "len_fit", len(bxfit), "len x/y", len(bx), len(by), "fit_inf=", i
bslope_d1 = tools.sigmoid_d1(bxfit[i], *popt)
print bslope_d1, "bslope_d1"
if abs(bslope_d1) == 0 or math.isnan(bslope_d1) == True or tools.close(bslope_d1, 0):
max_line = sum(byfit[:20]) / len(byfit[:20])
min_line = sum(byfit[480:]) / len(byfit[480:])
y_inflection = ((max_line - min_line) / 2) + min_line
fl = tools.crosses(byfit, y_inflection) #point where fit curve crosses infleciton point.
x_inflection = bxfit[fl] #defines x value of inflection point
print max_line, min_line, x_inflection
bax_initiation = x_inflection
fast_rate = ((max_line - min_line) / (1))
bax_rate.append(fast_rate)
else:
y = []
ymin = []
b = byfit[i] - (bxfit[i] * bslope_d1) #creating separate curve to determine max/min lines.
#print xinf, yinf, bxfit[i], byfit[i], 'here'
x=range(len(bxfit))
for value in range(len(x)):
ys = (bslope_d1*value) + b
y.append(ys)
xmin = np.arange(-500, 500, 5)
for value in range(len(xmin)):
ymins = tools.sigmoid(value, popt[0], popt[1], popt[2], popt[3], popt[4])
ymin.append(ymins)
max_line2 = (sum(ymin[180:]) / len(ymin[180:]))
min_line2= max_line2 - (2 * (max_line2 - byfit[i]))
print min_line2, max_line2, "max2"
bax_initiation = tools.crosses(y, min_line2)
bax_rate.append(bslope_d1)
print "bax initiation", bax_initiation, bax_rate
# print "fit_inf=", i, bxfit[i], popt[1], yinf, "bslope=", bslope, thr, "baxinitiate", bax_initiate, init_ymxb, "=init_ymxb"
# plt.plot(bxfit, byfit, linewidth = 2)
# #plt.scatter(xinf, yinf, color = 'red')
# plt.axvline(x = init_ymxb, color = 'black')
# plt.axvline(x = bax_initiate, color = 'green')
# plt.axhline(y= max_line, color = 'black')
# plt.axhline(y = min_line, color = 'black')
# #plt.plot(x, y, color= 'red')
# plt.show()
if cgood:
cxfit, cyfit, cpopt = tools.cytc_fit(cdata[id], id)
                if cxfit is None:
cgood = False
print "No Cytc Fit"
bax_rate.remove(bax_rate[-1])
else:
cinf = tools.sigmoid_inflection(cxfit, *cpopt) #determine inflection index
# if cinf != len(cxfit) - 1 and cinf != 0:
slope_d1 = tools.sigmoid_d1(cxfit[cinf], *cpopt) #first derivative of inflection index gives slope of line at inflection point
max_line = sum(cyfit[:20]) / len(cyfit[:20])
min_line = sum(cyfit[480:]) / len(cyfit[480:])
y_inflection = ((max_line - min_line) / 2) + min_line
fl = tools.crosses(cyfit, y_inflection) #point where fit curve crosses infleciton point.
x_inflection = cxfit[fl] #defines x value of inflection point
slope_decay = (min_line - max_line) / (1) #1 is max x distance for dataset
print slope_decay, "=decay", slope_d1, "=d1"
if slope_d1 > 0 and slope_decay > 0:
cgood = False
elif cinf == (len(cxfit) -1) or slope_d1 > 0 or slope_decay > 0:
cgood = False
elif abs(slope_d1) == 0 or math.isnan(slope_d1) == True or tools.close(slope_d1, 0) == True or slope_d1 < slope_decay:
release_rate.append(slope_decay)
inf_slopes.append(slope_decay)
# print slope_decay, "=decay", slope_d1, "=d1"
print max_line, min_line, x_inflection
b = y_inflection - (x_inflection * 0) #using x,y of inflection point, determine b of y=mx+b
x_line = range(len(cxfit))
for point in range(len(x_line)):
y_line = (0* point) + b
cytc_decay = x_inflection #because slope is zero, x_inflection point denotes decay start
# print slope_d1, "slope_d1", cytc_decay
# plt.plot(cxfit, cyfit, color= 'green')
# plt.axvline(x= x_inflection, color = 'black')
# plt.plot(x_inflection, y_inflection, 'o', color= 'g')
# plt.show()
else:
release_rate.append(slope_d1)
slope_d1s.append(slope_d1)
yy = []
yymin = []
bb = cyfit[cinf] - (cxfit[cinf] * slope_d1)
xx = range(len(cxfit))
for vals in range(len(xx)):
yys = (slope_d1 * vals) + bb
yy.append(yys)
xxmin = np.arange(-500, 500, 5)
for vals in range(len(xxmin)):
yymins = tools.sigmoid(vals, cpopt[0], cpopt[1], cpopt[2], cpopt[3], cpopt[4])
yymin.append(yymins)
cmin_line = (sum(yymin[180:]) / len(yymin[180:]))
cmax_line= cmin_line + (2 * (cyfit[cinf] - cmin_line))
cmax_line2 = sum(yymin[:20]) / len(yymin[:20])
cytc_decay = tools.crosses(yy, cmax_line)
# print "cslope", cslope, cytc_decay, 'cinf', cinf
# plt.plot(xx, yy, color= 'red')
# plt.xlim(xmax=65)
plt.ylim(0, 10)
# plt.plot(cxfit, cyfit, linewidth = 2)
# plt.plot(cxfit[cinf], cyfit[cinf], 'o', color= 'red')
# # #plt.scatter(xinf, yinf, color = 'red')
# plt.axvline(x = c_init_ymxb, color = 'black')
# #plt.axvline(x = bax_initiate, color = 'green')
# plt.axhline(y= cmax_line, color = 'black')
# plt.axhline(y = cmin_line, color = 'green', linewidth = 2)
# plt.show()
# print release_rate
#print bax_file, id, "List", time_re_bax_initiation
if bgood and cgood:
release_time = cytc_decay - bax_initiation
print cytc_decay, bax_initiation, "values"
time_re_bax_initiation.append(release_time)
#print cytc_decay, bax_initiation, 'release intervals'
plt.plot(cx, cy, cxfit, cyfit, color = 'green', linewidth = 2, label= "cytochrome c-GFP")
plt.plot(bx, by, bxfit, byfit, color = 'blue', linewidth=2, label = "mcherry-BAX")
# plt.axvline(x = cytc_decay, color = 'green', linewidth= 1) #Bax initiation
# plt.axvline(x = bax_initiation, color = 'blue', linewidth=1) # cytc Decay
# plt.axhline(y=min(byfit), color = 'red')
# plt.scatter(xinfl, yinfl, xfit_infl, yfit_infl, color= 'red')
plt.axhline(y = 0, linewidth=3, color='black')
plt.xlabel("Time (min)", size = 18)
plt.ylabel("Fluorescence Intensity \n (Percent of Baseline)", size = 18 )
plt.legend(loc=2, shadow=False, prop={'size':8}, bbox_transform=plt.gcf().transFigure)
plt.xticks(size = 14)
plt.yticks(size = 14)
plt.ylim(ymax=5, ymin = -1)
plt.xlim(xmax=100)
plt.show()
print bax_file, cytc_file, id, len(release_rate), len(bax_rate)
print time_re_bax_initiation,
# print len(release_rate), len(bax_rate), len(inf_slopes), len(slope_d1s)
# print 'regular=', min(slope_d1s), max(slope_d1s), 'infinity', len(inf_slopes), #min(inf_slopes), max(inf_slopes)
# r = np.corrcoef(bax_rate, release_rate)[0, 1]
avg_release = sum(release_rate) / len(release_rate)
std = np.std(release_rate)
# plt.hist(release_rate)
print avg_release, std
plt.plot(release_rate, bax_rate, 'o', color = 'green')
plt.ylim(-5, 5)
plt.show()
slope, intercept, r_value, p_value, std_err = stats.linregress(release_rate, bax_rate)
print r_value
if __name__ == "__main__":
initiation_time()
|
|
import ntpath
import difflib
from django.db import models, transaction
from django.conf import settings
from fields import PositiveBigIntegerField
from gensim.corpora import Dictionary as GensimDictionary
import gensim.similarities
import editdistance
from pyanalysis.apps.corpus.models import Dataset, Script, Line
from pyanalysis.apps.enhance.tokenizers import *
# Create your models here.
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class SimilarityPair(models.Model):
type = models.CharField(max_length=32, default="cosine", null=False, blank=False, db_index=True)
src_script = models.ForeignKey(Script, related_name="similarity_pairs", db_index=True)
tar_script = models.ForeignKey(Script, db_index=True)
similarity = models.FloatField(default=0.0)
def get_diff(self):
source = self.src_script
target = self.tar_script
diff = "\n".join(difflib.unified_diff(source.text.split('\n'), target.text.split('\n'), fromfile=source.name, tofile=target.name))
return diff
class ScriptDiff(models.Model):
"""
Script diff between two files
"""
pair = models.ForeignKey(SimilarityPair, related_name="diff", unique=True)
text = models.TextField(default="", blank=True, null=True)
class Dictionary(models.Model):
name = models.CharField(max_length=100, null=True, default="", blank=True)
dataset = models.ForeignKey(Dataset, related_name="dictionary", null=True, blank=True, default=None)
settings = models.TextField(default="", blank=True, null=True)
time = models.DateTimeField(auto_now_add=True)
num_docs = PositiveBigIntegerField(default=0)
num_pos = PositiveBigIntegerField(default=0)
num_nnz = PositiveBigIntegerField(default=0)
@property
def gensim_dictionary(self):
if not hasattr(self, '_gensim_dict'):
setattr(self, '_gensim_dict', self._load_gensim_dictionary())
return getattr(self, '_gensim_dict')
def get_token_id(self, bow_index):
if not hasattr(self, '_index2id'):
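            # touching the property lazily runs _load_gensim_dictionary(),
            # which fills self._index2id as a side effect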
g = self.gensim_dictionary
try:
return self._index2id[bow_index]
except KeyError:
return None
def _load_gensim_dictionary(self):
setattr(self, '_index2id', {})
gensim_dict = GensimDictionary()
gensim_dict.num_docs = self.num_docs
gensim_dict.num_pos = self.num_pos
gensim_dict.num_nnz = self.num_nnz
for dic_token in self.dic_tokens.all():
self._index2id[dic_token.index] = dic_token.id
gensim_dict.token2id[dic_token.text] = dic_token.index
gensim_dict.dfs[dic_token.index] = dic_token.document_frequency
logger.info("Dictionary contains %d tokens" % len(gensim_dict.token2id))
return gensim_dict
def _populate_from_gensim_dictionary(self, gensim_dict):
self.num_docs = gensim_dict.num_docs
self.num_pos = gensim_dict.num_pos
self.num_nnz = gensim_dict.num_nnz
self.save()
logger.info("Saving gensim dictionary of dataset '%d' in the database" % self.dataset.id)
batch = []
count = 0
print_freq = 10000
batch_size = 1000
total_tokens = len(gensim_dict.token2id)
for token, id in gensim_dict.token2id.iteritems():
dict_token = DictToken(dictionary=self,
text=token,
index=id,
document_frequency=gensim_dict.dfs[id])
batch.append(dict_token)
count += 1
if len(batch) > batch_size:
DictToken.objects.bulk_create(batch)
batch = []
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
if count % print_freq == 0:
logger.info("Saved %d / %d tokens in the database dictionary" % (count, total_tokens))
if len(batch):
DictToken.objects.bulk_create(batch)
count += len(batch)
logger.info("Saved %d / %d tokens in the database dictionary" % (count, total_tokens))
return self
@classmethod
def _build_gensim_dictionary(cls, dataset, scripts):
# build a dictionary
logger.info("Building a dictionary from texts")
tokenized_scripts = CallTokenLoader(dataset.scripts.all())
gensim_dict = GensimDictionary(tokenized_scripts)
dict_obj, created = cls.objects.get_or_create(dataset=dataset)
dict_obj._populate_from_gensim_dictionary(gensim_dict)
return dict_obj
@classmethod
def _create_from_texts(cls, tokenized_texts, name, dataset, settings, minimum_frequency=2):
from gensim.corpora import Dictionary as GensimDictionary
# build a dictionary
logger.info("Building a dictionary from texts")
dictionary = GensimDictionary(tokenized_texts)
# Remove extremely rare words
logger.info("Dictionary contains %d words. Filtering..." % len(dictionary.token2id))
dictionary.filter_extremes(no_below=minimum_frequency, no_above=1, keep_n=None)
dictionary.compactify()
logger.info("Dictionary contains %d words." % len(dictionary.token2id))
dict_model = cls(name=name,
dataset=dataset,
settings=settings)
dict_model.save()
dict_model._populate_from_gensim_dictionary(dictionary)
return dict_model
def _vectorize_corpus(self, queryset, tokenizer):
import math
logger.info("Saving document token vectors in corpus.")
total_documents = self.num_docs
gdict = self.gensim_dictionary
count = 0
total_count = queryset.count()
batch = []
batch_size = 1000
print_freq = 10000
# tokenized_scripts = tokenizer(scripts)
for script in queryset.iterator():
for line in script.lines.all():
tokens = tokenizer.tokenize(line)
bow = gdict.doc2bow(tokens)
num_tokens = len(tokens)
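                # plain tf-idf per (line, token): tf is the raw count of the token in
                # this line, idf = log(total documents / token document frequency)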
for dic_token_index, dic_token_freq in bow:
dic_token_id = self.get_token_id(dic_token_index)
document_freq = gdict.dfs[dic_token_index]
try:
tf = float(dic_token_freq)
                        idf = math.log(float(total_documents) / document_freq)  # float() avoids Python 2 integer division
tfidf = tf * idf
except:
import pdb
pdb.set_trace()
batch.append(TokenVectorElement(
dictionary=self,
dic_token_id=dic_token_id,
dic_token_index=dic_token_index,
frequency=dic_token_freq,
tfidf=tfidf,
line=line,
script=script))
count += 1
if len(batch) > batch_size:
TokenVectorElement.objects.bulk_create(batch)
batch = []
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
if count % print_freq == 0:
logger.info("Saved token-vectors for %d / %d documents" % (count, total_count))
if len(batch):
TokenVectorElement.objects.bulk_create(batch)
logger.info("Saved token-vectors for %d / %d documents" % (count, total_count))
logger.info("Created %d token vector entries" % count)
def _vectorize_diff_corpus(self, queryset, tokenizer):
import math
logger.info("Saving document token vectors in corpus.")
total_documents = self.num_docs
gdict = self.gensim_dictionary
count = 0
total_count = queryset.count()
batch = []
batch_size = 1000
print_freq = 10000
# tokenized_scripts = tokenizer(scripts)
for script_diff in queryset.iterator():
tokens = tokenizer.tokenize(script_diff)
bow = gdict.doc2bow(tokens)
num_tokens = len(tokens)
for dic_token_index, dic_token_freq in bow:
dic_token_id = self.get_token_id(dic_token_index)
document_freq = gdict.dfs[dic_token_index]
try:
tf = float(dic_token_freq)
                    idf = math.log(float(total_documents) / document_freq)  # float() avoids Python 2 integer division
tfidf = tf * idf
batch.append(DiffTokenVectorElement(
dictionary=self,
dic_token_id=dic_token_id,
dic_token_index=dic_token_index,
frequency=dic_token_freq,
tfidf=tfidf,
script_diff=script_diff))
except:
import pdb
pdb.set_trace()
count += 1
if len(batch) > batch_size:
DiffTokenVectorElement.objects.bulk_create(batch)
batch = []
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
if count % print_freq == 0:
logger.info("Saved token-vectors for %d / %d documents" % (count, total_count))
if len(batch):
DiffTokenVectorElement.objects.bulk_create(batch)
logger.info("Saved token-vectors for %d / %d documents" % (count, total_count))
logger.info("Created %d token vector entries" % count)
def _build_lda(self, name, corpus, num_topics=30, tokens_to_save=200, multicore=True):
from gensim.models import LdaMulticore, LdaModel
gdict = self.gensim_dictionary
if multicore:
lda = LdaMulticore(corpus=corpus,
num_topics=num_topics,
workers=3,
id2word=gdict)
else:
lda = LdaModel(corpus=corpus,
num_topics=num_topics,
id2word=gdict)
model = TopicModel(name=name, dictionary=self)
model.save()
topics = []
for i in range(num_topics):
topic = lda.show_topic(i, topn=tokens_to_save)
alpha = lda.alpha[i]
topicm = Topic(model=model, name="?", alpha=alpha, index=i)
topicm.save()
topics.append(topicm)
tokens = []
for token_text, prob in topic:
token_index = gdict.token2id[token_text]
token_id = self.get_token_id(token_index)
tw = TopicDictToken(topic=topicm,
token_id=token_id, token_index=token_index,
probability=prob)
tokens.append(tw)
TopicDictToken.objects.bulk_create(tokens)
most_likely_token_scores = topicm.token_scores\
.order_by('-probability')\
.prefetch_related('token')
topicm.name = ', '.join([score.token.text for score in most_likely_token_scores[:3]])
topicm.save()
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
model.save_to_file(lda)
return (model, lda)
def _apply_lda(self, model, corpus, lda=None):
if lda is None:
# recover the lda
lda = model.load_from_file()
total_documents = len(corpus)
count = 0
batch = []
batch_size = 1000
print_freq = 10000
topics = list(model.topics.order_by('index'))
# Go through the bows and get their topic mixtures
for bow in corpus:
mixture = lda.get_document_topics(bow)
script_id = corpus.current_script_id
for topic_index, prob in mixture:
topic = topics[topic_index]
itemtopic = ScriptTopic(topic_model=model,
topic=topic,
script_id=script_id,
probability=prob)
batch.append(itemtopic)
count += 1
if len(batch) > batch_size:
ScriptTopic.objects.bulk_create(batch)
batch = []
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
if count % print_freq == 0:
logger.info("Saved topic-vectors for %d / %d documents" % (count, total_documents))
if len(batch):
ScriptTopic.objects.bulk_create(batch)
logger.info("Saved topic-vectors for %d / %d documents" % (count, total_documents))
def _apply_diff_lda(self, model, corpus, lda=None):
if lda is None:
# recover the lda
lda = model.load_from_file()
total_documents = len(corpus)
count = 0
batch = []
batch_size = 1000
print_freq = 10000
topics = list(model.topics.order_by('index'))
# Go through the bows and get their topic mixtures
for bow in corpus:
mixture = lda.get_document_topics(bow)
script_diff_id = corpus.current_script_diff_id
for topic_index, prob in mixture:
topic = topics[topic_index]
itemtopic = ScriptDiffTopic(topic_model=model,
topic=topic,
script_diff_id=script_diff_id,
probability=prob)
batch.append(itemtopic)
count += 1
if len(batch) > batch_size:
ScriptDiffTopic.objects.bulk_create(batch)
batch = []
if settings.DEBUG:
# prevent memory leaks
from django.db import connection
connection.queries = []
if count % print_freq == 0:
logger.info("Saved topic-vectors for %d / %d documents" % (count, total_documents))
if len(batch):
ScriptDiffTopic.objects.bulk_create(batch)
logger.info("Saved topic-vectors for %d / %d documents" % (count, total_documents))
def _evaluate_lda(self, model, corpus, lda=None):
if lda is None:
# recover the lda
lda = model.load_from_file()
logger.info("Calculating model perplexity on entire corpus...")
model.perplexity = lda.log_perplexity(corpus)
logger.info("Perplexity: %f" % model.perplexity)
model.save()
def load_sparse_matrix(self, use_tfidf=True):
script_id_list = []
results = []
scripts = self.dataset.scripts.all()
for script in scripts:
script_id_list.append(script.id)
tokens = map(lambda x: x.to_tuple(use_tfidf), script.token_vector_elements.all())
results.append(filter(lambda x: x[1] > 0, tokens))
return script_id_list, results
def calc_script_similarity_matrix(self):
script_id_list, matrix = self.load_sparse_matrix()
index = gensim.similarities.SparseMatrixSimilarity(matrix, num_features=self.dic_tokens.count())
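        # `matrix` holds one gensim-style sparse vector per script (a list of
        # (token_index, weight) pairs); `index[row]` then yields the cosine
        # similarity of that script against every script in the corpus, which
        # is what gets stored pair by pair below.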
for r_idx, row in enumerate(matrix):
sim_row = index[row]
for c_idx, sim in enumerate(sim_row):
sim_pair = SimilarityPair(src_script_id=script_id_list[r_idx],
tar_script_id=script_id_list[c_idx],
similarity=sim)
sim_pair.save()
def calc_script_common_call_num(self):
scripts = self.dataset.scripts.all()
sim_pair_list = []
with transaction.atomic(savepoint=False):
for i in range(len(scripts)):
for j in range(i + 1, len(scripts)):
common_call_num = len(scripts[i].extract_common_calls(scripts[j]))
sim_pair_list.append(
SimilarityPair(type='common_calls',
src_script_id=scripts[i].id,
tar_script_id=scripts[j].id,
similarity=common_call_num))
SimilarityPair.objects.bulk_create(sim_pair_list)
sim_pair_list = []
def calc_script_name_similarity(self):
scripts = self.dataset.scripts.all()
sim_pair_list = []
with transaction.atomic(savepoint=False):
for i in range(len(scripts)):
for j in range(i + 1, len(scripts)):
name_similarity = editdistance.eval(ntpath.basename(scripts[i].name),
ntpath.basename(scripts[j].name))
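                    # note: this stores a raw edit distance, so smaller values mean more
                    # similar file names (the opposite direction of the cosine pairs above)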
sim_pair_list.append(
SimilarityPair(type='name_similarity',
src_script_id=scripts[i].id,
tar_script_id=scripts[j].id,
similarity=name_similarity))
SimilarityPair.objects.bulk_create(sim_pair_list)
sim_pair_list = []
class DictToken(models.Model):
dictionary = models.ForeignKey(Dictionary, related_name='dic_tokens')
index = models.IntegerField()
text = models.TextField(default="", blank=True, null=True)
document_frequency = models.IntegerField()
scripts = models.ManyToManyField(Script, through='TokenVectorElement', related_name='dic_tokens')
def __repr__(self):
return self.text
def __unicode__(self):
return self.__repr__()
class TopicModel(models.Model):
dictionary = models.ForeignKey(Dictionary)
name = models.TextField(default="", blank=True)
description = models.CharField(max_length=200)
time = models.DateTimeField(auto_now_add=True)
perplexity = models.FloatField(default=0)
def load_from_file(self):
from gensim.models import LdaMulticore
return LdaMulticore.load("lda_out_%d.model" % self.id)
def save_to_file(self, gensim_lda):
gensim_lda.save("lda_out_%d.model" % self.id)
def get_probable_topic(self, script):
"""For this model, get the most likely topic for the script."""
script_topics = script.topic_probabilities\
.filter(topic_model=self)\
.only('topic', 'probability')
max_prob = -100000
probable_topic = None
for mt in script_topics:
if mt.probability > max_prob:
probable_topic = mt.topic
max_prob = mt.probability
return probable_topic
class Topic(models.Model):
model = models.ForeignKey(TopicModel, related_name='topics')
name = models.TextField(default="", blank=True)
description = models.CharField(max_length=200)
index = models.IntegerField()
alpha = models.FloatField()
scripts = models.ManyToManyField(Script, through='ScriptTopic', related_name='topics')
tokens = models.ManyToManyField(DictToken, through='TopicDictToken', related_name='topics')
class TopicDictToken(models.Model):
token = models.ForeignKey(DictToken, related_name='topic_scores')
topic = models.ForeignKey(Topic, related_name='token_scores')
token_index = models.IntegerField()
probability = models.FloatField()
class ScriptTopic(models.Model):
class Meta:
index_together = (
('topic_model', 'script'),
('script', 'topic'),
)
topic_model = models.ForeignKey(TopicModel, db_index=False)
topic = models.ForeignKey(Topic, related_name='script_probabilities')
script = models.ForeignKey(Script, related_name="topic_probabilities", db_index=False)
probability = models.FloatField()
@classmethod
def get_examples(cls, topic):
examples = cls.objects.filter(topic=topic, probability__gte=0.5).distinct()
return examples.order_by('-probability')
class ScriptDiffTopic(models.Model):
class Meta:
index_together = (
('topic_model', 'script_diff'),
('script_diff', 'topic'),
)
topic_model = models.ForeignKey(TopicModel, db_index=False)
topic = models.ForeignKey(Topic, related_name='script_diff_probabilities')
script_diff = models.ForeignKey(ScriptDiff, related_name="diff_topic_probabilities", db_index=False)
probability = models.FloatField()
@classmethod
def get_examples(cls, topic):
examples = cls.objects.filter(topic=topic, probability__gte=0.5).distinct()
return examples.order_by('-probability')
class TokenVectorElement(models.Model):
dictionary = models.ForeignKey(Dictionary, db_index=False, default=None, null=True, blank=True)
script = models.ForeignKey(Script, related_name="token_vector_elements")
dic_token = models.ForeignKey(DictToken, related_name="token_vector_elements")
line = models.ForeignKey(Line, related_name="token_vector_elements", default=None, null=True, blank=True)
frequency = models.IntegerField(default=0)
dic_token_index = models.IntegerField(default=0)
tfidf = models.FloatField(default=0.0)
def to_tuple(self, use_tfidf=True):
return (self.dic_token_index, self.tfidf) if use_tfidf else (self.dic_token_index, self.frequency)
class DifferenceNote(models.Model):
src_script = models.ForeignKey(Script, related_name="difference_notes")
tar_script = models.ForeignKey(Script)
RELATION_CHOICES = (
('<', 'src is older'),
('=', 'may be the same'),
('>', 'tar is older'),
('?', 'tar is older'),
('U', 'Undefined'),
)
relative_relation = models.CharField(max_length=1, choices=RELATION_CHOICES, default='U')
note = models.TextField(default="", blank=True)
class Meta:
index_together = (
"src_script", "tar_script"
)
class DiffTokenVectorElement(models.Model):
dictionary = models.ForeignKey(Dictionary, db_index=False, default=None, null=True, blank=True)
script_diff = models.ForeignKey(ScriptDiff, related_name="diff_token_vector_elements")
dic_token = models.ForeignKey(DictToken, related_name="diff_token_vector_elements")
frequency = models.IntegerField(default=0)
dic_token_index = models.IntegerField(default=0)
tfidf = models.FloatField(default=0.0)
def to_tuple(self, use_tfidf=True):
return (self.dic_token_index, self.tfidf) if use_tfidf else (self.dic_token_index, self.frequency)
|
|
#!/usr/bin/env python
"""
Analyze docstrings to detect errors.
If no argument is provided, it does a quick check of docstrings and returns
a csv with all API functions and results of basic checks.
If a function or method is provided in the form "pandas.function",
"pandas.module.class.method", etc. a list of all errors in the docstring for
the specified function or method.
Usage::
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
import os
import sys
import json
import re
import glob
import functools
import collections
import argparse
import pydoc
import inspect
import importlib
import doctest
import tempfile
import ast
import textwrap
import flake8.main.application
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
# The "Template" backend makes matplotlib not plot anything. This is useful
# to avoid plot windows being opened by the doctests while running the
# script. It must be set before matplotlib is loaded.
# We don't warn about the number of open plots, as none is actually opened.
os.environ["MPLBACKEND"] = "Template"
import matplotlib
matplotlib.rc("figure", max_open_warning=10000)
import numpy
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_PATH))
import pandas
sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
from numpydoc.docscrape import NumpyDocString
from pandas.io.formats.printing import pprint_thing
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
DIRECTIVES = ["versionadded", "versionchanged", "deprecated"]
ALLOWED_SECTIONS = [
"Parameters",
"Attributes",
"Methods",
"Returns",
"Yields",
"Other Parameters",
"Raises",
"Warns",
"See Also",
"Notes",
"References",
"Examples",
]
ERROR_MSGS = {
"GL01": "Docstring text (summary) should start in the line immediately "
"after the opening quotes (not in the same line, or leaving a "
"blank line in between)",
"GL02": "Closing quotes should be placed in the line after the last text "
"in the docstring (do not close the quotes in the same line as "
"the text, or leave a blank line between the last text and the "
"quotes)",
"GL03": "Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
"GL04": "Private classes ({mentioned_private_classes}) should not be "
"mentioned in public docstrings",
"GL05": 'Tabs found at the start of line "{line_with_tabs}", please use '
"whitespace only",
"GL06": 'Found unknown section "{section}". Allowed sections are: '
"{allowed_sections}",
"GL07": "Sections are in the wrong order. Correct order is: " "{correct_sections}",
"GL08": "The object does not have a docstring",
"GL09": "Deprecation warning should precede extended summary",
"SS01": "No summary found (a short summary in a single line should be "
"present at the beginning of the docstring)",
"SS02": "Summary does not start with a capital letter",
"SS03": "Summary does not end with a period",
"SS04": "Summary contains heading whitespaces",
"SS05": "Summary must start with infinitive verb, not third person "
'(e.g. use "Generate" instead of "Generates")',
"SS06": "Summary should fit in a single line",
"ES01": "No extended summary found",
"PR01": "Parameters {missing_params} not documented",
"PR02": "Unknown parameters {unknown_params}",
"PR03": "Wrong parameters order. Actual: {actual_params}. "
"Documented: {documented_params}",
"PR04": 'Parameter "{param_name}" has no type',
"PR05": 'Parameter "{param_name}" type should not finish with "."',
"PR06": 'Parameter "{param_name}" type should use "{right_type}" instead '
'of "{wrong_type}"',
"PR07": 'Parameter "{param_name}" has no description',
"PR08": 'Parameter "{param_name}" description should start with a '
"capital letter",
"PR09": 'Parameter "{param_name}" description should finish with "."',
"PR10": 'Parameter "{param_name}" requires a space before the colon '
"separating the parameter name and type",
"RT01": "No Returns section found",
"RT02": "The first line of the Returns section should contain only the "
"type, unless multiple values are being returned",
"RT03": "Return value has no description",
"RT04": "Return value description should start with a capital letter",
"RT05": 'Return value description should finish with "."',
"YD01": "No Yields section found",
"SA01": "See Also section not found",
"SA02": "Missing period at end of description for See Also "
'"{reference_name}" reference',
"SA03": "Description should be capitalized for See Also "
'"{reference_name}" reference',
"SA04": 'Missing description for See Also "{reference_name}" reference',
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
"EX01": "No examples section found",
"EX02": "Examples do not pass tests:\n{doctest_log}",
"EX03": "flake8 error: {error_code} {error_message}{times_happening}",
"EX04": "Do not import {imported_library}, as it is imported "
"automatically for the examples (numpy as np, pandas as pd)",
}
def error(code, **kwargs):
"""
Return a tuple with the error code and the message with variables replaced.
This is syntactic sugar so instead of:
- `('EX02', ERROR_MSGS['EX02'].format(doctest_log=log))`
We can simply use:
- `error('EX02', doctest_log=log)`
Parameters
----------
code : str
Error code.
**kwargs
Values for the variables in the error messages
Returns
-------
code : str
Error code.
message : str
Error message with variables replaced.
"""
return (code, ERROR_MSGS[code].format(**kwargs))
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
        The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = "pandas"
previous_line = current_section = current_subsection = ""
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set("-"):
current_section = previous_line
continue
if set(line) == set("~"):
current_subsection = previous_line
continue
if line.startswith(".. currentmodule::"):
current_module = line.replace(".. currentmodule::", "").strip()
continue
if line == ".. autosummary::":
position = "autosummary"
continue
if position == "autosummary":
if line == "":
position = "items"
continue
if position == "items":
if line == "":
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split("."):
func = getattr(func, part)
yield (
".".join([current_module, item]),
func,
current_section,
current_subsection,
)
previous_line = line
class Docstring:
def __init__(self, name):
self.name = name
obj = self._load_obj(name)
self.obj = obj
self.code_obj = self._to_original_callable(obj)
self.raw_doc = obj.__doc__ or ""
self.clean_doc = pydoc.getdoc(obj)
self.doc = NumpyDocString(self.clean_doc)
def __len__(self):
return len(self.raw_doc)
@staticmethod
def _load_obj(name):
"""
Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'>
"""
for maxsplit in range(1, name.count(".") + 1):
# TODO when py3 only replace by: module, *func_parts = ...
func_name_split = name.rsplit(".", maxsplit)
module = func_name_split[0]
func_parts = func_name_split[1:]
try:
obj = importlib.import_module(module)
except ImportError:
pass
else:
continue
if "obj" not in locals():
raise ImportError("No module can be imported " 'from "{}"'.format(name))
for part in func_parts:
obj = getattr(obj, part)
return obj
@staticmethod
def _to_original_callable(obj):
"""
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
"""
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith("<") and f.endswith(">"):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None
@property
def type(self):
return type(self.obj).__name__
@property
def is_function_or_method(self):
# TODO(py27): remove ismethod
return inspect.isfunction(self.obj) or inspect.ismethod(self.obj)
@property
def source_file_name(self):
"""
File name where the object is implemented (e.g. pandas/core/frame.py).
"""
try:
fname = inspect.getsourcefile(self.code_obj)
except TypeError:
# In some cases the object is something complex like a cython
# object that can't be easily introspected. It's better to
# return None as the source file of the object than to crash.
pass
else:
if fname:
fname = os.path.relpath(fname, BASE_PATH)
return fname
@property
def source_file_def_line(self):
"""
Line number where the object is defined in its file.
"""
try:
return inspect.getsourcelines(self.code_obj)[-1]
except (OSError, TypeError):
# In some cases the object is something complex like a cython
# object that can't be easily introspected. It's better to
# return None as the line number than to crash.
pass
@property
def github_url(self):
url = "https://github.com/pandas-dev/pandas/blob/master/"
url += "{}#L{}".format(self.source_file_name, self.source_file_def_line)
return url
@property
def start_blank_lines(self):
i = None
if self.raw_doc:
for i, row in enumerate(self.raw_doc.split("\n")):
if row.strip():
break
return i
@property
def end_blank_lines(self):
i = None
if self.raw_doc:
for i, row in enumerate(reversed(self.raw_doc.split("\n"))):
if row.strip():
break
return i
@property
def double_blank_lines(self):
prev = True
for row in self.raw_doc.split("\n"):
if not prev and not row.strip():
return True
prev = row.strip()
return False
@property
def section_titles(self):
sections = []
self.doc._doc.reset()
while not self.doc._doc.eof():
content = self.doc._read_to_next_section()
if (
len(content) > 1
and len(content[0]) == len(content[1])
and set(content[1]) == {"-"}
):
sections.append(content[0])
return sections
@property
def summary(self):
return " ".join(self.doc["Summary"])
@property
def num_summary_lines(self):
return len(self.doc["Summary"])
@property
def extended_summary(self):
if not self.doc["Extended Summary"] and len(self.doc["Summary"]) > 1:
return " ".join(self.doc["Summary"])
return " ".join(self.doc["Extended Summary"])
@property
def needs_summary(self):
return not (bool(self.summary) and bool(self.extended_summary))
@property
def doc_parameters(self):
return collections.OrderedDict(
(name, (type_, "".join(desc)))
for name, type_, desc in self.doc["Parameters"]
)
@property
def signature_parameters(self):
if inspect.isclass(self.obj):
if hasattr(self.obj, "_accessors") and (
self.name.split(".")[-1] in self.obj._accessors
):
# accessor classes have a signature, but we don't want to show it
return tuple()
try:
sig = inspect.getfullargspec(self.obj)
except (TypeError, ValueError):
# Some objects, mainly in C extensions do not support introspection
# of the signature
return tuple()
params = sig.args
if sig.varargs:
params.append("*" + sig.varargs)
if sig.varkw:
params.append("**" + sig.varkw)
params = tuple(params)
if params and params[0] in ("self", "cls"):
return params[1:]
return params
@property
def parameter_mismatches(self):
errs = []
signature_params = self.signature_parameters
doc_params = tuple(self.doc_parameters)
missing = set(signature_params) - set(doc_params)
if missing:
errs.append(error("PR01", missing_params=pprint_thing(missing)))
extra = set(doc_params) - set(signature_params)
if extra:
errs.append(error("PR02", unknown_params=pprint_thing(extra)))
if (
not missing
and not extra
and signature_params != doc_params
and not (not signature_params and not doc_params)
):
errs.append(
error(
"PR03", actual_params=signature_params, documented_params=doc_params
)
)
return errs
@property
def correct_parameters(self):
return not bool(self.parameter_mismatches)
def parameter_type(self, param):
return self.doc_parameters[param][0]
def parameter_desc(self, param):
desc = self.doc_parameters[param][1]
# Find and strip out any sphinx directives
for directive in DIRECTIVES:
full_directive = ".. {}".format(directive)
if full_directive in desc:
# Only retain any description before the directive
desc = desc[: desc.index(full_directive)]
return desc
@property
def see_also(self):
result = collections.OrderedDict()
for funcs, desc in self.doc["See Also"]:
for func, _ in funcs:
result[func] = "".join(desc)
return result
@property
def examples(self):
return self.doc["Examples"]
@property
def returns(self):
return self.doc["Returns"]
@property
def yields(self):
return self.doc["Yields"]
@property
def method_source(self):
try:
source = inspect.getsource(self.obj)
except TypeError:
return ""
return textwrap.dedent(source)
@property
def method_returns_something(self):
"""
Check if the docstrings method can return something.
Bare returns, returns of None, and returns from nested functions are
not considered.
Returns
-------
bool
Whether the docstrings method can return something.
"""
def get_returns_not_on_nested_functions(node):
returns = [node] if isinstance(node, ast.Return) else []
for child in ast.iter_child_nodes(node):
# Ignore nested functions and its subtrees.
if not isinstance(child, ast.FunctionDef):
child_returns = get_returns_not_on_nested_functions(child)
returns.extend(child_returns)
return returns
tree = ast.parse(self.method_source).body
if tree:
returns = get_returns_not_on_nested_functions(tree[0])
return_values = [r.value for r in returns]
# Replace NameConstant nodes valued None for None.
for i, v in enumerate(return_values):
if isinstance(v, ast.NameConstant) and v.value is None:
return_values[i] = None
return any(return_values)
else:
return False
@property
def first_line_ends_in_dot(self):
if self.doc:
return self.doc.split("\n")[0][-1] == "."
@property
def deprecated(self):
return ".. deprecated:: " in (self.summary + self.extended_summary)
@property
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
@property
def examples_errors(self):
flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
finder = doctest.DocTestFinder()
runner = doctest.DocTestRunner(optionflags=flags)
context = {"np": numpy, "pd": pandas}
error_msgs = ""
for test in finder.find(self.raw_doc, self.name, globs=context):
f = StringIO()
runner.run(test, out=f.write)
error_msgs += f.getvalue()
return error_msgs
@property
def examples_source_code(self):
lines = doctest.DocTestParser().get_examples(self.raw_doc)
return [line.source for line in lines]
def validate_pep8(self):
if not self.examples:
return
# F401 is needed to not generate flake8 errors in examples
# that do not use numpy or pandas
content = "".join(
(
"import numpy as np # noqa: F401\n",
"import pandas as pd # noqa: F401\n",
*self.examples_source_code,
)
)
application = flake8.main.application.Application()
application.initialize(["--quiet"])
with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
file.write(content)
file.flush()
application.run_checks([file.name])
# We need this to avoid flake8 printing the names of the files to
# the standard output
application.formatter.write = lambda line, source: None
application.report()
yield from application.guide.stats.statistics_for("")
def get_validation_data(doc):
"""
Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors that occurred during validation.
warnings : list of tuple
Warnings that occurred during validation.
examples_errs : str
Examples usage displayed alongside the errors, otherwise an empty string.
Notes
-----
The error codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated are documented only in the source code of this
function.
"""
errs = []
wrns = []
if not doc.raw_doc:
errs.append(error("GL08"))
return errs, wrns, ""
if doc.start_blank_lines != 1:
errs.append(error("GL01"))
if doc.end_blank_lines != 1:
errs.append(error("GL02"))
if doc.double_blank_lines:
errs.append(error("GL03"))
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append(error("GL04", mentioned_private_classes=", ".join(mentioned_errs)))
for line in doc.raw_doc.splitlines():
if re.match("^ *\t", line):
errs.append(error("GL05", line_with_tabs=line.lstrip()))
unexpected_sections = [
section for section in doc.section_titles if section not in ALLOWED_SECTIONS
]
for section in unexpected_sections:
errs.append(
error("GL06", section=section, allowed_sections=", ".join(ALLOWED_SECTIONS))
)
correct_order = [
section for section in ALLOWED_SECTIONS if section in doc.section_titles
]
if correct_order != doc.section_titles:
errs.append(error("GL07", correct_sections=", ".join(correct_order)))
if doc.deprecated and not doc.extended_summary.startswith(".. deprecated:: "):
errs.append(error("GL09"))
if not doc.summary:
errs.append(error("SS01"))
else:
if not doc.summary[0].isupper():
errs.append(error("SS02"))
if doc.summary[-1] != ".":
errs.append(error("SS03"))
if doc.summary != doc.summary.lstrip():
errs.append(error("SS04"))
elif doc.is_function_or_method and doc.summary.split(" ")[0][-1] == "s":
errs.append(error("SS05"))
if doc.num_summary_lines > 1:
errs.append(error("SS06"))
if not doc.extended_summary:
wrns.append(("ES01", "No extended summary found"))
# PR01: Parameters not documented
# PR02: Unknown parameters
# PR03: Wrong parameters order
errs += doc.parameter_mismatches
for param in doc.doc_parameters:
if not param.startswith("*"): # Check can ignore var / kwargs
if not doc.parameter_type(param):
if ":" in param:
errs.append(error("PR10", param_name=param.split(":")[0]))
else:
errs.append(error("PR04", param_name=param))
else:
if doc.parameter_type(param)[-1] == ".":
errs.append(error("PR05", param_name=param))
common_type_errors = [
("integer", "int"),
("boolean", "bool"),
("string", "str"),
]
for wrong_type, right_type in common_type_errors:
if wrong_type in doc.parameter_type(param):
errs.append(
error(
"PR06",
param_name=param,
right_type=right_type,
wrong_type=wrong_type,
)
)
if not doc.parameter_desc(param):
errs.append(error("PR07", param_name=param))
else:
if not doc.parameter_desc(param)[0].isupper():
errs.append(error("PR08", param_name=param))
if doc.parameter_desc(param)[-1] != ".":
errs.append(error("PR09", param_name=param))
if doc.is_function_or_method:
if not doc.returns:
if doc.method_returns_something:
errs.append(error("RT01"))
else:
if len(doc.returns) == 1 and doc.returns[0].name:
errs.append(error("RT02"))
for name_or_type, type_, desc in doc.returns:
if not desc:
errs.append(error("RT03"))
else:
desc = " ".join(desc)
if not desc[0].isupper():
errs.append(error("RT04"))
if not desc.endswith("."):
errs.append(error("RT05"))
if not doc.yields and "yield" in doc.method_source:
errs.append(error("YD01"))
if not doc.see_also:
wrns.append(error("SA01"))
else:
for rel_name, rel_desc in doc.see_also.items():
if rel_desc:
if not rel_desc.endswith("."):
errs.append(error("SA02", reference_name=rel_name))
if not rel_desc[0].isupper():
errs.append(error("SA03", reference_name=rel_name))
else:
errs.append(error("SA04", reference_name=rel_name))
if rel_name.startswith("pandas."):
errs.append(
error(
"SA05",
reference_name=rel_name,
right_reference=rel_name[len("pandas.") :],
)
)
examples_errs = ""
if not doc.examples:
wrns.append(error("EX01"))
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append(error("EX02", doctest_log=examples_errs))
for err in doc.validate_pep8():
errs.append(
error(
"EX03",
error_code=err.error_code,
error_message=err.message,
times_happening=" ({} times)".format(err.count)
if err.count > 1
else "",
)
)
examples_source_code = "".join(doc.examples_source_code)
for wrong_import in ("numpy", "pandas"):
if "import {}".format(wrong_import) in examples_source_code:
errs.append(error("EX04", imported_library=wrong_import))
return errs, wrns, examples_errs
def validate_one(func_name):
"""
Validate the docstring for the given func_name.
Parameters
----------
func_name : str
Name of the function whose docstring will be evaluated (e.g. pandas.read_csv).
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
"""
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
return {
"type": doc.type,
"docstring": doc.clean_doc,
"deprecated": doc.deprecated,
"file": doc.source_file_name,
"file_line": doc.source_file_def_line,
"github_link": doc.github_url,
"errors": errs,
"warnings": wrns,
"examples_errors": examples_errs,
}
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated : bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
# functions from the API docs
api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst")
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
shared_code_key = doc_info["file"], doc_info["file_line"]
shared_code = seen.get(shared_code_key, "")
result[func_name].update(
{
"in_api": True,
"section": section,
"subsection": subsection,
"shared_code_with": shared_code,
}
)
seen[shared_code_key] = func_name
# functions from introspecting Series and DataFrame
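# zip(*api_items) transposes the list of 4-tuples; its first row holds the
# fully qualified names, which we use to avoid re-validating API-doc items.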
api_item_names = set(list(zip(*api_items))[0])
for class_ in (pandas.Series, pandas.DataFrame):
for member in inspect.getmembers(class_):
func_name = "pandas.{}.{}".format(class_.__name__, member[0])
if not member[0].startswith("_") and func_name not in api_item_names:
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
result[func_name]["in_api"] = False
return result
def main(func_name, prefix, errors, output_format, ignore_deprecated):
def header(title, width=80, char="#"):
full_line = char * width
side_len = (width - len(title) - 2) // 2
adj = "" if len(title) % 2 == 0 else " "
title_line = "{side} {title}{adj} {side}".format(
side=char * side_len, title=title, adj=adj
)
return "\n{full_line}\n{title_line}\n{full_line}\n\n".format(
full_line=full_line, title_line=title_line
)
exit_status = 0
if func_name is None:
result = validate_all(prefix, ignore_deprecated)
if output_format == "json":
output = json.dumps(result)
else:
if output_format == "default":
output_format = "{text}\n"
elif output_format == "azure":
output_format = (
"##vso[task.logissue type=error;"
"sourcepath={path};"
"linenumber={row};"
"code={code};"
"]{text}\n"
)
else:
raise ValueError('Unknown output_format "{}"'.format(output_format))
output = ""
for name, res in result.items():
for err_code, err_desc in res["errors"]:
# The script would be faster if it skipped the filtered-out error
# codes up front instead of validating everything and filtering
# afterwards, but that would complicate the code too much.
if errors and err_code not in errors:
continue
exit_status += 1
output += output_format.format(
name=name,
path=res["file"],
row=res["file_line"],
code=err_code,
text="{}: {}".format(name, err_desc),
)
sys.stdout.write(output)
else:
result = validate_one(func_name)
sys.stderr.write(header("Docstring ({})".format(func_name)))
sys.stderr.write("{}\n".format(result["docstring"]))
sys.stderr.write(header("Validation"))
if result["errors"]:
sys.stderr.write("{} Errors found:\n".format(len(result["errors"])))
for err_code, err_desc in result["errors"]:
# Failing examples are printed at the end
if err_code == "EX02":
sys.stderr.write("\tExamples do not pass tests\n")
continue
sys.stderr.write("\t{}\n".format(err_desc))
if result["warnings"]:
sys.stderr.write("{} Warnings found:\n".format(len(result["warnings"])))
for wrn_code, wrn_desc in result["warnings"]:
sys.stderr.write("\t{}\n".format(wrn_desc))
if not result["errors"]:
sys.stderr.write('Docstring for "{}" correct. :)\n'.format(func_name))
if result["examples_errors"]:
sys.stderr.write(header("Doctests"))
sys.stderr.write(result["examples_errors"])
return exit_status
if __name__ == "__main__":
format_opts = "default", "json", "azure"
func_help = (
"function or method to validate (e.g. pandas.DataFrame.head) "
"if not provided, all docstrings are validated and returned "
"as JSON"
)
argparser = argparse.ArgumentParser(description="validate pandas docstrings")
argparser.add_argument("function", nargs="?", default=None, help=func_help)
argparser.add_argument(
"--format",
default="default",
choices=format_opts,
help="format of the output when validating "
"multiple docstrings (ignored when validating one)."
"It can be {}".format(str(format_opts)[1:-1]),
)
argparser.add_argument(
"--prefix",
default=None,
help="pattern for the "
"docstring names, in order to decide which ones "
'will be validated. A prefix "pandas.Series.str." '
"will make the script validate all the docstrings "
"of methods starting with this pattern. It is "
"ignored if the parameter function is provided",
)
argparser.add_argument(
"--errors",
default=None,
help="comma separated "
"list of error codes to validate. By default it "
"validates all errors (ignored when validating "
"a single docstring)",
)
argparser.add_argument(
"--ignore_deprecated",
default=False,
action="store_true",
help="if this flag is set, "
"deprecated objects are ignored when validating "
"all docstrings",
)
args = argparser.parse_args()
sys.exit(
main(
args.function,
args.prefix,
args.errors.split(",") if args.errors else None,
args.format,
args.ignore_deprecated,
)
)
|
|
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
from collections import Counter
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests)
from typing import Dict, List, Tuple
def most_popular_new_interests(
user_interests: List[str],
max_results: int = 5) -> List[Tuple[str, int]]:
suggestions = [(interest, frequency)
for interest, frequency in popular_interests.most_common()
if interest not in user_interests]
return suggestions[:max_results]
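# A quick sanity check with a hypothetical user (not one of users_interests):
# the most popular interests they don't already have should come back first.
# Ties are broken by insertion order inside the Counter (a CPython detail).
assert most_popular_new_interests(["NoSQL", "MongoDB"])[:2] == [("Python", 4), ("R", 4)]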
unique_interests = sorted({interest
for user_interests in users_interests
for interest in user_interests})
assert unique_interests[:6] == [
'Big Data',
'C++',
'Cassandra',
'HBase',
'Hadoop',
'Haskell',
# ...
]
def make_user_interest_vector(user_interests: List[str]) -> List[int]:
"""
Given a list of interests, produce a vector whose ith element is 1
if unique_interests[i] is in the list, 0 otherwise
"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
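# A tiny sanity check for a hypothetical single-interest user: 'Big Data' is
# the first entry of the sorted unique_interests, so only index 0 is set.
assert make_user_interest_vector(["Big Data"]) == [1] + [0] * (len(unique_interests) - 1)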
user_interest_vectors = [make_user_interest_vector(user_interests)
for user_interests in users_interests]
from scratch.nlp import cosine_similarity
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_vectors]
for interest_vector_i in user_interest_vectors]
# Users 0 and 9 share interests in Hadoop, Java, and Big Data
assert 0.56 < user_similarities[0][9] < 0.58, "several shared interests"
# Users 0 and 8 share only one interest: Big Data
assert 0.18 < user_similarities[0][8] < 0.20, "only one shared interest"
def most_similar_users_to(user_id: int) -> List[Tuple[int, float]]:
pairs = [(other_user_id, similarity) # Find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity.
return sorted(pairs, # Sort them
key=lambda pair: pair[-1], # most similar
reverse=True) # first.
most_similar_to_zero = most_similar_users_to(0)
user, score = most_similar_to_zero[0]
assert user == 9
assert 0.56 < score < 0.57
user, score = most_similar_to_zero[1]
assert user == 1
assert 0.33 < score < 0.34
from collections import defaultdict
def user_based_suggestions(user_id: int,
include_current_interests: bool = False):
# Sum up the similarities.
suggestions: Dict[str, float] = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# Convert them to a sorted list.
suggestions = sorted(suggestions.items(),
key=lambda pair: pair[-1], # weight
reverse=True)
# And (maybe) exclude interests the user already has.
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
ubs0 = user_based_suggestions(0)
interest, score = ubs0[0]
assert interest == 'MapReduce'
assert 0.56 < score < 0.57
interest, score = ubs0[1]
assert interest == 'MongoDB'
assert 0.50 < score < 0.51
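# For item-based collaborative filtering we transpose the user/interest
# matrix: each row now corresponds to one interest, with a 0/1 entry per user.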
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_vectors]
for j, _ in enumerate(unique_interests)]
# e.g. interest_user_matrix[0] (users interested in "Big Data") is [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id: int):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda pair: pair[-1],
reverse=True)
msit0 = most_similar_interests_to(0)
assert msit0[0][0] == 'Hadoop'
assert 0.815 < msit0[0][1] < 0.817
assert msit0[1][0] == 'Java'
assert 0.666 < msit0[1][1] < 0.667
def item_based_suggestions(user_id: int,
include_current_interests: bool = False):
# Add up the similar interests
suggestions = defaultdict(float)
user_interest_vector = user_interest_vectors[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
# Sort them by weight
suggestions = sorted(suggestions.items(),
key=lambda pair: pair[-1],
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
# Expected output of item_based_suggestions(0):
# [('MapReduce', 1.861807319565799),
#  ('Postgres', 1.3164965809277263),
#  ('MongoDB', 1.3164965809277263),
#  ('NoSQL', 1.2844570503761732),
#  ('programming languages', 0.5773502691896258),
#  ('MySQL', 0.5773502691896258),
#  ('Haskell', 0.5773502691896258),
#  ('databases', 0.5773502691896258),
#  ('neural networks', 0.4082482904638631),
#  ('deep learning', 0.4082482904638631),
#  ('C++', 0.4082482904638631),
#  ('artificial intelligence', 0.4082482904638631),
#  ('Python', 0.2886751345948129),
#  ('R', 0.2886751345948129)]
ibs0 = item_based_suggestions(0)
assert ibs0[0][0] == 'MapReduce'
assert 1.86 < ibs0[0][1] < 1.87
assert ibs0[1][0] in ('Postgres', 'MongoDB') # A tie
assert 1.31 < ibs0[1][1] < 1.32
def main():
# Replace these with the locations of your files; as written they point to
# files in the current directory.
MOVIES = "u.item" # pipe-delimited: movie_id|title|...
RATINGS = "u.data" # tab-delimited: user_id, movie_id, rating, timestamp
from typing import NamedTuple
class Rating(NamedTuple):
user_id: str
movie_id: str
rating: float
import csv
# We specify this encoding to avoid a UnicodeDecodeError.
# see: https://stackoverflow.com/a/53136168/1076346
with open(MOVIES, encoding="iso-8859-1") as f:
reader = csv.reader(f, delimiter="|")
movies = {movie_id: title for movie_id, title, *_ in reader}
# Create a list of [Rating]
with open(RATINGS, encoding="iso-8859-1") as f:
reader = csv.reader(f, delimiter="\t")
ratings = [Rating(user_id, movie_id, float(rating))
for user_id, movie_id, rating, _ in reader]
# 1682 movies rated by 943 users
assert len(movies) == 1682
assert len(list({rating.user_id for rating in ratings})) == 943
import re
# Data structure for accumulating ratings by movie_id
star_wars_ratings = {movie_id: []
for movie_id, title in movies.items()
if re.search("Star Wars|Empire Strikes|Jedi", title)}
# Iterate over ratings, accumulating the Star Wars ones
for rating in ratings:
if rating.movie_id in star_wars_ratings:
star_wars_ratings[rating.movie_id].append(rating.rating)
# Compute the average rating for each movie
avg_ratings = [(sum(title_ratings) / len(title_ratings), movie_id)
for movie_id, title_ratings in star_wars_ratings.items()]
# And then print them in order
for avg_rating, movie_id in sorted(avg_ratings, reverse=True):
print(f"{avg_rating:.2f} {movies[movie_id]}")
import random
random.seed(0)
random.shuffle(ratings)
split1 = int(len(ratings) * 0.7)
split2 = int(len(ratings) * 0.85)
train = ratings[:split1] # 70% of the data
validation = ratings[split1:split2] # 15% of the data
test = ratings[split2:] # 15% of the data
avg_rating = sum(rating.rating for rating in train) / len(train)
baseline_error = sum((rating.rating - avg_rating) ** 2
for rating in test) / len(test)
# This is what we hope to do better than
assert 1.26 < baseline_error < 1.27
# Embedding vectors for matrix factorization model
from scratch.deep_learning import random_tensor
EMBEDDING_DIM = 2
# Find unique ids
user_ids = {rating.user_id for rating in ratings}
movie_ids = {rating.movie_id for rating in ratings}
# Then create a random vector per id
user_vectors = {user_id: random_tensor(EMBEDDING_DIM)
for user_id in user_ids}
movie_vectors = {movie_id: random_tensor(EMBEDDING_DIM)
for movie_id in movie_ids}
# Training loop for matrix factorization model
from typing import List
import tqdm
from scratch.linear_algebra import dot
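# The model predicts rating ~= dot(user_vector, movie_vector); `loop` runs one
# pass over the data, and only takes gradient steps (plain SGD on the squared
# error) when a learning_rate is supplied, so it doubles as an evaluation pass.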
def loop(dataset: List[Rating],
learning_rate: float = None) -> None:
with tqdm.tqdm(dataset) as t:
loss = 0.0
for i, rating in enumerate(t):
movie_vector = movie_vectors[rating.movie_id]
user_vector = user_vectors[rating.user_id]
predicted = dot(user_vector, movie_vector)
error = predicted - rating.rating
loss += error ** 2
if learning_rate is not None:
# predicted = m_0 * u_0 + ... + m_k * u_k
# So each u_j enters the output with coefficient m_j
# and each m_j enters the output with coefficient u_j
user_gradient = [error * m_j for m_j in movie_vector]
movie_gradient = [error * u_j for u_j in user_vector]
# Take gradient steps
for j in range(EMBEDDING_DIM):
user_vector[j] -= learning_rate * user_gradient[j]
movie_vector[j] -= learning_rate * movie_gradient[j]
t.set_description(f"avg loss: {loss / (i + 1)}")
learning_rate = 0.05
for epoch in range(20):
learning_rate *= 0.9
print(epoch, learning_rate)
loop(train, learning_rate=learning_rate)
loop(validation)
loop(test)
from scratch.working_with_data import pca, transform
original_vectors = [vector for vector in movie_vectors.values()]
components = pca(original_vectors, 2)
ratings_by_movie = defaultdict(list)
for rating in ratings:
ratings_by_movie[rating.movie_id].append(rating.rating)
vectors = [
(movie_id,
sum(ratings_by_movie[movie_id]) / len(ratings_by_movie[movie_id]),
movies[movie_id],
vector)
for movie_id, vector in zip(movie_vectors.keys(),
transform(original_vectors, components))
]
# Print top 25 and bottom 25 by first principal component
print(sorted(vectors, key=lambda v: v[-1][0])[:25])
print(sorted(vectors, key=lambda v: v[-1][0])[-25:])
if __name__ == "__main__": main()
|
|
"""Test Hue bridge."""
import asyncio
from unittest.mock import AsyncMock, Mock, patch
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.components.hue import bridge, errors
from homeassistant.components.hue.const import (
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
)
from homeassistant.exceptions import ConfigEntryNotReady
ORIG_SUBSCRIBE_EVENTS = bridge.HueBridge._subscribe_events
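# Keep a reference to the real coroutine before the autouse fixture below
# patches it, so test_event_updates can still drive the original implementation.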
@pytest.fixture(autouse=True)
def mock_subscribe_events():
"""Mock subscribe events method."""
with patch(
"homeassistant.components.hue.bridge.HueBridge._subscribe_events"
) as mock:
yield mock
async def test_bridge_setup(hass, mock_subscribe_events):
"""Test a successful setup."""
entry = Mock()
api = Mock(initialize=AsyncMock())
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch("aiohue.Bridge", return_value=api), patch.object(
hass.config_entries, "async_forward_entry_setup"
) as mock_forward:
assert await hue_bridge.async_setup() is True
assert hue_bridge.api is api
assert len(mock_forward.mock_calls) == 3
forward_entries = {c[1][1] for c in mock_forward.mock_calls}
assert forward_entries == {"light", "binary_sensor", "sensor"}
assert len(mock_subscribe_events.mock_calls) == 1
async def test_bridge_setup_invalid_username(hass):
"""Test we start config flow if username is no longer whitelisted."""
entry = Mock()
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch.object(
bridge, "authenticate_bridge", side_effect=errors.AuthenticationRequired
), patch.object(hass.config_entries.flow, "async_init") as mock_init:
assert await hue_bridge.async_setup() is False
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][2]["data"] == {"host": "1.2.3.4"}
async def test_bridge_setup_timeout(hass):
"""Test we retry to connect if we cannot connect."""
entry = Mock()
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch.object(
bridge, "authenticate_bridge", side_effect=errors.CannotConnect
), pytest.raises(ConfigEntryNotReady):
await hue_bridge.async_setup()
async def test_reset_if_entry_had_wrong_auth(hass):
"""Test calling reset when the entry contained wrong auth."""
entry = Mock()
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch.object(
bridge, "authenticate_bridge", side_effect=errors.AuthenticationRequired
), patch.object(bridge, "create_config_flow") as mock_create:
assert await hue_bridge.async_setup() is False
assert len(mock_create.mock_calls) == 1
assert await hue_bridge.async_reset()
async def test_reset_unloads_entry_if_setup(hass, mock_subscribe_events):
"""Test calling reset while the entry has been setup."""
entry = Mock()
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch.object(bridge, "authenticate_bridge"), patch(
"aiohue.Bridge"
), patch.object(hass.config_entries, "async_forward_entry_setup") as mock_forward:
assert await hue_bridge.async_setup() is True
await asyncio.sleep(0)
assert len(hass.services.async_services()) == 0
assert len(mock_forward.mock_calls) == 3
assert len(mock_subscribe_events.mock_calls) == 1
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_forward:
assert await hue_bridge.async_reset()
assert len(mock_forward.mock_calls) == 3
assert len(hass.services.async_services()) == 0
async def test_handle_unauthorized(hass):
"""Test handling an unauthorized error on update."""
entry = Mock(async_setup=AsyncMock())
entry.data = {"host": "1.2.3.4", "username": "mock-username"}
entry.options = {CONF_ALLOW_HUE_GROUPS: False, CONF_ALLOW_UNREACHABLE: False}
hue_bridge = bridge.HueBridge(hass, entry)
with patch.object(bridge, "authenticate_bridge"), patch("aiohue.Bridge"):
assert await hue_bridge.async_setup() is True
assert hue_bridge.authorized is True
with patch.object(bridge, "create_config_flow") as mock_create:
await hue_bridge.handle_unauthorized_error()
assert hue_bridge.authorized is False
assert len(mock_create.mock_calls) == 1
assert mock_create.mock_calls[0][1][1] == "1.2.3.4"
GROUP_RESPONSE = {
"group_1": {
"name": "Group 1",
"lights": ["1", "2"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 254,
"hue": 10000,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
}
}
SCENE_RESPONSE = {
"scene_1": {
"name": "Cozy dinner",
"lights": ["1", "2"],
"owner": "ffffffffe0341b1b376a2389376a2389",
"recycle": True,
"locked": False,
"appdata": {"version": 1, "data": "myAppData"},
"picture": "",
"lastupdated": "2015-12-03T10:09:22",
"version": 2,
}
}
async def test_hue_activate_scene(hass, mock_api):
"""Test successful hue_activate_scene."""
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host", "username": "mock-username"},
"test",
options={CONF_ALLOW_HUE_GROUPS: True, CONF_ALLOW_UNREACHABLE: False},
)
hue_bridge = bridge.HueBridge(hass, config_entry)
mock_api.mock_group_responses.append(GROUP_RESPONSE)
mock_api.mock_scene_responses.append(SCENE_RESPONSE)
with patch("aiohue.Bridge", return_value=mock_api), patch.object(
hass.config_entries, "async_forward_entry_setup"
):
assert await hue_bridge.async_setup() is True
assert hue_bridge.api is mock_api
call = Mock()
call.data = {"group_name": "Group 1", "scene_name": "Cozy dinner"}
with patch("aiohue.Bridge", return_value=mock_api):
assert await hue_bridge.hue_activate_scene(call.data) is None
assert len(mock_api.mock_requests) == 3
assert mock_api.mock_requests[2]["json"]["scene"] == "scene_1"
assert "transitiontime" not in mock_api.mock_requests[2]["json"]
assert mock_api.mock_requests[2]["path"] == "groups/group_1/action"
async def test_hue_activate_scene_transition(hass, mock_api):
"""Test successful hue_activate_scene with transition."""
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host", "username": "mock-username"},
"test",
options={CONF_ALLOW_HUE_GROUPS: True, CONF_ALLOW_UNREACHABLE: False},
)
hue_bridge = bridge.HueBridge(hass, config_entry)
mock_api.mock_group_responses.append(GROUP_RESPONSE)
mock_api.mock_scene_responses.append(SCENE_RESPONSE)
with patch("aiohue.Bridge", return_value=mock_api), patch.object(
hass.config_entries, "async_forward_entry_setup"
):
assert await hue_bridge.async_setup() is True
assert hue_bridge.api is mock_api
call = Mock()
call.data = {"group_name": "Group 1", "scene_name": "Cozy dinner", "transition": 30}
with patch("aiohue.Bridge", return_value=mock_api):
assert await hue_bridge.hue_activate_scene(call.data) is None
assert len(mock_api.mock_requests) == 3
assert mock_api.mock_requests[2]["json"]["scene"] == "scene_1"
assert mock_api.mock_requests[2]["json"]["transitiontime"] == 30
assert mock_api.mock_requests[2]["path"] == "groups/group_1/action"
async def test_hue_activate_scene_group_not_found(hass, mock_api):
"""Test failed hue_activate_scene due to missing group."""
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host", "username": "mock-username"},
"test",
options={CONF_ALLOW_HUE_GROUPS: True, CONF_ALLOW_UNREACHABLE: False},
)
hue_bridge = bridge.HueBridge(hass, config_entry)
mock_api.mock_group_responses.append({})
mock_api.mock_scene_responses.append(SCENE_RESPONSE)
with patch("aiohue.Bridge", return_value=mock_api), patch.object(
hass.config_entries, "async_forward_entry_setup"
):
assert await hue_bridge.async_setup() is True
assert hue_bridge.api is mock_api
call = Mock()
call.data = {"group_name": "Group 1", "scene_name": "Cozy dinner"}
with patch("aiohue.Bridge", return_value=mock_api):
assert await hue_bridge.hue_activate_scene(call.data) is False
async def test_hue_activate_scene_scene_not_found(hass, mock_api):
"""Test failed hue_activate_scene due to missing scene."""
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host", "username": "mock-username"},
"test",
options={CONF_ALLOW_HUE_GROUPS: True, CONF_ALLOW_UNREACHABLE: False},
)
hue_bridge = bridge.HueBridge(hass, config_entry)
mock_api.mock_group_responses.append(GROUP_RESPONSE)
mock_api.mock_scene_responses.append({})
with patch("aiohue.Bridge", return_value=mock_api), patch.object(
hass.config_entries, "async_forward_entry_setup"
):
assert await hue_bridge.async_setup() is True
assert hue_bridge.api is mock_api
call = Mock()
call.data = {"group_name": "Group 1", "scene_name": "Cozy dinner"}
with patch("aiohue.Bridge", return_value=mock_api):
assert await hue_bridge.hue_activate_scene(call.data) is False
async def test_event_updates(hass, caplog):
"""Test calling reset while the entry has been setup."""
events = asyncio.Queue()
async def iterate_queue():
while True:
event = await events.get()
if event is None:
return
yield event
async def wait_empty_queue():
count = 0
while not events.empty() and count < 50:
await asyncio.sleep(0)
count += 1
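# The asyncio.Queue stands in for the bridge's event stream: iterate_queue
# yields events as they are queued and stops when it sees None, which lets the
# test terminate the subscription task cleanly at the end.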
hue_bridge = bridge.HueBridge(None, None)
hue_bridge.api = Mock(listen_events=iterate_queue)
subscription_task = asyncio.create_task(ORIG_SUBSCRIBE_EVENTS(hue_bridge))
calls = []
def obj_updated():
calls.append(True)
unsub = hue_bridge.listen_updates("lights", "2", obj_updated)
events.put_nowait(Mock(ITEM_TYPE="lights", id="1"))
await wait_empty_queue()
assert len(calls) == 0
events.put_nowait(Mock(ITEM_TYPE="lights", id="2"))
await wait_empty_queue()
assert len(calls) == 1
unsub()
events.put_nowait(Mock(ITEM_TYPE="lights", id="2"))
await wait_empty_queue()
assert len(calls) == 1
# Test that multiple listeners can be registered for the same item.
def obj_updated_false():
calls.append(False)
unsub = hue_bridge.listen_updates("lights", "2", obj_updated)
unsub_false = hue_bridge.listen_updates("lights", "2", obj_updated_false)
events.put_nowait(Mock(ITEM_TYPE="lights", id="2"))
await wait_empty_queue()
assert len(calls) == 3
assert calls[-2] is True
assert calls[-1] is False
# Also call multiple times to make sure that works.
unsub()
unsub()
unsub_false()
unsub_false()
events.put_nowait(Mock(ITEM_TYPE="lights", id="2"))
await wait_empty_queue()
assert len(calls) == 3
events.put_nowait(None)
await subscription_task
|
|
'''
Created on Jan 19, 2014
@author: Chris
'''
import sys
import wx
from gooey.gui import image_repository, events
from gooey.gui.lang.i18n import _
from gooey.gui.pubsub import pub
from gooey.gui.util import wx_util
from gooey.gui.windows import footer, header, layouts
from gooey.gui.windows.runtime_display_panel import RuntimeDisplay
YES = 5103
NO = 5104
class BaseWindow(wx.Frame):
'''
Primary Frame under which all sub-Panels are organized.
'''
def __init__(self, layout_type, use_tabs):
wx.Frame.__init__(self, parent=None, id=-1)
self.SetDoubleBuffered(True)
# type of gui to render
self.layout_type = layout_type
self.use_tabs = use_tabs
# Components
self.icon = None
self.head_panel = None
self.config_panel = None
self.runtime_display = None
self.foot_panel = None
self.panels = None
self._init_properties()
self._init_components()
self._do_layout()
self.Bind(wx.EVT_SIZE, self.onResize)
self.Bind(wx.EVT_CLOSE, self.onClose)
@property
def window_size(self):
return self.GetSize()
@window_size.setter
def window_size(self, size_tuple):
self.SetSize(size_tuple)
@property
def window_title(self):
return self.GetTitle()
@window_title.setter
def window_title(self, title):
self.SetTitle(title)
@property
def heading_title(self):
return self.head_panel.title
@heading_title.setter
def heading_title(self, text):
self.head_panel.title = text
@property
def heading_subtitle(self):
return self.head_panel.subtitle
@heading_subtitle.setter
def heading_subtitle(self, text):
self.head_panel.subtitle = text
def create_section(self, name):
self.config_panel.main_content.CreateSection(name)
def delete_section(self, name):
self.config_panel.main_content.DeleteSection(name)
def do_layout(self):
self.config_panel.main_content._do_layout()
def section(self, name):
return self.config_panel.main_content.Section(name)
@property
def progress_bar(self):
return self.foot_panel.progress_bar
def set_list_contents(self, contents):
self.config_panel.sidebar.set_list_contents(contents)
def set_display_font_style(self, style):
# TODO: make this not stupid
# TODO: _actual_ font support
self.runtime_display.set_font_style(
wx.MODERN if style == 'monospace' else wx.DEFAULT)
def _init_properties(self):
# self.SetTitle(self.build_spec['program_name'])
# self.SetSize(self.build_spec['default_size'])
# # self.SetMinSize((400, 300))
self.icon = wx.Icon(image_repository.program_icon, wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
def _init_components(self):
self.runtime_display = RuntimeDisplay(self)
self.head_panel = header.FrameHeader(parent=self)
self.foot_panel = footer.Footer(self)
self.panels = [self.head_panel, self.config_panel, self.foot_panel]
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.head_panel, 0, wx.EXPAND)
sizer.Add(wx_util.horizontal_rule(self), 0, wx.EXPAND)
if self.layout_type == layouts.COLUMN:
self.config_panel = layouts.ColumnLayout(self)
else:
self.config_panel = layouts.FlatLayout(self)
sizer.Add(self.config_panel, 1, wx.EXPAND)
sizer.Add(self.runtime_display, 1, wx.EXPAND)
self.runtime_display.Hide()
sizer.Add(wx_util.horizontal_rule(self), 0, wx.EXPAND)
sizer.Add(self.foot_panel, 0, wx.EXPAND)
self.SetSizer(sizer)
self.sizer = sizer
def freeze(self):
self.Freeze()
def thaw(self):
self.Thaw()
def enable_stop_button(self):
self.foot_panel.stop_button.Enable()
def disable_stop_button(self):
self.foot_panel.stop_button.Disable()
def show(self, *args):
'''
Looks up the attribute across all available
panels and calls `Show()`
'''
self._set_visibility('Show', *args)
def hide(self, *args):
'''
Looks up the attribute across all available
panels and calls `Hide()`
'''
self._set_visibility('Hide', *args)
def _set_visibility(self, action, *args):
'''
Checks for the existence of each `attr` on a given panel and
performs `action` ('Show' or 'Hide') if found
'''
def _set_visibility(obj, attrs):
for attr in attrs:
if hasattr(obj, attr):
instance = getattr(obj, attr)
getattr(instance, action)()
instance.Layout()
for panel in [self, self.head_panel, self.foot_panel, self.config_panel]:
_set_visibility(panel, args)
def hide_all_buttons(self):
self.foot_panel.hide_all_buttons()
def update_console_async(self, msg):
wx.CallAfter(self.runtime_display.append_text, msg)
def update_progress_aync(self, progress, disable_animation=False):
wx.CallAfter(self.UpdateProgressBar, progress, disable_animation)
def onResize(self, evt):
evt.Skip()
def onClose(self, evt):
if evt.CanVeto():
evt.Veto()
pub.send_message(str(events.WINDOW_CLOSE))
def UpdateProgressBar(self, value, disable_animation=False):
pb = self.foot_panel.progress_bar
if value < 0:
pb.Pulse()
else:
value = min(int(value), pb.GetRange())
if pb.GetValue() != value:
# Windows 7 progress bar animation hack
# http://stackoverflow.com/questions/5332616/disabling-net-progressbar-animation-when-changing-value
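# The trick: overshoot the target by one (or, at the maximum, step one below
# it) and then set the real value; moving the bar backwards bypasses Windows'
# progress animation so the bar lands on the value immediately.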
if disable_animation and sys.platform.startswith("win"):
if pb.GetRange() == value:
pb.SetValue(value)
pb.SetValue(value-1)
else:
pb.SetValue(value+1)
pb.SetValue(value)
def show_dialog(self, title, content, style):
dlg = wx.MessageDialog(None, content, title, style)
result = dlg.ShowModal()
dlg.Destroy()
return result
def show_missing_args_dialog(self):
self.show_dialog(_('error_title'), _('error_required_fields'), wx.ICON_ERROR)
def confirm_exit_dialog(self):
result = self.show_dialog(_('sure_you_want_to_exit'), _('close_program'), wx.YES_NO)
return result == YES
def confirm_stop_dialog(self):
result = self.show_dialog(_('sure_you_want_to_stop'), _('stop_task'), wx.YES_NO)
return result == YES
if __name__ == '__main__':
pass
|
|
"""
HB events
"""
from pox.openflow.libopenflow_01 import *
import json
from hb_utils import *
from hb_json_event import JsonEvent
from hb_json_event import AttributeCombiningMetaclass
class HbEvent(JsonEvent):
__metaclass__ = AttributeCombiningMetaclass
_attr_combining_metaclass_args = ["_to_json_attrs"]
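# Entries in _to_json_attrs are either a plain attribute name (serialized
# as-is) or an (attribute_name, encoder) pair, where the encoder converts the
# value into something JSON-friendly (base64, port number, type name, ...).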
_to_json_attrs = ['pid_in', # Incoming Packet ID
'pid_out', # Outgoing Packet ID
'mid_in', # Message ID (from the switch to the controller)
'mid_out', # Message ID (from the controller to the switch)
# The type of the message (what are the possible types?)
# is it just Pkt_In, Pkt_Out, Barrier_Req, Port_Mod, Flow_Mod, Flow_Removed?
('msg_type', lambda ofp_type: ofp_type_rev_map.keys()[ofp_type_rev_map.values().index(ofp_type)]),
# ????
('operations', lambda xs: [x.to_json() for x in xs]),
'dpid', # The unique per switch datapath ID
'controller_id', # socket.getpeername(), NOT the STS cid (#NOTE (AH): why not?)
'hid', # Host ID
('packet', base64_encode), # The content of the packet
('in_port', get_port_no), # The ingress port number
('out_port', get_port_no), # The egress port number
('msg', base64_encode), # The content of the openflow message
('msg_flowmod', base64_encode), #NOTE (AH): how is it different from the above?
]
_from_json_attrs = {
'eid': lambda x: x,
'pid_in': lambda x: x, # Incoming Packet ID
'pid_out': lambda x: x, # Outgoing Packet ID
'mid_in': lambda x: x, # Message ID (from the switch to the controller)
'mid_out': lambda x: x, # Message ID (from the controller to the switch)
# The type of the message (what are the possible types?)
# is it just Pkt_In, Pkt_Out, Barrier_Req, Port_Mod, Flow_Mod, Flow_Removed?
'msg_type': lambda x: ofp_type_rev_map[x],
# ????
'operations': lambda v: [JsonEvent.from_json(json.loads(x)) for x in v],
'dpid': lambda x: x, # The unique per switch datapath ID
'controller_id': lambda x: x, # socket.getpeername(), NOT the STS cid (#NOTE (AH): why not?)
'hid': lambda x: x, # Host ID
'packet': lambda x: decode_packet(x) if x else None, # The content of the packet
'in_port': lambda x: x, # The ingress port number
'out_port': lambda x: x, # The egress port number
'msg': base64_decode_openflow, # The content of the openflow message
'msg_flowmod': base64_decode, #NOTE (AH): how is it different from the above?
}
def __init__(self, eid=None):
super(HbEvent, self).__init__(eid=eid)
class HbAsyncFlowExpiry(HbEvent):
'''
"Async", as flows expire due to a timer running out. As this can happen even during another event, it needs to be handled separately.
Note that a single TraceSwitchFlowTableEntryExpiry operation is always part of this event once finished.
'''
def __init__(self, mid_in=None, mid_out=None, operations=None, dpid=None,
flow_table=None, entry=None, reason=None, eid=None):
HbEvent.__init__(self, eid=eid)
self.mid_in = mid_in # to be filled in later: predecessor is the HbMessageHandle that installed the flow (with the same cookie)
self.mid_out = check_list(mid_out)
self.operations = check_list(operations)
self.dpid = dpid
class HbPacketHandle(HbEvent):
"""
When a switch finished processing packet PID_IN.
The possible outcomes of this process are:
1. OpenFlow message to the controller (Pkt_Out)
2. Packet is forwarded, then PID_OUT will contain the new identifier.
"""
#NOTE (AH): What about dropped packets?
#NOTE (AH): What about duplicated packets? for example a switch can forward the same packet to multiple ports
#NOTE (AH): What is buffer_out for? It's not used!
def __init__(self, pid_in, pid_out=None, mid_out=None, operations=None,
dpid=None, packet=None, in_port=None, buffer_out=None, eid=None):
HbEvent.__init__(self, eid=eid)
self.pid_in = pid_in
self.pid_out = check_list(pid_out)
self.mid_out = check_list(mid_out)
self.operations = check_list(operations)
self.dpid = dpid
self.packet = packet
self.in_port = in_port
class HbPacketSend(HbEvent):
"""
Packet (PID_IN) was sent from switch (DPID) port (out_port) with new identifier (PID_OUT).
"""
def __init__(self, pid_in, pid_out, dpid=None, packet=None, out_port=None,
eid=None):
HbEvent.__init__(self, eid=eid)
self.pid_in = pid_in
self.pid_out = check_list(pid_out)
self.dpid = dpid
self.packet = packet
self.out_port = out_port
class HbMessageHandle(HbEvent):
"""
Switch processing OpenFlow message (mid_in, msg_type, and content msg) from
controller (controller_id).
pid_in is set if the switch read a packet from the buffer as result
from processing the OF message.
"""
#NOTE (AH): what are the other arguments, mid_out, pid_out, operations?
#NOTE (AH): is buffer_in used?
def __init__(self, mid_in, msg_type, operations=None, pid_in=None,
pid_out=None, mid_out=None, dpid=None, controller_id=None,
msg=None, buffer_in=None, msg_flowmod=None, packet=None,
in_port=None, eid=None):
HbEvent.__init__(self, eid=eid)
self.pid_in = pid_in # to be filled in when a read from buffer occurs
self.mid_in = mid_in # filled in, but never matches a mid_out. This link will be filled in by controller instrumentation.
self.msg_type = msg_type
self.pid_out = check_list(pid_out)
self.mid_out = check_list(mid_out)
self.operations = check_list(operations)
# self.packet = None # possible to get through OFPP_TABLE/buffer put
# self.in_port = None # possible to get through OFPP_TABLE/buffer put
self.dpid = dpid # possibly needed to match with controller instrumentation
self.controller_id = controller_id # possibly needed to match with controller instrumentation
self.msg = msg
self.packet = packet
self.in_port = in_port
if msg_flowmod is not None:
self.msg_flowmod = msg_flowmod # needed for rule 3
@property
def msg_type_str(self):
return ofp_type_to_str(self.msg_type)
class HbMessageSend(HbEvent):
"""
OpenFlow message with mid_in was sent to the controller with the new identifier in mid_out.
#NOTE (AH): Can we explain this better?
"""
def __init__(self, mid_in, mid_out, msg_type, dpid=None, controller_id=None,
msg=None, eid=None):
HbEvent.__init__(self, eid=eid)
self.mid_in = mid_in
self.mid_out = check_list(mid_out) # filled in, but never matches a mid_in. This link will be filled in by controller instrumentation.
self.msg_type = msg_type
self.dpid = dpid
self.controller_id = controller_id
self.msg = msg
@property
def msg_type_str(self):
return ofp_type_to_str(self.msg_type)
class HbHostHandle(HbEvent):
"""
A host (hid) handling a packet (pid_in, packet) on port (in_port)
and maybe sending another packet in response (pid_out).
"""
def __init__(self, pid_in, pid_out=None, operations=None, hid=None,
packet=None, in_port=None, eid=None):
HbEvent.__init__(self, eid=eid)
self.pid_in = pid_in
self.pid_out = check_list(pid_out)
self.operations = check_list(operations)
self.hid = hid
self.packet = packet
self.in_port = in_port
class HbHostSend(HbEvent):
"""
A host (hid) is sending a packet (pid_out, packet) on port (out_port).
"""
#NOTE (AH): what is pid_in for?
def __init__(self, pid_in, pid_out, hid=None, packet=None, out_port=None,
eid=None):
HbEvent.__init__(self, eid=eid)
self.pid_in = pid_in
self.pid_out = check_list(pid_out)
self.hid = hid
self.packet = packet
self.out_port = out_port
class HbControllerHandle(HbEvent):
"""
Controller handled an OF message (mid_in)
and maybe generated another (mid_out).
"""
#NOTE (AH): Don't we need the CID?
def __init__(self, mid_in, mid_out, eid=None):
HbEvent.__init__(self, eid=eid)
self.mid_in = mid_in
self.mid_out = check_list(mid_out) # Generated, link with HbMessageSend
class HbControllerSend(HbEvent):
"""
Controller send an OF message (mid_out).
"""
#NOTE (AH): what is mid_in for?
#NOTE (AH): Don't we need the CID?
def __init__(self, mid_in, mid_out, eid=None):
HbEvent.__init__(self, eid=eid)
self.mid_in = mid_in # Generated, link with HbMessageHandle
self.mid_out = check_list(mid_out)
JsonEvent.register_type(HbEvent)
JsonEvent.register_type(HbAsyncFlowExpiry)
JsonEvent.register_type(HbPacketHandle)
JsonEvent.register_type(HbPacketSend)
JsonEvent.register_type(HbMessageHandle)
JsonEvent.register_type(HbMessageSend)
JsonEvent.register_type(HbHostHandle)
JsonEvent.register_type(HbHostSend)
JsonEvent.register_type(HbControllerHandle)
JsonEvent.register_type(HbControllerSend)
|
|
# -*- coding: utf-8 -*-
import httplib as http
import contextlib
import mock
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase, DbTestCase
from tests import factories
from tests.utils import make_drf_request
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
class FakeModel(object):
def null_field(self):
return None
def valued_field(self):
return 'Some'
null = None
foo = 'bar'
pk = '1234'
class FakeSerializer(base_serializers.JSONAPISerializer):
class Meta:
type_ = 'foos'
links = base_serializers.LinksField({
'null_field': 'null_field',
'valued_field': 'valued_field',
})
null_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<null>'},
)
valued_link_field = base_serializers.RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<foo>'},
)
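# The '<attr>' placeholders in related_view_kwargs are resolved against the
# serialized object; a placeholder that resolves to None (as with '<null>'
# above) should cause the relationship to be dropped from the output, which
# is what TestNullLinks verifies below.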
def null_field(*args, **kwargs):
return None
def valued_field(*args, **kwargs):
return 'http://foo.com'
class TestNullLinks(ApiTestCase):
def test_null_links_are_omitted(self):
req = make_drf_request()
rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
assert_not_in('null_field', rep['links'])
assert_in('valued_field', rep['links'])
assert_not_in('null_link_field', rep['relationships'])
assert_in('valued_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
def setUp(self):
super(TestApiBaseSerializers, self).setUp()
self.node = factories.ProjectFactory(is_public=True)
for i in range(5):
factories.ProjectFactory(is_public=True, parent=self.node)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_counts_not_included_in_link_fields_by_default(self):
res = self.app.get(self.url)
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {}:
continue
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_counts_included_in_link_fields_with_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': True})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
if (field.related_meta or {}).get('count'):
link = relation['links'].values()[0]
assert_in('count', link['meta'])
def test_related_counts_excluded_query_param_false(self):
res = self.app.get(self.url, params={'related_counts': False})
relationships = res.json['data']['relationships']
for relation in relationships.values():
if relation == {}:
continue
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_invalid_related_counts_value_raises_bad_request(self):
res = self.app.get(self.url, params={'related_counts': 'fish'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invalid_embed_value_raise_bad_request(self):
res = self.app.get(self.url, params={'embed': 'foo'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: foo")
def test_counts_included_in_children_field_with_children_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': 'children'})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
link = relation['links'].values()[0]
if (field.related_meta or {}).get('count') and key == 'children':
assert_in('count', link['meta'])
else:
assert_not_in('count', link['meta'])
def test_counts_included_in_children_and_contributors_fields_with_field_csv_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': 'children,contributors'})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
if relation == {}:
continue
field = NodeSerializer._declared_fields[key]
if getattr(field, 'field', None):
field = field.field
link = relation['links'].values()[0]
            if (field.related_meta or {}).get('count') and key in ('children', 'contributors'):
assert_in('count', link['meta'])
else:
assert_not_in('count', link['meta'])
def test_error_when_requesting_related_counts_for_attribute_field(self):
res = self.app.get(self.url, params={'related_counts': 'title'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(res.json['errors'][0]['detail'], "Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got 'title'")
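        # To summarise the cases exercised above (illustrative query strings only):
        # ?related_counts=true / false, or a CSV of relationship fields such as
        # ?related_counts=children,contributors are accepted; attribute names like
        # ?related_counts=title are rejected with 400 BAD_REQUEST.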
class TestRelationshipField(DbTestCase):
# We need a Serializer to test the Relationship field (needs context)
class BasicNodeSerializer(JSONAPISerializer):
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'}
)
parent_with_meta = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_count', 'extra': 'get_extra'},
)
self_and_related_field = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-contributors',
self_view_kwargs={'node_id': '<pk>'},
)
two_url_kwargs = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-pointer-detail',
related_view_kwargs={'node_id': '<pk>', 'node_link_id': '<pk>'},
)
not_attribute_on_target = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-children',
related_view_kwargs={'node_id': '12345'}
)
class Meta:
type_ = 'nodes'
def get_count(self, obj):
return 1
def get_extra(self, obj):
return 'foo'
# TODO: Expand tests
# Regression test for https://openscience.atlassian.net/browse/OSF-4832
def test_serializing_meta(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
meta = data['relationships']['parent_with_meta']['links']['related']['meta']
assert_not_in('count', meta)
assert_in('extra', meta)
assert_equal(meta['extra'], 'foo')
def test_self_and_related_fields(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
relationship_field = data['relationships']['self_and_related_field']['links']
assert_in('/v2/nodes/{}/contributors/'.format(node._id), relationship_field['self']['href'])
assert_in('/v2/nodes/{}/'.format(node._id), relationship_field['related']['href'])
def test_field_with_two_kwargs(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['two_url_kwargs']['links']
assert_in('/v2/nodes/{}/node_links/{}/'.format(node._id, node._id), field['related']['href'])
def test_field_with_non_attribute(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['not_attribute_on_target']['links']
assert_in('/v2/nodes/{}/children/'.format('12345'), field['related']['href'])
|
|
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
                # Get the letter from the filename. This is the label of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
X_train, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data so it can be worked with inside a Docker container
docker_size_limit = 150000
X_train, train_labels = resample(X_train, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for greyscale image data
def normalize_greyscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# ToDo: Implement Min-Max scaling for greyscale image data
a = 0.1
b = 0.9
x_min = np.min(image_data)
x_max = np.max(image_data)
x_prime = [a + (((x-x_min)*(b-a))/(x_max-x_min)) for x in image_data]
# print(image_data, ' normalized to ---> ', x_prime)
return x_prime
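# Illustrative sanity check of the scaling above (my own sketch, assuming 8-bit
# greyscale input so that x_min = 0 and x_max = 255; not part of the assignment):
def _min_max_example():
    a, b, x_min, x_max = 0.1, 0.9, 0.0, 255.0
    # 0 -> 0.1 (lower bound), 127.5 -> 0.5 (midpoint), 255 -> 0.9 (upper bound)
    return [a + ((x - x_min) * (b - a)) / (x_max - x_min) for x in (0.0, 127.5, 255.0)]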
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_greyscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_greyscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
X_train = normalize_greyscale(X_train)
test_features = normalize_greyscale(test_features)
is_features_normal = True
print('Tests Passed!')
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
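# Tiny illustration of what LabelBinarizer does (the letters below are made up;
# the real labels come from the notMNIST filenames above):
def _one_hot_example():
    from sklearn.preprocessing import LabelBinarizer
    # 'A' -> [1, 0, 0], 'B' -> [0, 1, 0], 'C' -> [0, 0, 1]
    return LabelBinarizer().fit_transform(['A', 'B', 'A', 'C'])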
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
X_train, valid_features, train_labels, valid_labels = train_test_split(
X_train,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': X_train,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
X_train = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
features_count = 784
labels_count = 10
# ToDo: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# ToDo: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count], dtype=tf.float32))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape in [None, 10], 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: X_train, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
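# With one-hot labels the inner sum collapses to -log(p_true); e.g. assigning
# probability 0.5 to the correct class contributes -log(0.5) ~= 0.693 to the
# mean loss (illustrative numbers, not part of the original notebook).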
# Create an operation that initializes all variables
init = tf.initialize_all_variables()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
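# e.g. a prediction row [0.1, 0.7, 0.2] and a one-hot label [0, 1, 0] both argmax
# to index 1, so that example counts as correct (illustrative numbers).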
# ToDo: Find the best parameters for each configuration
# Validation accuracy at 0.8085333108901978
epochs = 5
batch_size = 50
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements used for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(X_train) / batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = X_train[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'b', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy for [{}, {}, {}] at {}'.format(epochs, batch_size, learning_rate, validation_accuracy))
"""
Test
Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're
going to test your model against your hold-out dataset/testing data. This will give you a good indicator of how well
the model will do in the real world. You should have a test accuracy of at least 80%.
"""
# ToDo: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 5
batch_size = 50
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(X_train) / batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i + 1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i * batch_size
batch_features = X_train[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import defaultdict
from collections import OrderedDict
import six
import st2common
from st2common import log as logging
from st2common.bootstrap.triggersregistrar import TriggersRegistrar
from st2common.bootstrap.sensorsregistrar import SensorsRegistrar
from st2common.bootstrap.actionsregistrar import ActionsRegistrar
from st2common.bootstrap.aliasesregistrar import AliasesRegistrar
from st2common.bootstrap.policiesregistrar import PolicyRegistrar
import st2common.bootstrap.policiesregistrar as policies_registrar
import st2common.bootstrap.runnersregistrar as runners_registrar
from st2common.bootstrap.rulesregistrar import RulesRegistrar
import st2common.bootstrap.ruletypesregistrar as rule_types_registrar
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
import st2common.content.utils as content_utils
from st2common.models.api.action import LiveActionCreateAPI
from st2common.models.api.pack import PackAPI
from st2common.models.api.pack import PackAsyncAPI
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.persistence.pack import Pack
from st2common.rbac.types import PermissionType
from st2common.rbac import utils as rbac_utils
from st2common.services import packs as packs_service
from st2common.router import abort
from st2common.router import Response
from st2api.controllers.resource import ResourceController
from st2api.controllers.v1.actionexecutions import ActionExecutionsControllerMixin
http_client = six.moves.http_client
__all__ = [
'PacksController',
'BasePacksController',
'ENTITIES'
]
LOG = logging.getLogger(__name__)
# Note: The order in which these are defined is important so they are registered
# in the same order as they are in st2-register-content.
# We also need to use a list of tuples to preserve the order.
ENTITIES = OrderedDict([
('trigger', (TriggersRegistrar, 'triggers')),
('sensor', (SensorsRegistrar, 'sensors')),
('action', (ActionsRegistrar, 'actions')),
('rule', (RulesRegistrar, 'rules')),
('alias', (AliasesRegistrar, 'aliases')),
('policy', (PolicyRegistrar, 'policies')),
('config', (ConfigsRegistrar, 'configs'))
])
class PackInstallController(ActionExecutionsControllerMixin):
def post(self, pack_install_request):
parameters = {
'packs': pack_install_request.packs,
}
if pack_install_request.force:
parameters['force'] = True
new_liveaction_api = LiveActionCreateAPI(action='packs.install',
parameters=parameters,
user=None)
execution_resp = self._handle_schedule_execution(liveaction_api=new_liveaction_api,
requester_user=None)
exec_id = PackAsyncAPI(execution_id=execution_resp.json['id'])
return Response(json=exec_id, status=http_client.ACCEPTED)
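    # Illustrative request/response shape for this controller (route prefix, pack
    # name and ids below are placeholders, not taken from this module); the
    # response carries the id of the scheduled ``packs.install`` execution:
    #
    #   POST <api>/v1/packs/install
    #   {"packs": ["some_pack"], "force": true}
    #
    #   HTTP 202 Accepted
    #   {"execution_id": "<execution id>"}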
class PackUninstallController(ActionExecutionsControllerMixin):
def post(self, pack_uninstall_request, ref_or_id=None):
if ref_or_id:
parameters = {
'packs': [ref_or_id]
}
else:
parameters = {
'packs': pack_uninstall_request.packs
}
new_liveaction_api = LiveActionCreateAPI(action='packs.uninstall',
parameters=parameters,
user=None)
execution_resp = self._handle_schedule_execution(liveaction_api=new_liveaction_api,
requester_user=None)
exec_id = PackAsyncAPI(execution_id=execution_resp.json['id'])
return Response(json=exec_id, status=http_client.ACCEPTED)
class PackRegisterController(object):
def post(self, pack_register_request):
if pack_register_request and hasattr(pack_register_request, 'types'):
types = pack_register_request.types
else:
types = ['runner', 'action', 'trigger', 'sensor', 'rule',
'rule_type', 'alias', 'policy_type', 'policy', 'config']
if pack_register_request and hasattr(pack_register_request, 'packs'):
packs = list(set(pack_register_request.packs))
else:
packs = None
result = defaultdict(int)
        # Register dependent resources (actions depend on runners, rules depend on rule types, etc.)
if ('runner' in types or 'runners' in types) or ('action' in types or 'actions' in types):
result['runners'] = runners_registrar.register_runners(experimental=True)
if ('rule_type' in types or 'rule_types' in types) or \
('rule' in types or 'rules' in types):
result['rule_types'] = rule_types_registrar.register_rule_types()
if ('policy_type' in types or 'policy_types' in types) or \
('policy' in types or 'policies' in types):
result['policy_types'] = policies_registrar.register_policy_types(st2common)
use_pack_cache = False
fail_on_failure = getattr(pack_register_request, 'fail_on_failure', True)
for type, (Registrar, name) in six.iteritems(ENTITIES):
if type in types or name in types:
registrar = Registrar(use_pack_cache=use_pack_cache,
fail_on_failure=fail_on_failure)
if packs:
for pack in packs:
pack_path = content_utils.get_pack_base_path(pack)
try:
registered_count = registrar.register_from_pack(pack_dir=pack_path)
result[name] += registered_count
except ValueError as e:
                            # Throw a more user-friendly exception if the requested pack doesn't exist
if re.match('Directory ".*?" doesn\'t exist', str(e)):
msg = 'Pack "%s" not found on disk: %s' % (pack, str(e))
raise ValueError(msg)
raise e
else:
packs_base_paths = content_utils.get_packs_base_paths()
registered_count = registrar.register_from_packs(base_dirs=packs_base_paths)
result[name] += registered_count
return result
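    # Illustrative shape of the returned counts (numbers are made up): registering
    # only rules for a couple of packs might yield something like
    #   {"rule_types": 3, "rules": 7}
    # while a full registration also includes keys such as "runners", "actions",
    # "sensors", "triggers", "aliases", "policies" and "configs".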
class PackSearchController(object):
def post(self, pack_search_request):
if hasattr(pack_search_request, 'query'):
packs = packs_service.search_pack_index(pack_search_request.query,
case_sensitive=False)
return [PackAPI(**pack) for pack in packs]
else:
pack = packs_service.get_pack_from_index(pack_search_request.pack)
return PackAPI(**pack) if pack else None
class IndexHealthController(object):
def get(self):
"""
Check if all listed indexes are healthy: they should be reachable,
return valid JSON objects, and yield more than one result.
"""
_, status = packs_service.fetch_pack_index(allow_empty=True)
health = {
"indexes": {
"count": len(status),
"valid": 0,
"invalid": 0,
"errors": {},
"status": status,
},
"packs": {
"count": 0,
},
}
for index in status:
if index['error']:
error_count = health['indexes']['errors'].get(index['error'], 0) + 1
health['indexes']['invalid'] += 1
health['indexes']['errors'][index['error']] = error_count
else:
health['indexes']['valid'] += 1
health['packs']['count'] += index['packs']
return health
class BasePacksController(ResourceController):
model = PackAPI
access = Pack
def _get_one_by_ref_or_id(self, ref_or_id, requester_user, exclude_fields=None):
instance = self._get_by_ref_or_id(ref_or_id=ref_or_id, exclude_fields=exclude_fields)
rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
resource_db=instance,
permission_type=PermissionType.PACK_VIEW)
if not instance:
msg = 'Unable to identify resource with ref_or_id "%s".' % (ref_or_id)
abort(http_client.NOT_FOUND, msg)
return
result = self.model.from_model(instance, **self.from_model_kwargs)
return result
def _get_by_ref_or_id(self, ref_or_id, exclude_fields=None):
resource_db = self._get_by_id(resource_id=ref_or_id, exclude_fields=exclude_fields)
if not resource_db:
# Try ref
resource_db = self._get_by_ref(ref=ref_or_id, exclude_fields=exclude_fields)
if not resource_db:
msg = 'Resource with a ref or id "%s" not found' % (ref_or_id)
raise StackStormDBObjectNotFoundError(msg)
return resource_db
def _get_by_ref(self, ref, exclude_fields=None):
"""
Note: In this case "ref" is pack name and not StackStorm's ResourceReference.
"""
resource_db = self.access.query(ref=ref, exclude_fields=exclude_fields).first()
return resource_db
class PacksIndexController(object):
search = PackSearchController()
health = IndexHealthController()
class PacksController(BasePacksController):
from st2api.controllers.v1.packviews import PackViewsController
model = PackAPI
access = Pack
supported_filters = {
'name': 'name',
'ref': 'ref'
}
query_options = {
'sort': ['ref']
}
# Nested controllers
install = PackInstallController()
uninstall = PackUninstallController()
register = PackRegisterController()
views = PackViewsController()
index = PacksIndexController()
def __init__(self):
super(PacksController, self).__init__()
self.get_one_db_method = self._get_by_ref_or_id
def get_all(self, sort=None, offset=0, limit=None, **raw_filters):
return super(PacksController, self)._get_all(sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters)
def get_one(self, ref_or_id, requester_user):
return self._get_one_by_ref_or_id(ref_or_id=ref_or_id, requester_user=requester_user)
packs_controller = PacksController()
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import inspect
import six
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp import protocol
from autobahn.wamp.types import ComponentConfig
from autobahn.websocket.util import parse_url
from autobahn.twisted.websocket import WampWebSocketClientFactory
# new API
# from autobahn.twisted.connection import Connection
import txaio
txaio.use_twisted()
__all__ = [
'ApplicationSession',
'ApplicationSessionFactory',
'ApplicationRunner',
'Application',
'Service',
# new API
'Session'
]
try:
from twisted.application import service
except (ImportError, SyntaxError):
# Not on PY3 yet
service = None
__all__.pop(__all__.index('Service'))
class ApplicationSession(protocol.ApplicationSession):
"""
WAMP application session for Twisted-based applications.
"""
class ApplicationSessionFactory(protocol.ApplicationSessionFactory):
"""
WAMP application session factory for Twisted-based applications.
"""
session = ApplicationSession
"""
The application session class this application session factory will use. Defaults to :class:`autobahn.twisted.wamp.ApplicationSession`.
"""
class ApplicationRunner(object):
"""
This class is a convenience tool mainly for development and quick hosting
of WAMP application components.
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
log = txaio.make_logger()
def __init__(self, url, realm, extra=None, serializers=None, ssl=None, proxy=None):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param serializers: A list of WAMP serializers to use (or None for default serializers).
Serializers must implement :class:`autobahn.wamp.interfaces.ISerializer`.
:type serializers: list
:param ssl: (Optional). If specified this should be an
instance suitable to pass as ``sslContextFactory`` to
:class:`twisted.internet.endpoints.SSL4ClientEndpoint`` such
as :class:`twisted.internet.ssl.CertificateOptions`. Leaving
it as ``None`` will use the result of calling Twisted's
:meth:`twisted.internet.ssl.platformTrust` which tries to use
your distribution's CA certificates.
:type ssl: :class:`twisted.internet.ssl.CertificateOptions`
:param proxy: Explicit proxy server to use; a dict with ``host`` and ``port`` keys
:type proxy: dict or None
"""
assert(type(url) == six.text_type)
assert(realm is None or type(realm) == six.text_type)
assert(extra is None or type(extra) == dict)
assert(proxy is None or type(proxy) == dict)
self.url = url
self.realm = realm
self.extra = extra or dict()
self.serializers = serializers
self.ssl = ssl
self.proxy = proxy
def run(self, make, start_reactor=True):
"""
Run the application component.
        :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param start_reactor: if True (the default) this method starts
the Twisted reactor and doesn't return until the reactor
stops. If there are any problems starting the reactor or
connect()-ing, we stop the reactor and raise the exception
back to the caller.
:returns: None is returned, unless you specify
``start_reactor=False`` in which case the Deferred that
connect() returns is returned; this will callback() with
an IProtocol instance, which will actually be an instance
of :class:`WampWebSocketClientProtocol`
"""
if start_reactor:
            # only select framework, set loop and start logging when we are asked to
            # start the reactor - otherwise we are running in a program that likely
            # already took care of all this.
from twisted.internet import reactor
txaio.use_twisted()
txaio.config.loop = reactor
txaio.start_logging(level='info')
isSecure, host, port, resource, path, params = parse_url(self.url)
        # factory to create the user's ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception as e:
if start_reactor:
# the app component could not be created .. fatal
self.log.error("{err}", err=e)
reactor.stop()
else:
# if we didn't start the reactor, it's up to the
# caller to deal with errors
raise
else:
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers, proxy=self.proxy)
        # suppress pointless log noise like
        # "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>"
transport_factory.noisy = False
        # if the user passed ssl= but the URL isn't a secure one (ws:), the ssl
        # argument would never be used, which makes no sense.
context_factory = None
if self.ssl is not None:
if not isSecure:
raise RuntimeError(
'ssl= argument value passed to %s conflicts with the "ws:" '
'prefix of the url argument. Did you mean to use "wss:"?' %
self.__class__.__name__)
context_factory = self.ssl
elif isSecure:
from twisted.internet.ssl import optionsForClientTLS
context_factory = optionsForClientTLS(host)
from twisted.internet import reactor
if self.proxy is not None:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
transport_factory.contextFactory = context_factory
elif isSecure:
from twisted.internet.endpoints import SSL4ClientEndpoint
assert context_factory is not None
client = SSL4ClientEndpoint(reactor, host, port, context_factory)
else:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, host, port)
d = client.connect(transport_factory)
# as the reactor shuts down, we wish to wait until we've sent
# out our "Goodbye" message; leave() returns a Deferred that
# fires when the transport gets to STATE_CLOSED
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
if proto._session.is_attached():
return proto._session.leave()
elif proto._session.is_connected():
return proto._session.disconnect()
# when our proto was created and connected, make sure it's cleaned
# up properly later on when the reactor shuts down for whatever reason
def init_proto(proto):
reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
return proto
# if we connect successfully, the arg is a WampWebSocketClientProtocol
d.addCallback(init_proto)
# if the user didn't ask us to start the reactor, then they
# get to deal with any connect errors themselves.
if start_reactor:
# if an error happens in the connect(), we save the underlying
# exception so that after the event-loop exits we can re-raise
# it to the caller.
class ErrorCollector(object):
exception = None
def __call__(self, failure):
self.exception = failure.value
reactor.stop()
connect_error = ErrorCollector()
d.addErrback(connect_error)
# now enter the Twisted reactor loop
reactor.run()
# if we exited due to a connection error, raise that to the
# caller
if connect_error.exception:
raise connect_error.exception
else:
# let the caller handle any errors
return d
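# Minimal usage sketch (component class, URL and realm are placeholders and not
# part of this module):
def _example_runner_usage():
    class HelloComponent(ApplicationSession):
        def onJoin(self, details):
            print("session joined")
            self.leave()
    runner = ApplicationRunner(u"ws://127.0.0.1:8080/ws", u"realm1")
    runner.run(HelloComponent)  # blocks until the reactor stops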
class _ApplicationSession(ApplicationSession):
"""
WAMP application session class used internally with :class:`autobahn.twisted.app.Application`.
"""
def __init__(self, config, app):
"""
:param config: The component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:param app: The application this session is for.
:type app: Instance of :class:`autobahn.twisted.wamp.Application`.
"""
# noinspection PyArgumentList
ApplicationSession.__init__(self, config)
self.app = app
@inlineCallbacks
def onConnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
"""
yield self.app._fire_signal('onconnect')
self.join(self.config.realm)
@inlineCallbacks
def onJoin(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
for uri, proc in self.app._procs:
yield self.register(proc, uri)
for uri, handler in self.app._handlers:
yield self.subscribe(handler, uri)
yield self.app._fire_signal('onjoined')
@inlineCallbacks
def onLeave(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
yield self.app._fire_signal('onleave')
self.disconnect()
@inlineCallbacks
def onDisconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
"""
yield self.app._fire_signal('ondisconnect')
class Application(object):
"""
A WAMP application. The application object provides a simple way of
creating, debugging and running WAMP application components.
"""
log = txaio.make_logger()
def __init__(self, prefix=None):
"""
:param prefix: The application URI prefix to use for procedures and topics,
e.g. ``"com.example.myapp"``.
:type prefix: unicode
"""
self._prefix = prefix
# procedures to be registered once the app session has joined the router/realm
self._procs = []
# event handler to be subscribed once the app session has joined the router/realm
self._handlers = []
# app lifecycle signal handlers
self._signals = {}
# once an app session is connected, this will be here
self.session = None
def __call__(self, config):
"""
Factory creating a WAMP application session for the application.
:param config: Component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:returns: obj -- An object that derives of
:class:`autobahn.twisted.wamp.ApplicationSession`
"""
assert(self.session is None)
self.session = _ApplicationSession(config, self)
return self.session
def run(self, url=u"ws://localhost:8080/ws", realm=u"realm1", start_reactor=True):
"""
Run the application.
:param url: The URL of the WAMP router to connect to.
:type url: unicode
:param realm: The realm on the WAMP router to join.
:type realm: unicode
"""
runner = ApplicationRunner(url, realm)
return runner.run(self.__call__, start_reactor)
def register(self, uri=None):
"""
Decorator exposing a function as a remote callable procedure.
The first argument of the decorator should be the URI of the procedure
to register under.
:Example:
.. code-block:: python
@app.register('com.myapp.add2')
def add2(a, b):
return a + b
        The above function can then be called remotely over WAMP using the URI `com.myapp.add2`
        under which it was registered.
If no URI is given, the URI is constructed from the application URI prefix
and the Python function name.
:Example:
.. code-block:: python
app = Application('com.myapp')
# implicit URI will be 'com.myapp.add2'
@app.register()
def add2(a, b):
return a + b
If the function `yields` (is a co-routine), the `@inlineCallbacks` decorator
will be applied automatically to it. In that case, if you wish to return something,
you should use `returnValue`:
:Example:
.. code-block:: python
from twisted.internet.defer import returnValue
@app.register('com.myapp.add2')
def add2(a, b):
res = yield stuff(a, b)
returnValue(res)
:param uri: The URI of the procedure to register under.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._procs.append((_uri, func))
return func
return decorator
def subscribe(self, uri=None):
"""
Decorator attaching a function as an event handler.
The first argument of the decorator should be the URI of the topic
to subscribe to. If no URI is given, the URI is constructed from
the application URI prefix and the Python function name.
        If the function yields, it will be assumed that it is an asynchronous
        process and inlineCallbacks will be applied to it.
:Example:
.. code-block:: python
@app.subscribe('com.myapp.topic1')
def onevent1(x, y):
print("got event on topic1", x, y)
:param uri: The URI of the topic to subscribe to.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._handlers.append((_uri, func))
return func
return decorator
def signal(self, name):
"""
Decorator attaching a function as handler for application signals.
Signals are local events triggered internally and exposed to the
developer to be able to react to the application lifecycle.
        If the function yields, it will be assumed that it is an asynchronous
        coroutine and inlineCallbacks will be applied to it.
Current signals :
- `onjoined`: Triggered after the application session has joined the
realm on the router and registered/subscribed all procedures
and event handlers that were setup via decorators.
- `onleave`: Triggered when the application session leaves the realm.
.. code-block:: python
@app.signal('onjoined')
def _():
                # do something after the app has joined a realm
:param name: The name of the signal to watch.
:type name: unicode
"""
def decorator(func):
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._signals.setdefault(name, []).append(func)
return func
return decorator
@inlineCallbacks
def _fire_signal(self, name, *args, **kwargs):
"""
Utility method to call all signal handlers for a given signal.
:param name: The signal name.
:type name: str
"""
for handler in self._signals.get(name, []):
try:
# FIXME: what if the signal handler is not a coroutine?
# Why run signal handlers synchronously?
yield handler(*args, **kwargs)
except Exception as e:
# FIXME
self.log.info("Warning: exception in signal handler swallowed: {err}", err=e)
if service:
# Don't define it if Twisted's service support isn't here
class Service(service.MultiService):
"""
A WAMP application as a twisted service.
The application object provides a simple way of creating, debugging and running WAMP application
components inside a traditional twisted application
        This manages the application lifecycle of the WAMP connection using startService and stopService.
        Using services also allows creating integration tests that properly terminate their connections.
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
factory = WampWebSocketClientFactory
def __init__(self, url, realm, make, extra=None, context_factory=None):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
            :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param context_factory: optional, only for secure connections. Passed as contextFactory to
                the ``connectSSL()`` call; see https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorSSL.connectSSL.html
:type context_factory: twisted.internet.ssl.ClientContextFactory or None
You can replace the attribute factory in order to change connectionLost or connectionFailed behaviour.
The factory attribute must return a WampWebSocketClientFactory object
"""
self.url = url
self.realm = realm
self.extra = extra or dict()
self.make = make
self.context_factory = context_factory
service.MultiService.__init__(self)
self.setupService()
def setupService(self):
"""
Setup the application component.
"""
is_secure, host, port, resource, path, params = parse_url(self.url)
            # factory to create the user's ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
session = self.make(cfg)
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = self.factory(create, url=self.url)
# setup the client from a Twisted endpoint
if is_secure:
from twisted.application.internet import SSLClient
ctx = self.context_factory
if ctx is None:
from twisted.internet.ssl import optionsForClientTLS
ctx = optionsForClientTLS(host)
client = SSLClient(host, port, transport_factory, contextFactory=ctx)
else:
if self.context_factory is not None:
raise Exception("context_factory specified on non-secure URI")
from twisted.application.internet import TCPClient
client = TCPClient(host, port, transport_factory)
client.setServiceParent(self)
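    # Sketch of hooking this into a twisted application (component class, URL and
    # realm below are placeholders):
    #
    #   application = service.Application("wampapp")
    #   Service(u"ws://127.0.0.1:8080/ws", u"realm1", MyComponent).setServiceParent(application)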
# new API
class Session(ApplicationSession):
def onJoin(self, details):
return self.on_join(details)
def onLeave(self, details):
return self.on_leave(details)
def onDisconnect(self):
return self.on_disconnect()
    def on_join(self, details):
pass
def on_leave(self, details):
self.disconnect()
def on_disconnect(self):
pass
|
|
#!/usr/bin/env python3
# testNLHTree3.py
""" Test more NLHTree functionality. """
import time
import unittest
from rnglib import SimpleRNG
from nlhtree import NLHTree as NT
from xlattice import (HashTypes, check_hashtype,
SHA1_HEX_LEN, SHA2_HEX_LEN)
class TestNLHTree3(unittest.TestCase):
""" Test more NLHTree functionality. """
# adapted from the buildList example 2015-05-22
EXAMPLE1 = [
'dataDir',
' data1 bea7383743859a81b84cec8fde2ccd1f3e2ff688',
' data2 895c210f5203c48c1e3a574a2d5eba043c0ec72d',
' subDir1',
' data11 cb0ece05cbb91501d3dd78afaf362e63816f6757',
' data12 da39a3ee5e6b4b0d3255bfef95601890afd80709',
' subDir2',
' subDir3',
' data31 8cddeb23f9de9da4547a0d5adcecc7e26cb098c0',
' subDir4',
' subDir41',
' subDir411',
' data41 31c16def9fc4a4b6415b0b133e156a919cf41cc8',
' zData 31c16def9fc4a4b6415b0b133e156a919cf41cc8',
]
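    # Format sketch: every line is an indented entry name; file entries also carry
    # a content hash (40 hex characters for SHA1 here), directory entries do not.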
# this is just a hack but ...
EXAMPLE2 = [
'dataDir',
' data1 012345678901234567890123bea7383743859a81b84' +
'cec8fde2ccd1f3e2ff688',
' data2 012345678901234567890123895c210f5203c48c1e3' +
'a574a2d5eba043c0ec72d',
' subDir1',
' data11 012345678901234567890123cb0ece05cbb91501d' +
'3dd78afaf362e63816f6757',
' data12 012345678901234567890123da39a3ee5e6b4b0d3' +
'255bfef95601890afd80709',
' subDir2',
' subDir3',
' data31 0123456789012345678901238cddeb23f9de9da45' +
'47a0d5adcecc7e26cb098c0',
' subDir4',
' subDir41',
' subDir411',
' data41 01234567890123456789012331c16def9fc4a4b' +
'6415b0b133e156a919cf41cc8',
' zData 01234567890123456789012331c16def9fc4a4b6415' +
'b0b133e156a919cf41cc8',
]
EXAMPLE3 = [
'dataDir',
' data1 6d57759cf499a8ff7762a10043548f22513ed834564' +
'52332a8abd4b59d7e9203',
' data2 dacbf5c11f4ddbd1277ecbc304e09967d3124148560' +
'f82634d3912db8b4bd547',
' subDir1',
' data11 fb47958129f261f65c1655002ff5f9806bc969283' +
'ad772af5e8caaf214a9ed72',
' data12 e3b0c44298fc1c149afbf4c8996fb92427ae41e46' +
'49b934ca495991b7852b855',
' subDir2',
' subDir3',
' data31 7659fb836a76fb3f3369e1a4ca247104220e4778d5' +
'862e38a123e10f02520e87',
' subDir4',
' subDir41',
' subDir411',
' data41 00bb6d0864cb4952a0c41cbea65cf09de41e00f' +
'c6fa1011a27c5dd8814c98175',
]
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
def do_test_pattern_matching(self, hashtype):
"""
Check pattern matching functions using a specific hash type.
"""
check_hashtype(hashtype)
if hashtype == HashTypes.SHA1:
strings = self.EXAMPLE1
elif hashtype == HashTypes.SHA2:
strings = self.EXAMPLE2
elif hashtype == HashTypes.SHA3:
strings = self.EXAMPLE3
# first line --------------------------------------
match = NT.DIR_LINE_RE.match(strings[0])
self.assertTrue(match)
self.assertEqual(len(match.group(1)), 0)
self.assertEqual(match.group(2), 'dataDir')
# simpler approach ----------------------
name = NT.parse_first_line(strings[0])
self.assertEqual(name, 'dataDir')
# file with indent of 1 ---------------------------
if hashtype == HashTypes.SHA1:
match = NT.FILE_LINE_RE_1.match(strings[1])
else:
# This works for both SHA2 and SHA3
match = NT.FILE_LINE_RE_2.match(strings[1])
self.assertTrue(match)
self.assertEqual(len(match.group(1)), 1)
self.assertEqual(match.group(2), 'data1')
# that simpler approach -----------------
indent, name, hash_ = NT.parse_other_line(strings[1])
self.assertEqual(indent, 1)
self.assertEqual(name, 'data1')
if hashtype == HashTypes.SHA1:
self.assertEqual(len(hash_), SHA1_HEX_LEN)
else:
# This works for both SHA2 and SHA 3
self.assertEqual(len(hash_), SHA2_HEX_LEN)
# subdirectory ------------------------------------
match = NT.DIR_LINE_RE.match(strings[3])
self.assertTrue(match)
self.assertEqual(len(match.group(1)), 1)
self.assertEqual(match.group(2), 'subDir1')
# that simpler approach -----------------
indent, name, hash_ = NT.parse_other_line(strings[3])
self.assertEqual(indent, 1)
self.assertEqual(name, 'subDir1')
self.assertEqual(hash_, None)
# lower level file ----------------------
if hashtype == HashTypes.SHA1:
match = NT.FILE_LINE_RE_1.match(strings[12])
else:
# This works for both SHA2 and SHA 3
match = NT.FILE_LINE_RE_2.match(strings[12])
self.assertTrue(match)
self.assertEqual(len(match.group(1)), 4)
self.assertEqual(match.group(2), 'data41')
# that simpler approach -----------------
indent, name, hash_ = NT.parse_other_line(strings[12])
self.assertEqual(indent, 4)
self.assertEqual(name, 'data41')
if hashtype == HashTypes.SHA1:
self.assertEqual(len(hash_), SHA1_HEX_LEN)
else:
# This works for both SHA2 and SHA 3
self.assertEqual(len(hash_), SHA2_HEX_LEN)
def test_pattern_matching(self):
""" Check pattern matching functions using various hash types. """
for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:
self.do_test_pattern_matching(hashtype)
def do_test_serialization(self, hashtype):
"""
Verify that the serialization of the NLHTree is correct
using a specific hash type.
"""
check_hashtype(hashtype)
if hashtype == HashTypes.SHA1:
tree = NT.create_from_string_array(self.EXAMPLE1, hashtype)
elif hashtype == HashTypes.SHA2:
tree = NT.create_from_string_array(self.EXAMPLE2, hashtype)
elif hashtype == HashTypes.SHA3:
tree = NT.create_from_string_array(self.EXAMPLE3, hashtype)
self.assertEqual(tree.hashtype, hashtype)
strings = []
tree.to_strings(strings, 0)
tree2 = NT.create_from_string_array(strings, hashtype)
self.assertEqual(tree, tree2)
string = '\n'.join(strings) + '\n'
tree3 = NT.parse(string, hashtype)
serial3 = tree3.__str__()
self.assertEqual(serial3, string)
self.assertEqual(tree3, tree)
dupe3 = tree3.clone()
self.assertEqual(dupe3, tree3)
def test_serialization(self):
"""
Verify that the serialization of the NLHTree is correct
using various hash types.
"""
for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:
self.do_test_serialization(hashtype)
if __name__ == '__main__':
unittest.main()
|
|
"""
============================
``ctypes`` Utility Functions
============================
See Also
--------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type, an ndarray that must be of type double, 1-dimensional
and C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
'as_ctypes_type']
import os
from numpy import (
integer, ndarray, dtype as _dtype, array, frombuffer
)
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError("ctypes is not available.")
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
plus the fact Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
Parameters
----------
libname : str
Name of the library, which can have 'lib' as a prefix,
but without an extension.
loader_path : str
Where the library can be found.
Returns
-------
ctypes.cdll[libpath] : library object
A ctypes library object
Raises
------
OSError
If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work "
"with ctypes < 1.0.1", stacklevel=2)
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
# mac, windows and linux >= py3.2 shared library and loadable
# module have different extensions so try both
so_ext2 = get_shared_lib_extension(is_python_ext=True)
if not so_ext2 == so_ext:
libname_ext.insert(0, libname + so_ext2)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
libpath = os.path.join(libdir, ln)
if os.path.exists(libpath):
try:
return ctypes.cdll[libpath]
except OSError:
## defective lib file
raise
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
class _concrete_ndptr(_ndptr):
"""
Like _ndptr, but with `_shape_` and `_dtype_` specified.
Notably, this means the pointer has enough information to reconstruct
the array, which is not generally true.
"""
def _check_retval_(self):
"""
This method is called when this class is used as the .restype
attribute for a shared-library function, to automatically wrap the
pointer into an array.
"""
return self.contents
@property
def contents(self):
"""
Get an ndarray viewing the data pointed to by this pointer.
This mirrors the `contents` attribute of a normal ctypes pointer
"""
full_dtype = _dtype((self._dtype_, self._shape_))
full_ctype = ctypes.c_char * full_dtype.itemsize
buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- WRITEBACKIFCOPY / X
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
        A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
# normalize dtype to an Optional[dtype]
if dtype is not None:
dtype = _dtype(dtype)
# normalize flags to an Optional[int]
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except Exception as e:
raise TypeError("invalid flags specification") from e
num = _num_fromflags(flags)
# normalize shape to an Optional[tuple]
if shape is not None:
try:
shape = tuple(shape)
except TypeError:
# single integer -> 1-tuple
shape = (shape,)
cache_key = (dtype, ndim, shape, num)
try:
return _pointer_type_cache[cache_key]
except KeyError:
pass
# produce a name for the new type
if dtype is None:
name = 'any'
elif dtype.names is not None:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
name += "_"+"x".join(str(x) for x in shape)
if flags is not None:
name += "_"+"_".join(flags)
if dtype is not None and shape is not None:
base = _concrete_ndptr
else:
base = _ndptr
klass = type("ndpointer_%s"%name, (base,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
_pointer_type_cache[cache_key] = klass
return klass
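# A minimal usage sketch (assuming numpy is importable as np): the argtype
# check can be exercised directly through ``from_param`` without loading a
# real shared library; the library name and function in the trailing comment
# are hypothetical.
def _example_ndpointer_usage():
    import numpy as np
    # Accept only 1-d, C-contiguous float64 arrays.
    arg_t = ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')
    arr = np.arange(4, dtype=np.float64)
    arg_t.from_param(arr)           # accepted: returns arr.ctypes
    try:
        arg_t.from_param(arr[::2])  # strided view is not C-contiguous
    except TypeError:
        pass                        # rejected, as intended
    # Typical use with ctypes would then look like:
    #   lib = ctypes.CDLL('libexample.so')              # hypothetical library
    #   lib.sum_doubles.argtypes = [arg_t, ctypes.c_size_t]
    #   lib.sum_doubles.restype = ctypes.c_double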
if ctypes is not None:
def _ctype_ndarray(element_type, shape):
""" Create an ndarray of the given element type and shape """
for dim in shape[::-1]:
element_type = dim * element_type
# prevent the type name from including np.ctypeslib
element_type.__module__ = None
return element_type
def _get_scalar_type_map():
"""
Return a dictionary mapping native endian scalar dtype to ctypes types
"""
ct = ctypes
simple_types = [
ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
ct.c_float, ct.c_double,
ct.c_bool,
]
return {_dtype(ctype): ctype for ctype in simple_types}
_scalar_type_map = _get_scalar_type_map()
def _ctype_from_dtype_scalar(dtype):
# swapping twice ensures that `=` is promoted to <, >, or |
dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
dtype_native = dtype.newbyteorder('=')
try:
ctype = _scalar_type_map[dtype_native]
except KeyError as e:
raise NotImplementedError(
"Converting {!r} to a ctypes type".format(dtype)
) from None
if dtype_with_endian.byteorder == '>':
ctype = ctype.__ctype_be__
elif dtype_with_endian.byteorder == '<':
ctype = ctype.__ctype_le__
return ctype
def _ctype_from_dtype_subarray(dtype):
element_dtype, shape = dtype.subdtype
ctype = _ctype_from_dtype(element_dtype)
return _ctype_ndarray(ctype, shape)
def _ctype_from_dtype_structured(dtype):
# extract offsets of each field
field_data = []
for name in dtype.names:
field_dtype, offset = dtype.fields[name][:2]
field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
# ctypes doesn't care about field order
field_data = sorted(field_data, key=lambda f: f[0])
if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
# union, if multiple fields all at address 0
size = 0
_fields_ = []
for offset, name, ctype in field_data:
_fields_.append((name, ctype))
size = max(size, ctypes.sizeof(ctype))
# pad to the right size
if dtype.itemsize != size:
_fields_.append(('', ctypes.c_char * dtype.itemsize))
# we inserted manual padding, so always `_pack_`
return type('union', (ctypes.Union,), dict(
_fields_=_fields_,
_pack_=1,
__module__=None,
))
else:
last_offset = 0
_fields_ = []
for offset, name, ctype in field_data:
padding = offset - last_offset
if padding < 0:
raise NotImplementedError("Overlapping fields")
if padding > 0:
_fields_.append(('', ctypes.c_char * padding))
_fields_.append((name, ctype))
last_offset = offset + ctypes.sizeof(ctype)
padding = dtype.itemsize - last_offset
if padding > 0:
_fields_.append(('', ctypes.c_char * padding))
# we inserted manual padding, so always `_pack_`
return type('struct', (ctypes.Structure,), dict(
_fields_=_fields_,
_pack_=1,
__module__=None,
))
def _ctype_from_dtype(dtype):
if dtype.fields is not None:
return _ctype_from_dtype_structured(dtype)
elif dtype.subdtype is not None:
return _ctype_from_dtype_subarray(dtype)
else:
return _ctype_from_dtype_scalar(dtype)
def as_ctypes_type(dtype):
r"""
Convert a dtype into a ctypes type.
Parameters
----------
dtype : dtype
The dtype to convert
Returns
-------
ctype
A ctype scalar, union, array, or struct
Raises
------
NotImplementedError
If the conversion is not possible
Notes
-----
This function does not losslessly round-trip in either direction.
``np.dtype(as_ctypes_type(dt))`` will:
- insert padding fields
- reorder fields to be sorted by offset
- discard field titles
``as_ctypes_type(np.dtype(ctype))`` will:
- discard the class names of `ctypes.Structure`\ s and
`ctypes.Union`\ s
- convert single-element `ctypes.Union`\ s into single-element
`ctypes.Structure`\ s
- insert padding fields
"""
return _ctype_from_dtype(_dtype(dtype))
def as_array(obj, shape=None):
"""
Create a numpy array from a ctypes array or POINTER.
The numpy array shares the memory with the ctypes object.
The shape parameter must be given if converting from a ctypes POINTER.
The shape parameter is ignored if converting from a ctypes array
"""
if isinstance(obj, ctypes._Pointer):
# convert pointers to an array of the desired shape
if shape is None:
raise TypeError(
'as_array() requires a shape argument when called on a '
'pointer')
p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
obj = ctypes.cast(obj, p_arr_type).contents
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
# can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
# dtype.itemsize (gh-14214)
ctype_scalar = as_ctypes_type(ai["typestr"])
result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
result = result_type.from_address(addr)
result.__keep = obj
return result
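# A short illustrative sketch (assuming numpy is importable as np): converting
# a structured dtype into a ctypes Structure and sharing memory between numpy
# and ctypes with as_ctypes / as_array, matching the caveats documented in
# as_ctypes_type above.
def _example_ctypes_interop():
    import numpy as np
    point_dt = np.dtype([('x', np.float64), ('y', np.float64)])
    PointStruct = as_ctypes_type(point_dt)      # a ctypes.Structure subclass
    assert ctypes.sizeof(PointStruct) == point_dt.itemsize
    arr = np.zeros(3, dtype=np.float64)
    c_arr = as_ctypes(arr)                      # ctypes array over the same memory
    c_arr[0] = 1.5
    assert arr[0] == 1.5                        # change is visible on both sides
    round_trip = as_array(c_arr)                # numpy view over the ctypes array
    assert round_trip.shape == (3,)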
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from desktop.lib.django_util import format_preserving_redirect
from desktop.lib.i18n import smart_str
from desktop.lib.parameterization import substitute_variables
from filebrowser.views import location_to_url
from beeswax import hive_site
from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, BROWSE_PARTITIONED_TABLE_LIMIT, SERVER_CONN_TIMEOUT, get_auth_username, get_auth_password, \
APPLY_NATURAL_SORT_MAX
from beeswax.common import apply_natural_sort
from beeswax.design import hql_query
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import QueryHistory, QUERY_TYPES
LOG = logging.getLogger(__name__)
DBMS_CACHE = {}
DBMS_CACHE_LOCK = threading.Lock()
def get(user, query_server=None):
global DBMS_CACHE
global DBMS_CACHE_LOCK
# Avoid circular dependency
from beeswax.server.hive_server2_lib import HiveServerClientCompatible, HiveServerClient
if query_server is None:
query_server = get_query_server_config()
DBMS_CACHE_LOCK.acquire()
try:
DBMS_CACHE.setdefault(user.username, {})
if query_server['server_name'] not in DBMS_CACHE[user.username]:
DBMS_CACHE[user.username][query_server['server_name']] = HiveServer2Dbms(HiveServerClientCompatible(HiveServerClient(query_server, user)), QueryHistory.SERVER_TYPE[1][0])
return DBMS_CACHE[user.username][query_server['server_name']]
finally:
DBMS_CACHE_LOCK.release()
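# A minimal usage sketch of the cached accessor above: ``user`` would be a
# Django user object and the HQL text is purely illustrative.
def _example_dbms_usage(user):
    db = get(user)  # cached HiveServer2Dbms for the default query server
    query = hql_query("SHOW DATABASES")
    handle = db.execute_and_wait(query, timeout_sec=30.0)
    if handle:
        result = db.fetch(handle, rows=100)
        db.close(handle)
        return list(result.rows())
    return []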
def get_query_server_config(name='beeswax', server=None):
if name == 'impala':
from impala.conf import SERVER_HOST as IMPALA_SERVER_HOST, SERVER_PORT as IMPALA_SERVER_PORT, \
IMPALA_PRINCIPAL, IMPERSONATION_ENABLED, QUERYCACHE_ROWS, QUERY_TIMEOUT_S, get_auth_username as get_impala_auth_username, get_auth_password as get_impala_auth_password
query_server = {
'server_name': 'impala',
'server_host': IMPALA_SERVER_HOST.get(),
'server_port': IMPALA_SERVER_PORT.get(),
'principal': IMPALA_PRINCIPAL.get(),
'impersonation_enabled': IMPERSONATION_ENABLED.get(),
'querycache_rows': QUERYCACHE_ROWS.get(),
'QUERY_TIMEOUT_S': QUERY_TIMEOUT_S.get(),
'auth_username': get_impala_auth_username(),
'auth_password': get_impala_auth_password()
}
else:
kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
query_server = {
'server_name': 'beeswax', # Aka HiveServer2 now
'server_host': HIVE_SERVER_HOST.get(),
'server_port': HIVE_SERVER_PORT.get(),
'principal': kerberos_principal,
'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
'protocol': 'https' if hiveserver2_use_ssl() else 'http',
'host': HIVE_SERVER_HOST.get(),
'port': hive_site.hiveserver2_thrift_http_port(),
'end_point': hive_site.hiveserver2_thrift_http_path()
},
'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
'auth_username': get_auth_username(),
'auth_password': get_auth_password()
}
debug_query_server = query_server.copy()
debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password'))
LOG.debug("Query Server: %s" % debug_query_server)
return query_server
class QueryServerException(Exception):
# Ideally the query handle will be stored here too.
def __init__(self, e, message=''):
super(QueryServerException, self).__init__(e)
self.message = message
class QueryServerTimeoutException(Exception):
def __init__(self, message=''):
super(QueryServerTimeoutException, self).__init__(message)
self.message = message
class NoSuchObjectException(Exception): pass
class HiveServer2Dbms(object):
def __init__(self, client, server_type):
self.client = client
self.server_type = server_type
self.server_name = self.client.query_server['server_name']
@classmethod
def to_matching_wildcard(cls, identifier=None):
cleaned = "*"
if identifier and identifier.strip() != "*":
cleaned = "*%s*" % identifier.strip().strip("*")
return cleaned
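# For example (illustrative): to_matching_wildcard(None) and
# to_matching_wildcard("*") both return "*", while
# to_matching_wildcard("cust") returns "*cust*".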
def get_databases(self, database_names='*'):
identifier = self.to_matching_wildcard(database_names)
hql = "SHOW DATABASES LIKE '%s'" % (identifier) # self.client.get_databases() is too slow
query = hql_query(hql)
timeout = SERVER_CONN_TIMEOUT.get()
handle = self.execute_and_wait(query, timeout_sec=timeout)
if handle:
result = self.fetch(handle, rows=5000)
self.close(handle)
databases = [name for database in result.rows() for name in database]
if len(databases) <= APPLY_NATURAL_SORT_MAX.get():
databases = apply_natural_sort(databases)
return databases
else:
return []
def get_database(self, database):
return self.client.get_database(database)
def get_tables_meta(self, database='default', table_names='*'):
identifier = self.to_matching_wildcard(table_names)
tables = self.client.get_tables_meta(database, identifier)
if len(tables) <= APPLY_NATURAL_SORT_MAX.get():
tables = apply_natural_sort(tables, key='name')
return tables
def get_tables(self, database='default', table_names='*'):
identifier = self.to_matching_wildcard(table_names)
hql = "SHOW TABLES IN `%s` '%s'" % (database, identifier) # self.client.get_tables(database, table_names) is too slow
query = hql_query(hql)
timeout = SERVER_CONN_TIMEOUT.get()
handle = self.execute_and_wait(query, timeout_sec=timeout)
if handle:
result = self.fetch(handle, rows=5000)
self.close(handle)
tables = [name for table in result.rows() for name in table]
if len(tables) <= APPLY_NATURAL_SORT_MAX.get():
tables = apply_natural_sort(tables)
return tables
else:
return []
def get_table(self, database, table_name):
return self.client.get_table(database, table_name)
def get_column(self, database, table_name, column_name):
table = self.client.get_table(database, table_name)
for col in table.cols:
if col.name == column_name:
return col
return None
def execute_query(self, query, design):
return self.execute_and_watch(query, design=design)
def select_star_from(self, database, table):
hql = "SELECT * FROM `%s`.`%s` %s" % (database, table.name, self._get_browse_limit_clause(table))
return self.execute_statement(hql)
def execute_statement(self, hql):
if self.server_name == 'impala':
query = hql_query(hql, QUERY_TYPES[1])
else:
query = hql_query(hql, QUERY_TYPES[0])
return self.execute_and_watch(query)
def fetch(self, query_handle, start_over=False, rows=None):
no_start_over_support = [config_variable for config_variable in self.get_default_configuration(False)
if config_variable.key == 'support_start_over'
and config_variable.value == 'false']
if no_start_over_support:
start_over = False
return self.client.fetch(query_handle, start_over, rows)
def close_operation(self, query_handle):
return self.client.close_operation(query_handle)
def open_session(self, user):
return self.client.open_session(user)
def close_session(self, session):
return self.client.close_session(session)
def cancel_operation(self, query_handle):
resp = self.client.cancel_operation(query_handle)
if self.client.query_server['server_name'] == 'impala':
resp = self.client.close_operation(query_handle)
return resp
def get_sample(self, database, table):
"""No samples if it's a view (HUE-526)"""
if not table.is_view:
limit = min(100, BROWSE_PARTITIONED_TABLE_LIMIT.get())
partition_query = ""
if table.partition_keys:
partitions = self.get_partitions(database, table, partition_spec=None, max_parts=1)
partition_query = 'WHERE ' + ' AND '.join(["%s='%s'" % (table.partition_keys[idx].name, key) for idx, key in enumerate(partitions[0].values)])
hql = "SELECT * FROM `%s`.`%s` %s LIMIT %s" % (database, table.name, partition_query, limit)
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
return result
def analyze_table(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS' % {'database': database, 'table': table}
return self.execute_statement(hql)
def analyze_table_columns(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS FOR COLUMNS' % {'database': database, 'table': table}
return self.execute_statement(hql)
def get_table_stats(self, database, table):
stats = []
if self.server_name == 'impala':
hql = 'SHOW TABLE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
stats = list(result.rows())
else:
table = self.get_table(database, table)
stats = table.stats
return stats
def get_table_columns_stats(self, database, table, column):
if self.server_name == 'impala':
hql = 'SHOW COLUMN STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'DESCRIBE FORMATTED `%(database)s`.`%(table)s` %(column)s' % {'database': database, 'table': table, 'column': column}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
data = list(result.rows())
if self.server_name == 'impala':
data = [col for col in data if col[0] == column][0]
return [
{'col_name': data[0]},
{'data_type': data[1]},
{'distinct_count': data[2]},
{'num_nulls': data[3]},
{'max_col_len': data[4]},
{'avg_col_len': data[5]},
]
else:
return [
{'col_name': data[2][0]},
{'data_type': data[2][1]},
{'min': data[2][2]},
{'max': data[2][3]},
{'num_nulls': data[2][4]},
{'distinct_count': data[2][5]},
{'avg_col_len': data[2][6]},
{'max_col_len': data[2][7]},
{'num_trues': data[2][8]},
{'num_falses': data[2][9]}
]
else:
return []
def get_top_terms(self, database, table, column, limit=30, prefix=None):
limit = min(limit, 100)
prefix_match = ''
if prefix:
prefix_match = "WHERE CAST(%(column)s AS STRING) LIKE '%(prefix)s%%'" % {'column': column, 'prefix': prefix}
hql = 'SELECT %(column)s, COUNT(*) AS ct FROM `%(database)s`.`%(table)s` %(prefix_match)s GROUP BY %(column)s ORDER BY ct DESC LIMIT %(limit)s' % {
'database': database, 'table': table, 'column': column, 'prefix_match': prefix_match, 'limit': limit,
}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=60.0) # Hive is very slow
if handle:
result = self.fetch(handle, rows=limit)
self.close(handle)
return list(result.rows())
else:
return []
def drop_table(self, database, table):
if table.is_view:
hql = "DROP VIEW `%s`.`%s`" % (database, table.name,)
else:
hql = "DROP TABLE `%s`.`%s`" % (database, table.name,)
return self.execute_statement(hql)
def load_data(self, database, table, form, design):
hql = "LOAD DATA INPATH"
hql += " '%s'" % form.cleaned_data['path']
if form.cleaned_data['overwrite']:
hql += " OVERWRITE"
hql += " INTO TABLE "
hql += "`%s`.`%s`" % (database, table.name,)
if form.partition_columns:
hql += " PARTITION ("
vals = []
for key, column_name in form.partition_columns.iteritems():
vals.append("%s='%s'" % (column_name, form.cleaned_data[key]))
hql += ", ".join(vals)
hql += ")"
query = hql_query(hql, database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def drop_tables(self, database, tables, design):
hql = []
for table in tables:
if table.is_view:
hql.append("DROP VIEW `%s`.`%s`" % (database, table.name,))
else:
hql.append("DROP TABLE `%s`.`%s`" % (database, table.name,))
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def invalidate_tables(self, database, tables):
handle = None
for table in tables:
try:
hql = "INVALIDATE METADATA `%s`.`%s`" % (database, table,)
query = hql_query(hql, database, query_type=QUERY_TYPES[1])
handle = self.execute_and_wait(query, timeout_sec=10.0)
except Exception, e:
LOG.warn('Refresh tables cache out of sync: %s' % smart_str(e))
finally:
if handle:
self.close(handle)
def drop_database(self, database):
return self.execute_statement("DROP DATABASE `%s`" % database)
def drop_databases(self, databases, design):
hql = []
for database in databases:
hql.append("DROP DATABASE `%s`" % database)
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def _get_and_validate_select_query(self, design, query_history):
query = design.get_query_statement(query_history.statement_number)
if not query.strip().lower().startswith('select'):
raise Exception(_('Only SELECT statements can be saved. Provided query: %(query)s') % {'query': query})
return query
def insert_query_into_directory(self, query_history, target_dir):
design = query_history.design.get_design()
database = design.query['database']
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (target_dir, query)
return self.execute_statement(hql)
def create_table_as_a_select(self, request, query_history, target_database, target_table, result_meta):
design = query_history.design.get_design()
database = design.query['database']
# Case 1: Hive Server 2 backend or results straight from an existing table
if result_meta.in_tablename:
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = 'CREATE TABLE `%s`.`%s` AS %s' % (target_database, target_table, query)
query_history = self.execute_statement(hql)
else:
# FYI: this path is dead since moving to HiveServer2
#
# Case 2: The results are in some temporary location
# Beeswax backward compatibility and optimization
# 1. Create table
cols = ''
schema = result_meta.schema
for i, field in enumerate(schema.fieldSchemas):
if i != 0:
cols += ',\n'
cols += '`%s` %s' % (field.name, field.type)
# The representation of the delimiter is messy.
# It came from Java as a string, which might have been converted from an integer.
# So it could be "1" (^A), or "10" (\n), or "," (a comma literally).
delim = result_meta.delim
if not delim.isdigit():
delim = str(ord(delim))
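# Worked examples of the conversion above: a literal comma "," becomes "44"
# and control-A "\x01" becomes "1", while an already-numeric "10" (newline)
# is left as-is; delim.zfill(3) below then yields '\044', '\001' and '\010'
# in the generated HQL.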
hql = '''
CREATE TABLE `%s` (
%s
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\%s'
STORED AS TextFile
''' % (target_table, cols, delim.zfill(3))
query = hql_query(hql)
self.execute_and_wait(query)
try:
# 2. Move the results into the table's storage
table_obj = self.get_table('default', target_table)
table_loc = request.fs.urlsplit(table_obj.path_location)[2]
result_dir = request.fs.urlsplit(result_meta.table_dir)[2]
request.fs.rename_star(result_dir, table_loc)
LOG.debug("Moved results from %s to %s" % (result_meta.table_dir, table_loc))
request.info(request, _('Saved query results as new table %(table)s.') % {'table': target_table})
query_history.save_state(QueryHistory.STATE.expired)
except Exception, ex:
query = hql_query('DROP TABLE `%s`' % target_table)
try:
self.execute_and_wait(query)
except Exception, double_trouble:
LOG.exception('Failed to drop table "%s" as well: %s' % (target_table, double_trouble))
raise ex
url = format_preserving_redirect(request, reverse('metastore:index'))
return query_history
def use(self, database):
query = hql_query('USE `%s`' % database)
return self.client.use(query)
def get_log(self, query_handle, start_over=True):
return self.client.get_log(query_handle, start_over)
def get_state(self, handle):
return self.client.get_state(handle)
def get_operation_status(self, handle):
return self.client.get_operation_status(handle)
def execute_and_wait(self, query, timeout_sec=30.0, sleep_interval=0.5):
"""
Run the query and poll its status until it finishes or times out.
"""
handle = self.client.query(query)
curr = time.time()
end = curr + timeout_sec
while curr <= end:
state = self.client.get_state(handle)
if state not in (QueryHistory.STATE.running, QueryHistory.STATE.submitted):
return handle
time.sleep(sleep_interval)
curr = time.time()
# Query timed out, so attempt to cancel operation and raise exception
msg = "The query timed out after %(timeout)d seconds, canceled query." % {'timeout': timeout_sec}
LOG.warning(msg)
try:
self.cancel_operation(handle)
except Exception, e:
msg = "Failed to cancel query."
LOG.warning(msg)
self.close_operation(handle)
raise QueryServerException(e, message=msg)
raise QueryServerTimeoutException(message=msg)
def execute_next_statement(self, query_history, hql_query):
if query_history.is_success() or query_history.is_expired():
# We need to go to the next statement only if the previous one passed
query_history.statement_number += 1
else:
# We need to update the query in case it was fixed
query_history.refresh_design(hql_query)
query_history.last_state = QueryHistory.STATE.submitted.index
query_history.save()
query = query_history.design.get_design()
# In case of multiquery, we need to re-replace the parameters as we save the non substituted query
if query._data_dict['query']['is_parameterized']:
real_query = substitute_variables(query._data_dict['query']['query'], query_history.get_extra('parameters'))
query._data_dict['query']['query'] = real_query
return self.execute_and_watch(query, query_history=query_history)
def execute_and_watch(self, query, design=None, query_history=None):
"""
Run query and return a QueryHistory object in order to see its progress on a Web page.
"""
hql_query = query.hql_query
if query_history is None:
query_history = QueryHistory.build(
owner=self.client.user,
query=hql_query,
server_host='%(server_host)s' % self.client.query_server,
server_port='%(server_port)d' % self.client.query_server,
server_name='%(server_name)s' % self.client.query_server,
server_type=self.server_type,
last_state=QueryHistory.STATE.submitted.index,
design=design,
notify=query.query.get('email_notify', False),
query_type=query.query['type'],
statement_number=0
)
query_history.save()
LOG.debug("Made new QueryHistory id %s user %s query: %s..." % (query_history.id, self.client.user, query_history.query[:25]))
try:
handle = self.client.query(query, query_history.statement_number)
if not handle.is_valid():
msg = _("Server returning invalid handle for query id %(id)d [%(query)s]...") % {'id': query_history.id, 'query': query[:40]}
raise QueryServerException(msg)
except QueryServerException, ex:
LOG.exception(ex)
# Kind of expected (hql compile/syntax error, etc.)
if hasattr(ex, 'handle') and ex.handle:
query_history.server_id, query_history.server_guid = ex.handle.id, ex.handle.id
query_history.log_context = ex.handle.log_context
query_history.save_state(QueryHistory.STATE.failed)
raise ex
# All good
query_history.server_id, query_history.server_guid = handle.get()
query_history.operation_type = handle.operation_type
query_history.has_results = handle.has_result_set
query_history.modified_row_count = handle.modified_row_count
query_history.log_context = handle.log_context
query_history.query_type = query.query['type']
query_history.set_to_running()
query_history.save()
LOG.debug("Updated QueryHistory id %s user %s statement_number: %s" % (query_history.id, self.client.user, query_history.statement_number))
return query_history
def get_results_metadata(self, handle):
return self.client.get_results_metadata(handle)
def close(self, handle):
return self.client.close(handle)
def get_partitions(self, db_name, table, partition_spec=None, max_parts=None, reverse_sort=True):
if max_parts is None or max_parts > BROWSE_PARTITIONED_TABLE_LIMIT.get():
max_parts = BROWSE_PARTITIONED_TABLE_LIMIT.get()
return self.client.get_partitions(db_name, table.name, partition_spec, max_parts, reverse_sort)
def get_partition(self, db_name, table_name, partition_spec):
table = self.get_table(db_name, table_name)
partitions = self.get_partitions(db_name, table, partition_spec=partition_spec, max_parts=None)
if len(partitions) != 1:
raise NoSuchObjectException(_("Query did not return exactly one partition result"))
partition = partitions[0]
partition_query = " AND ".join(partition.partition_spec.split(','))
hql = "SELECT * FROM `%s`.`%s` WHERE %s" % (db_name, table_name, partition_query)
return self.execute_statement(hql)
def describe_partition(self, db_name, table_name, partition_spec):
return self.client.get_table(db_name, table_name, partition_spec=partition_spec)
def explain(self, query):
return self.client.explain(query)
def getStatus(self):
return self.client.getStatus()
def get_default_configuration(self, include_hadoop):
return self.client.get_default_configuration(include_hadoop)
def _get_browse_limit_clause(self, table):
"""Get the limit clause when browsing a partitioned table"""
if table.partition_keys:
limit = BROWSE_PARTITIONED_TABLE_LIMIT.get()
if limit > 0:
return "LIMIT %d" % (limit,)
return ""
class Table:
"""
Represents the metadata of a Hive Table.
"""
@property
def hdfs_link(self):
return location_to_url(self.path_location)
class DataTable:
"""
Represents the data of a Hive Table.
If the dataset has more rows, a new fetch should be done in order to return a new data table with the next rows.
"""
pass
# TODO decorator?
def expand_exception(exc, db, handle=None):
try:
if handle is not None:
log = db.get_log(handle)
elif hasattr(exc, 'get_rpc_handle') or hasattr(exc, 'log_context'):
log = db.get_log(exc)
else:
log = ''
except Exception, e:
# Always show something, even if server has died on the job.
log = _("Could not retrieve logs: %s." % e)
if not exc.args or not exc.args[0]:
error_message = _("Unknown exception.")
else:
error_message = force_unicode(exc.args[0], strings_only=True, errors='replace')
return error_message, log
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import asyncio
import sys
import os
import json
import pytest
from datetime import timedelta
from msrest.serialization import UTC
from urllib.parse import urlparse
import datetime as dt
from devtools_testutils import AzureRecordedTestCase
from devtools_testutils.aio import recorded_by_proxy_async
from azure.core.credentials import AzureKeyCredential, AzureSasCredential
from azure.core.messaging import CloudEvent
from azure.core.serialization import NULL
from azure.eventgrid import EventGridEvent, generate_sas
from azure.eventgrid.aio import EventGridPublisherClient
from azure.eventgrid._helpers import _cloud_event_to_generated
from eventgrid_preparer import (
EventGridPreparer
)
class TestEventGridPublisherClient(AzureRecordedTestCase):
def create_eg_publisher_client(self, endpoint):
credential = self.get_credential(EventGridPublisherClient, is_async=True)
client = self.create_client_from_credential(EventGridPublisherClient, credential=credential, endpoint=endpoint)
return client
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_data_dict(self, variables, eventgrid_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_topic_endpoint)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_data_as_list(self, variables, eventgrid_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_topic_endpoint)
eg_event1 = EventGridEvent(
subject="sample",
data="eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
eg_event2 = EventGridEvent(
subject="sample2",
data="eventgridevent2",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send([eg_event1, eg_event2])
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_fails_without_full_url(self, variables, eventgrid_topic_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_key)
parsed_url = urlparse(eventgrid_topic_endpoint)
client = EventGridPublisherClient(parsed_url.netloc, akc_credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
with pytest.raises(ValueError):
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_data_str(self, variables, eventgrid_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_topic_endpoint)
eg_event = EventGridEvent(
subject="sample",
data="eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_data_bytes(self, variables, eventgrid_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_topic_endpoint)
eg_event = EventGridEvent(
subject="sample",
data=b"eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
with pytest.raises(TypeError, match="Data in EventGridEvent cannot be bytes*"):
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_event_grid_event_dict_data_bytes(self, variables, eventgrid_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_topic_endpoint)
eg_event = {
"subject":"sample",
"data":b"eventgridevent",
"eventType":"Sample.EventGrid.Event",
"dataVersion":"2.0",
"id": "123-ddf-133-324255ffd",
"eventTime": dt.datetime.utcnow()
}
with pytest.raises(TypeError, match="Data in EventGridEvent cannot be bytes*"):
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_dict(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = {"sample": "cloudevent"},
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_str(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_bytes(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = b"cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_as_list(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send([cloud_event])
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_with_extensions(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event",
extensions={
'reasoncode':204,
'extension':'hello'
}
)
await client.send([cloud_event])
internal = _cloud_event_to_generated(cloud_event).serialize()
assert 'reasoncode' in internal
assert 'extension' in internal
assert internal['reasoncode'] == 204
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_dict(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event1 = {
"id": "1234",
"source": "http://samplesource.dev",
"specversion": "1.0",
"data": "cloudevent",
"type": "Sample.Cloud.Event"
}
await client.send(cloud_event1)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_none(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = None,
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/16993")
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_cloud_event_data_NULL(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = NULL,
type="Sample.Cloud.Event"
)
def callback(request):
req = json.loads(request.http_request.body)
assert req[0].get("data") is None
await client.send(cloud_event, raw_request_hook=callback)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_signature_credential(self, variables, eventgrid_topic_key, eventgrid_topic_endpoint):
expiration_date_utc = dt.datetime.now(UTC()) + timedelta(hours=1)
signature = generate_sas(eventgrid_topic_endpoint, eventgrid_topic_key, expiration_date_utc)
credential = AzureSasCredential(signature)
client = EventGridPublisherClient(eventgrid_topic_endpoint, credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_custom_schema_event(self, variables, eventgrid_custom_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_custom_event_topic_endpoint)
custom_event = {
"customSubject": "sample",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "1234",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data"
}
await client.send(custom_event)
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_custom_schema_event_as_list(self, variables, eventgrid_custom_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_custom_event_topic_endpoint)
custom_event1 = {
"customSubject": "sample",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "1234",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data"
}
custom_event2 = {
"customSubject": "sample2",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "12345",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data 2"
}
await client.send([custom_event1, custom_event2])
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_and_close_async_session(self, variables, eventgrid_cloud_event_topic_endpoint):
client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
async with client: # this throws if client can't close
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@pytest.mark.skip()
@EventGridPreparer()
@recorded_by_proxy_async
def test_send_NONE_credential_async(self, variables, eventgrid_topic_endpoint):
with pytest.raises(ValueError, match="Parameter 'self._credential' must not be None."):
client = EventGridPublisherClient(eventgrid_topic_endpoint, None)
@pytest.mark.live_test_only
@EventGridPreparer()
@recorded_by_proxy_async
@pytest.mark.asyncio
async def test_send_token_credential(self, variables, eventgrid_topic_endpoint):
credential = self.get_credential(EventGridPublisherClient)
client = EventGridPublisherClient(eventgrid_topic_endpoint, credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
|
|
"""
Python functions to facilitate interacting with the dataset from Koumura and
Okanoya 2016 [1].
The original code was released under the GNU license:
https://github.com/cycentum/birdsong-recognition/blob/master/
birdsong-recognition/src/computation/ViterbiSequencer.java
Note that the Python implementations are not based directly on the Java code,
but they have been tested to check whether they produce the same results.
data: https://figshare.com/articles/BirdsongRecognition/3470165
[1] Koumura T, Okanoya K (2016) Automatic Recognition of Element Classes and
Boundaries in the Birdsong with Variable Sequences. PLoS ONE 11(7): e0159188.
doi:10.1371/journal.pone.0159188
"""
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
class Syllable:
"""
Object that represents a syllable.
Properties
----------
position : int
starting sample number ("frame") within .wav file
*** relative to start of sequence! ***
length : int
duration given as number of samples
label : char
text representation of syllable as classified by a human
or a machine learning algorithm
"""
def __init__(self, position, length, label):
self.position = position
self.length = length
self.label = label
def __repr__(self):
rep_str = "Syllable labeled {} at position {} with length {}".format(
self.label, self.position, self.length
)
return rep_str
class Sequence:
"""
Object that represents a sequence of syllables.
Properties
----------
wavFile : string
file name of .wav file in which sequence occurs
position : int
starting sample number within .wav file
length : int
duration given as number of samples
syls : list
list of syllable objects that make up sequence
seqSpect : spectrogram object
"""
def __init__(self, wav_file, position, length, syl_list):
self.wavFile = wav_file
self.position = position
self.length = length
self.numSyls = len(syl_list)
self.syls = syl_list
self.seqSpect = None
def __repr__(self):
rep_str = "Sequence from {} with position {} and length {}".format(
self.wavFile, self.position, self.length
)
return rep_str
def parse_xml(xml_file, concat_seqs_into_songs=False):
"""
parses Annotation.xml files.
Parameters
----------
xml_file : string
filename of .xml file, e.g. 'Annotation.xml'
concat_seqs_into_songs : Boolean
if True, concatenate sequences into songs, where each wav file is a
song. Default is False.
Returns
-------
seq_list : list of Sequence objects
"""
tree = ET.ElementTree(file=xml_file)
seq_list = []
for seq in tree.iter(tag="Sequence"):
wav_file = seq.find("WaveFileName").text
position = int(seq.find("Position").text)
length = int(seq.find("Length").text)
syl_list = []
for syl in seq.iter(tag="Note"):
syl_position = int(syl.find("Position").text)
syl_length = int(syl.find("Length").text)
label = syl.find("Label").text
syl_obj = Syllable(position=syl_position, length=syl_length, label=label)
syl_list.append(syl_obj)
seq_obj = Sequence(
wav_file=wav_file, position=position, length=length, syl_list=syl_list
)
seq_list.append(seq_obj)
if concat_seqs_into_songs:
song_list = []
curr_wavFile = seq_list[0].wavFile
new_seq_obj = seq_list[0]
for syl in new_seq_obj.syls:
syl.position += new_seq_obj.position
for seq in seq_list[1:]:
if seq.wavFile == curr_wavFile:
new_seq_obj.length += seq.length
new_seq_obj.numSyls += seq.numSyls
for syl in seq.syls:
syl.position += seq.position
new_seq_obj.syls += seq.syls
else:
song_list.append(new_seq_obj)
curr_wavFile = seq.wavFile
new_seq_obj = seq
for syl in new_seq_obj.syls:
syl.position += new_seq_obj.position
song_list.append(new_seq_obj) # to append last song
return song_list
else:
return seq_list
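# A minimal usage sketch (the annotation path below is hypothetical): with
# concat_seqs_into_songs=True each returned Sequence corresponds to one .wav
# file and syllable positions are absolute within that file.
def _example_parse_annotation(annotation_path="Bird0/Annotation.xml"):
    songs = parse_xml(annotation_path, concat_seqs_into_songs=True)
    for song in songs:
        labels = "".join(syl.label for syl in song.syls)
        onsets = [syl.position for syl in song.syls]
        print(song.wavFile, song.numSyls, labels[:20], onsets[:3])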
def load_song_annot(filename, annot_file=None):
"""load annotation for specific song from Koumura dataset
Parameters
----------
filename : str
filename of .wav file from Koumura dataset
annot_file : str
absolute path to the 'Annotation.xml' file that
contains the annotation for `filename`.
Default is None, in which case the function
searches for Annotation.xml in the parent directory
of `filename` (if a full path is given) or in the
parent of the current working directory.
Returns
-------
annotation_dict : dict
with keys 'filename', 'onsets_Hz', 'offsets_Hz', 'onsets_s',
'offsets_s', and 'labels'
"""
if annot_file is None:
dirname, songfile = os.path.split(filename)
if dirname == "":
annot_file = glob.glob("../Annotation.xml")
else:
annot_file = glob.glob(os.path.join(dirname, "../Annotation.xml"))
if len(annot_file) < 1:
raise ValueError(
"Can't open {}, Annotation.xml file not found in parent of current directory".format(
songfile
)
)
elif len(annot_file) > 1:
raise ValueError(
"Can't open {}, found more than one Annotation.xml file "
"in parent of current directory".format(songfile)
)
else:
annot_file = annot_file[0]
seq_list = parse_xml(annot_file, concat_seqs_into_songs=True)
wav_files = [seq.wavFile for seq in seq_list]
ind = wav_files.index(os.path.basename(filename))  # basename matches seq.wavFile entries
this_seq = seq_list[ind]
onsets_Hz = np.asarray([syl.position for syl in this_seq.syls])
offsets_Hz = np.asarray([syl.position + syl.length for syl in this_seq.syls])
labels = [syl.label for syl in this_seq.syls]
annotation_dict = {
"filename": filename,
"onsets_Hz": onsets_Hz,
"offsets_Hz": offsets_Hz,
"onsets_s": None,
"offsets_s": None,
"labels": labels,
}
return annotation_dict
def determine_unique_labels(annotation_file):
"""given an annotation.xml file
from a bird in BirdsongRecognition dataset,
determine unique set of labels applied to syllables from that bird"""
annotation = parse_xml(annotation_file, concat_seqs_into_songs=True)
lbls = [syl.label for seq in annotation for syl in seq.syls]
unique_lbls = np.unique(lbls).tolist()
unique_lbls = "".join(unique_lbls) # convert from list to string
return unique_lbls
class resequencer:
"""
Computes most likely sequence of labels given observation probabilities
at each time step in sequence and a second-order transition probability
matrix taken from training data.
Uses a Viterbi-like dynamic programming algorithm. (Viterbi-like because
the observation probabilities are not strictly speaking the emission
probabilities from hidden states but instead are outputs from some machine
learning model, e.g., the softmax layer of a DCNN that assigns a probability
to each label at each time step.)
This is a Python implementation of the algorithm from Koumura Okanoya 2016.
See "compLabelSequence" in:
https://github.com/cycentum/birdsong-recognition/blob/master/
birdsong-recognition/src/computation/ViterbiSequencer.java
Parameters
----------
transition_probs : ndarray
second-order transition matrix, an n x n x n array where the value at
[i, j, k] is the probability of transitioning to labels[k] at time step
t given that labels[j] was observed at t-1 and labels[i] was observed
at t-2
labels : list of chars
Contains all unique labels used to label songs being resequenced
Returns
-------
resequenced : list of chars
The most likely sequence of labels, returned by the ``resequence``
method when it is passed an m x p array of observation probabilities
(m time steps by p label classes).
"""
def __init__(self, transition_probs, labels):
self.transition_probs = transition_probs
self.labels = labels
self.num_labels = len(labels)
self.destination_label_ind = range(0, self.num_labels)
# num_states calculation: +1 for 'e' state at beginning of initial states
# number of labels (now without 'e') and + 1 for the final 'tail' state
self.num_states = (self.num_labels + 1) * self.num_labels + 1
# create dict of lists used to determine 'destination' state
# given source state (key) and emitted label (index into each list)
# i..e if source state is 5, destination_state{5} will return a list as
# long as the number of labels, indexing into that list w/ e.g. index 4
# will return some state number that then becomes the destination state
# so len(destination_states.keys()) == num_states
self.destination_states = {}
dst_state_ctr = 0
for label_one in range(0, self.num_labels + 1): # +1 for 'e' state
for label_two in range(0, self.num_labels): # now without e
dest_label_one = label_two
dest_state_list = []
for dest_label_two in range(0, self.num_labels):
dest_state_list.append(
dest_label_one * self.num_labels + dest_label_two
)
self.destination_states[dst_state_ctr] = dest_state_list
dst_state_ctr += 1
# + 1 for the final tail states
dest_label_one = self.num_labels
dest_state_list = []
for dest_label_two in range(0, self.num_labels):
dest_state_list.append(dest_label_one * self.num_labels + dest_label_two)
self.destination_states[dst_state_ctr] = dest_state_list
# number of tail states = num_states because any state can transition to
# a tail state and the tail state is non-emitting
self.tail_states = list(range(0, self.num_states))
self.head_state = self.num_states - 1 # last state in list is head
# prob. of transitioning from head state 'e' to any initial state
# 'e0','e1',...,'e(N-1)', where N is the number of labels, is equal for all
# initial states.
self.initial_transition_prob = 1.0 / self.num_labels
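# Concrete illustration of the layout above: with 3 labels there are
# (3 + 1) * 3 + 1 = 13 states. States 0-8 encode a (t-2, t-1) pair of real
# labels, states 9-11 are the pairs whose t-2 slot is the 'e' placeholder,
# and state 12 is the head state the decoder starts from (any state may act
# as a tail state), so destination_states has 13 entries, each a list of 3
# destination states.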
def resequence(self, observation_probs):
num_time_steps = observation_probs.shape[0] - 1
source_states = []
for time_step in range(num_time_steps):
source_states.append(np.zeros((self.num_states,), dtype=int))
# initial inductive step of Viterbi
current_score = np.ones((self.num_states,)) * -np.inf
# use dest_labl_id to index into observation_prob array
for dest_labl_id in self.destination_label_ind:
# need destination state to assign it a score in the
# next_score array
dest_state = self.destination_states[self.head_state][dest_labl_id]
obsv_prob = observation_probs[0, dest_labl_id] # row 0 = 1st t step
current_score[dest_state] = np.log(self.initial_transition_prob) + np.log(
obsv_prob
)
# main loop for Viterbi
for time_step in range(num_time_steps):
next_score = np.ones((self.num_states,)) * -np.inf
for source_state in range(self.num_states):
for dest_label_ind in self.destination_label_ind:
# need destination state to assign it a score in the
# next_score array
dest_state = self.destination_states[source_state][dest_label_ind]
label_one = source_state // self.num_labels # floor div
if label_one == self.num_labels or source_state == self.head_state:
trans_prob = self.initial_transition_prob
else:
label_two = source_state % self.num_labels
trans_prob = self.transition_probs[
label_one, label_two, dest_label_ind
]
ob_prob = observation_probs[time_step + 1][dest_label_ind]
tmp_next_score = (
current_score[source_state]
+ np.log(trans_prob)
+ np.log(ob_prob)
)
if tmp_next_score >= next_score[dest_state]:
next_score[dest_state] = tmp_next_score
source_states[time_step][dest_state] = source_state
tmp = current_score
current_score = next_score
next_score = tmp
# retrieve best state sequence in reverse using scores directly
current_state = -1
# initial step to get best state
for state in self.tail_states:
if (
current_state == -1
or current_score[state] > current_score[current_state]
):
current_state = state
resequenced = []
# loop through len-2 because we already figured out last element at -1
for time_step in range((len(observation_probs) - 2), -1, -1):
previous_state = source_states[time_step][current_state]
source_label = -1
possible_dest_states = self.destination_states[previous_state]
for d in range(len(possible_dest_states)):
if possible_dest_states[d] == current_state:
source_label_ind = self.destination_label_ind[d]
source_label = self.labels[source_label_ind]
break
resequenced.append(source_label)
current_state = previous_state
previous_state = self.head_state
source_label = -1
possible_dest_states = self.destination_states[previous_state]
for d in range(len(possible_dest_states)):
if possible_dest_states[d] == current_state:
source_label_ind = self.destination_label_ind[d]
source_label = self.labels[source_label_ind]
break
resequenced.append(source_label)
resequenced.reverse()
return resequenced
def get_trans_mat(seqs, smoothing_constant=1e-4):
"""
calculate second-order transition matrix given sequences of syllable labels
Parameters
----------
seqs : list of Sequence objects
smoothing_constant : float
default is 1e-4. Added to all probabilities so that none are zero.
Mathematically convenient for computing Viterbi algorithm with
exponential.
Returns
-------
trans_mat : 3-d array
Shape is n * n * n where n is the number of unique labels across all
Sequences (taken in sorted order, as returned by numpy.unique).
trans_mat[i,j,k] is the probability of transitioning to labels[k]
at time step t, given that the label at time step t-1 was labels[j]
and the label at time step t-2 was labels[i].
"""
all_syls = [syl.label for seq in seqs for syl in seq.syls]
labels = np.unique(all_syls)
all_label_seqs = []
for seq in seqs:
all_label_seqs.append([syl.label for syl in seq.syls])
num_labels = labels.shape[0]
counts = np.zeros((num_labels, num_labels, num_labels))
for label_seq in all_label_seqs:
for ind in range(2, len(label_seq)):
k = np.where(labels == label_seq[ind])
j = np.where(labels == label_seq[ind - 1])
i = np.where(labels == label_seq[ind - 2])
counts[i, j, k] += 1
trans_mat = np.zeros(counts.shape)
for i in range(num_labels):
for j in range(num_labels):
num_ij_occurences = np.sum(counts[i, j, :])
if num_ij_occurences > 0:
for k in range(num_labels):
trans_mat[i, j, k] = counts[i, j, k] / num_ij_occurences
if smoothing_constant:
for i in range(num_labels):
for j in range(num_labels):
trans_mat[i, j, :] += smoothing_constant
trans_mat[i, j, :] /= np.sum(trans_mat[i, j, :])
return trans_mat
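# A minimal end-to-end sketch (the annotation path and the uniform observation
# probabilities are illustrative): estimate the transition matrix from
# annotated training sequences, then decode a matrix of per-frame label
# probabilities with the resequencer above.
def _example_resequence(annotation_path="Bird0/Annotation.xml",
                        observation_probs=None):
    seqs = parse_xml(annotation_path, concat_seqs_into_songs=False)
    trans_mat = get_trans_mat(seqs)
    labels = determine_unique_labels(annotation_path)  # same sorted order as np.unique
    decoder = resequencer(trans_mat, labels)
    if observation_probs is None:
        # dummy uniform probabilities for 10 time steps, one column per label
        observation_probs = np.full((10, len(labels)), 1.0 / len(labels))
    return decoder.resequence(observation_probs)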
|
|
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Dell Storage Center Block Device Driver."""
import logging
import platform
import threading
import time
import uuid
import bitmath
import eliot
from flocker.node.agents import blockdevice
from twisted.python import filepath
from zope.interface import implementer
import dell_storagecenter_api
import iscsi_utils
LOG = logging.getLogger(__name__)
ALLOCATION_UNIT = bitmath.GiB(1).bytes
class DellStorageCenterBlockDriverLogHandler(logging.Handler):
"""Python log handler to route to Eliot logging."""
def emit(self, record):
"""Writes log message to the stream.
:param record: The record to be logged.
"""
msg = self.format(record)
eliot.Message.new(
message_type="flocker:node:agents:blockdevice:dellstoragecenter",
message_level=record.levelname,
message=msg).write()
def create_driver_instance(cluster_id, **config):
"""Instantiate a new driver instances.
Creates a new instance with parameters passed in from the config.
:param cluster_id: The container cluster ID.
:param config: The driver configuration settings.
:return: A new StorageCenterBlockDeviceAPI object.
"""
# Configure log routing to the Flocker Eliot logging
root_logger = logging.getLogger()
root_logger.addHandler(DellStorageCenterBlockDriverLogHandler())
root_logger.setLevel(logging.DEBUG)
config['cluster_id'] = cluster_id
return DellStorageCenterBlockDeviceAPI(**config)
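# A hedged usage sketch: Flocker calls create_driver_instance with the cluster
# ID plus the settings from the agent configuration file. Only 'dell_sc_ssn'
# is visible in this module; the remaining keys (array address, credentials,
# etc.) are consumed by StorageCenterApiHelper, so the names shown below are
# assumptions.
#
#   api = create_driver_instance(
#       cluster_id,
#       dell_sc_ssn=448,
#       storage_host='192.0.2.10',   # hypothetical key name
#       username='admin',            # hypothetical key name
#       password='secret',           # hypothetical key name
#   )
#   api.create_volume(uuid.uuid4(), int(bitmath.GiB(8).bytes))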
class BlockDriverAPIException(Exception):
"""General backend API exception."""
@implementer(blockdevice.IBlockDeviceAPI)
@implementer(blockdevice.IProfiledBlockDeviceAPI)
class DellStorageCenterBlockDeviceAPI(object):
"""Block device driver for Dell Storage Center.
Implements the ``IBlockDeviceAPI`` for interacting with Storage Center
array storage.
"""
VERSION = '1.0.0'
def __init__(self, **kwargs):
"""Initialize new instance of the driver.
:param configuration: The driver configuration settings.
:param cluster_id: The cluster ID we are running on.
"""
self.cluster_id = kwargs.get('cluster_id')
self._local_compute = None
self.ssn = kwargs.get('dell_sc_ssn', 448)
self.configuration = kwargs
self._client = dell_storagecenter_api.StorageCenterApiHelper(
kwargs)
def _to_blockdevicevolume(self, scvolume, attached_to=None):
"""Converts our API volume to a ``BlockDeviceVolume``."""
dataset_id = uuid.UUID('{00000000-0000-0000-0000-000000000000}')
try:
dataset_id = uuid.UUID("{%s}" % scvolume.get('name'))
except ValueError:
pass
retval = blockdevice.BlockDeviceVolume(
blockdevice_id=scvolume.get('name'),
size=int(
float(scvolume.get('configuredSize').replace(' Bytes', ''))),
attached_to=attached_to,
dataset_id=dataset_id)
return retval
def allocation_unit(self):
"""Gets the minimum allocation unit for our backend.
The Storage Center recommended minimum is 1 GiB.
:returns: 1 GiB in bytes.
"""
return ALLOCATION_UNIT
def compute_instance_id(self):
"""Gets an identifier for this node.
This will be compared against ``BlockDeviceVolume.attached_to``
to determine which volumes are locally attached and it will be used
with ``attach_volume`` to locally attach volumes.
For Storage Center we use the node's hostname as the identifier.
:returns: A ``unicode`` object giving a provider-specific node
identifier which identifies the node where the method
is run.
"""
if not self._local_compute:
self._local_compute = unicode(platform.uname()[1])
return self._local_compute
def create_volume(self, dataset_id, size):
"""Create a new volume on the array.
:param dataset_id: The Flocker dataset ID for the volume.
:param size: The size of the new volume in bytes.
:return: A ``BlockDeviceVolume``
"""
return self.create_volume_with_profile(dataset_id, size, None)
def create_volume_with_profile(self, dataset_id, size, profile_name):
"""Create a new volume on the array.
:param dataset_id: The Flocker dataset ID for the volume.
:param size: The size of the new volume in bytes.
:param profile_name: The name of the storage profile for
this volume.
:return: A ``BlockDeviceVolume``
"""
volume_name = u"%s" % dataset_id
volume_size = self._bytes_to_gig(size)
scvolume = None
with self._client.open_connection() as api:
try:
scvolume = api.create_volume(volume_name,
volume_size,
profile_name)
except Exception:
LOG.exception('Error creating volume.')
raise
return self._to_blockdevicevolume(scvolume)
def destroy_volume(self, blockdevice_id):
"""Destroy an existing volume.
:param blockdevice_id: The volume unique ID.
"""
deleted = False
LOG.info('Destroying volume %s', blockdevice_id)
with self._client.open_connection() as api:
try:
volume = api.find_volume(blockdevice_id)
if not volume:
raise blockdevice.UnknownVolume(blockdevice_id)
deleted = api.delete_volume(blockdevice_id)
except Exception:
# TODO(smcginnis) Catch more specific exception
LOG.exception('Error destroying volume.')
raise
if not deleted:
# Something happened
raise BlockDriverAPIException('Unable to delete volume.')
def _do_rescan(self, process):
"""Performs a SCSI rescan on this host."""
rescan_thread = threading.Thread(target=iscsi_utils.rescan_iscsi)
rescan_thread.name = '%s_rescan' % process
rescan_thread.daemon = True
rescan_thread.start()
def attach_volume(self, blockdevice_id, attach_to):
"""Attach an existing volume to an initiator.
:param blockdevice_id: The unique identifier for the volume.
:param attach_to: An identifier like the one returned by the
``compute_instance_id`` method indicating the node to which to
attach the volume.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
        :returns: A ``BlockDeviceVolume`` with an ``attached_to`` attribute set
to ``attach_to``.
"""
LOG.info('Attaching %s to %s', blockdevice_id, attach_to)
# Functional tests expect a failure if it's already
# attached, even if we're being asked to attach to
# the same host.
# not_local = attach_to != self.compute_instance_id()
not_local = True
with self._client.open_connection() as api:
# Check that we have that volume
scvolume = api.find_volume(blockdevice_id)
if not scvolume:
raise blockdevice.UnknownVolume(blockdevice_id)
# Make sure we have a server defined for this host
iqn = iscsi_utils.get_initiator_name()
host = api.find_server(iqn)
LOG.info("Search for server returned: %s", host)
if not host:
# Try to create a new host
host = api.create_server(attach_to, iqn)
LOG.info("Created server %s", host)
# Make sure the server is logged in to the array
ports = api.get_iscsi_ports()
for port in ports:
iscsi_utils.iscsi_login(port[0], port[1])
# Make sure we were able to find something
if not host:
raise BlockDriverAPIException()
# First check if we are already mapped
mappings = api.find_mapping_profiles(scvolume)
if mappings:
# See if it is to this server
if not_local:
raise blockdevice.AlreadyAttachedVolume(blockdevice_id)
for mapping in mappings:
if (mapping['server']['instanceName'] !=
host['instanceName']):
raise blockdevice.AlreadyAttachedVolume(blockdevice_id)
mapping = api.map_volume(scvolume, host)
if not mapping:
raise BlockDriverAPIException(
'Unable to map volume to server.')
self._do_rescan('attach')
return self._to_blockdevicevolume(scvolume, attach_to)
def detach_volume(self, blockdevice_id):
"""Detach ``blockdevice_id`` from whatever host it is attached to.
:param unicode blockdevice_id: The unique identifier for the block
device being detached.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to anything.
:returns: ``None``
"""
LOG.info('Detaching %s', blockdevice_id)
with self._client.open_connection() as api:
# Check that we have that volume
scvolume = api.find_volume(blockdevice_id)
if not scvolume:
raise blockdevice.UnknownVolume(blockdevice_id)
# First check if we are mapped
mappings = api.find_mapping_profiles(scvolume)
if not mappings:
raise blockdevice.UnattachedVolume(blockdevice_id)
device_id = scvolume['deviceId']
paths = iscsi_utils.find_paths(device_id)
paths.reverse()
for path in paths:
iscsi_utils.remove_device(path)
# Make sure we have a server defined for this host
iqn = iscsi_utils.get_initiator_name()
host = api.find_server(iqn)
LOG.info("Search for server returned: %s", host)
if not host:
# Try to create a new host
host = api.create_server(
self.compute_instance_id(), iqn)
LOG.info("Created server %s", host)
# Make sure we were able to find something
if not host:
raise BlockDriverAPIException('Unable to locate server.')
api.unmap_volume(scvolume, host)
self._do_rescan('detach')
def list_volumes(self):
"""List all the block devices available via the back end API.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
volumes = []
try:
with self._client.open_connection() as api:
vols = api.list_volumes()
# Now convert our API objects to flocker ones
for vol in vols:
attached_to = None
mappings = api.find_mapping_profiles(vol)
if mappings:
attached_to = mappings[0]['server']['instanceName']
volumes.append(
self._to_blockdevicevolume(vol, attached_to))
except Exception:
LOG.exception('Error encountered listing volumes.')
raise
LOG.info(volumes)
return volumes
def get_device_path(self, blockdevice_id):
"""Return the device path.
Returns the local device path that has been allocated to the block
device on the host to which it is currently attached.
:param unicode blockdevice_id: The unique identifier for the block
device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
:returns: A ``FilePath`` for the device.
"""
device_id = None
with self._client.open_connection() as api:
# Check that we have that volume
volume = api.find_volume(blockdevice_id)
if not volume:
raise blockdevice.UnknownVolume(blockdevice_id)
scvolume = api.find_volume(blockdevice_id)
device_id = scvolume['deviceId']
# First check if we are mapped
# NOTE: The assumption right now is if we are mapped,
# we are mapped to the local compute host.
mappings = api.find_mapping_profiles(scvolume)
if not mappings:
raise blockdevice.UnattachedVolume(blockdevice_id)
if not device_id:
raise blockdevice.UnknownVolume(blockdevice_id)
# Look for any new devices
retries = 0
while retries < 4:
paths = iscsi_utils.find_paths(device_id)
if paths:
# Just return the first path
return filepath.FilePath(paths[0]).realpath()
retries += 1
LOG.info('%s not found, attempt %d', device_id, retries)
time.sleep(5)
return None
def resize_volume(self, blockdevice_id, size):
"""Resize an existing volume.
:param blockdevice_id: The unique identifier for the device.
:param size: The new requested size.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does
not exist.
:returns: ``None``
"""
with self._client.open_connection() as api:
# Check that we have that volume
scvolume = api.find_volume(blockdevice_id)
if not scvolume:
raise blockdevice.UnknownVolume(blockdevice_id)
volume_size = self._bytes_to_gig(size)
if not api.expand_volume(scvolume, volume_size):
raise blockdevice.VolumeException(blockdevice_id)
def _bytes_to_gig(self, size):
"""Convert size in bytes to GiB.
:param size: The number of bytes.
        :returns: The size in GiB (gibibytes).
"""
return bitmath.Byte(size).to_GiB().value
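# Quick reference (assumed behaviour of the bitmath library used above):
# bitmath.GiB(1).bytes evaluates to 1073741824.0, which is the ALLOCATION_UNIT
# defined at the top of this module, and
# bitmath.Byte(2 * 1073741824).to_GiB().value evaluates to 2.0, which is what
# _bytes_to_gig() returns for a 2 GiB request.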
|
|
import pytest
import networkx as nx
from networkx.testing import almost_equal
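# The 4x4 grid graph used below is relabeled 1..16 row by row (cnlti with
# first_label=1, ordering="sorted"), so node u corresponds to the cell
# ((u - 1) // c, (u - 1) % c); e.g. node 1 is (0, 0) and node 12 is (2, 3),
# which gives the Manhattan shortest-path length of 5 asserted in the tests.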
def validate_grid_path(r, c, s, t, p):
assert isinstance(p, list)
assert p[0] == s
assert p[-1] == t
s = ((s - 1) // c, (s - 1) % c)
t = ((t - 1) // c, (t - 1) % c)
assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1
p = [((u - 1) // c, (u - 1) % c) for u in p]
for u in p:
assert 0 <= u[0] < r
assert 0 <= u[1] < c
for u, v in zip(p[:-1], p[1:]):
assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)]
class TestGenericPath:
@classmethod
def setup_class(cls):
from networkx import convert_node_labels_to_integers as cnlti
cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1,
ordering="sorted")
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
cls.neg_weights = nx.DiGraph()
cls.neg_weights.add_edge(0, 1, weight=1)
cls.neg_weights.add_edge(0, 2, weight=3)
cls.neg_weights.add_edge(1, 3, weight=1)
cls.neg_weights.add_edge(2, 3, weight=-2)
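        # In neg_weights the path 0 -> 2 -> 3 costs 3 + (-2) = 1, beating
        # 0 -> 1 -> 3 at 1 + 1 = 2; only Bellman-Ford handles the negative
        # edge correctly, which test_shortest_path checks below.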
def test_shortest_path(self):
assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3]
assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4]
validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12))
assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3]
# now with weights
assert (nx.shortest_path(self.cycle, 0, 3, weight='weight') ==
[0, 1, 2, 3])
assert (nx.shortest_path(self.cycle, 0, 4, weight='weight') ==
[0, 6, 5, 4])
validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12,
weight='weight'))
assert (nx.shortest_path(self.directed_cycle, 0, 3,
weight='weight') ==
[0, 1, 2, 3])
# weights and method specified
assert (nx.shortest_path(self.directed_cycle, 0, 3,
weight='weight', method='dijkstra') ==
[0, 1, 2, 3])
assert (nx.shortest_path(self.directed_cycle, 0, 3,
weight='weight', method='bellman-ford') ==
[0, 1, 2, 3])
        # Bellman-Ford is required for the negative-weight edge here;
        # Dijkstra's will probably (depending on the precise implementation)
        # incorrectly return [0, 1, 3] instead
assert (nx.shortest_path(self.neg_weights, 0, 3, weight='weight',
method='bellman-ford') ==
[0, 2, 3])
# confirm bad method rejection
pytest.raises(ValueError, nx.shortest_path, self.cycle, method='SPAM')
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8)
def test_shortest_path_target(self):
answer = {0: [0, 1], 1: [1], 2: [2, 1]}
sp = nx.shortest_path(nx.path_graph(3), target=1)
assert sp == answer
# with weights
sp = nx.shortest_path(nx.path_graph(3), target=1, weight='weight')
assert sp == answer
# weights and method specified
sp = nx.shortest_path(nx.path_graph(3), target=1, weight='weight',
method='dijkstra')
assert sp == answer
sp = nx.shortest_path(nx.path_graph(3), target=1, weight='weight',
method='bellman-ford')
assert sp == answer
def test_shortest_path_length(self):
assert nx.shortest_path_length(self.cycle, 0, 3) == 3
assert nx.shortest_path_length(self.grid, 1, 12) == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4
# now with weights
assert (nx.shortest_path_length(self.cycle, 0, 3,
weight='weight') ==
3)
assert (nx.shortest_path_length(self.grid, 1, 12,
weight='weight') ==
5)
assert (nx.shortest_path_length(self.directed_cycle, 0, 4,
weight='weight') ==
4)
# weights and method specified
assert (nx.shortest_path_length(self.cycle, 0, 3, weight='weight',
method='dijkstra') ==
3)
assert (nx.shortest_path_length(self.cycle, 0, 3, weight='weight',
method='bellman-ford') ==
3)
# confirm bad method rejection
pytest.raises(ValueError,
nx.shortest_path_length,
self.cycle,
method='SPAM')
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8)
def test_shortest_path_length_target(self):
answer = {0: 1, 1: 0, 2: 1}
sp = dict(nx.shortest_path_length(nx.path_graph(3), target=1))
assert sp == answer
# with weights
sp = nx.shortest_path_length(nx.path_graph(3), target=1,
weight='weight')
assert sp == answer
# weights and method specified
sp = nx.shortest_path_length(nx.path_graph(3), target=1,
weight='weight', method='dijkstra')
assert sp == answer
sp = nx.shortest_path_length(nx.path_graph(3), target=1,
weight='weight', method='bellman-ford')
assert sp == answer
def test_single_source_shortest_path(self):
p = nx.shortest_path(self.cycle, 0)
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1)
validate_grid_path(4, 4, 1, 12, p[12])
# now with weights
p = nx.shortest_path(self.cycle, 0, weight='weight')
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_dijkstra_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1, weight='weight')
validate_grid_path(4, 4, 1, 12, p[12])
# weights and method specified
p = nx.shortest_path(self.cycle, 0, method='dijkstra', weight='weight')
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.cycle, 0, method='bellman-ford',
weight='weight')
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
def test_single_source_shortest_path_length(self):
ans = dict(nx.shortest_path_length(self.cycle, 0))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert (ans ==
dict(nx.single_source_shortest_path_length(self.cycle,
0)))
ans = dict(nx.shortest_path_length(self.grid, 1))
assert ans[16] == 6
# now with weights
ans = dict(nx.shortest_path_length(self.cycle, 0, weight='weight'))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_dijkstra_path_length(
self.cycle, 0))
ans = dict(nx.shortest_path_length(self.grid, 1, weight='weight'))
assert ans[16] == 6
# weights and method specified
ans = dict(nx.shortest_path_length(self.cycle, 0, weight='weight',
method='dijkstra'))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_dijkstra_path_length(
self.cycle, 0))
ans = dict(nx.shortest_path_length(self.cycle, 0, weight='weight',
method='bellman-ford'))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_bellman_ford_path_length(
self.cycle, 0))
def test_all_pairs_shortest_path(self):
p = nx.shortest_path(self.cycle)
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_shortest_path(self.cycle))
p = nx.shortest_path(self.grid)
validate_grid_path(4, 4, 1, 12, p[1][12])
# now with weights
p = nx.shortest_path(self.cycle, weight='weight')
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = nx.shortest_path(self.grid, weight='weight')
validate_grid_path(4, 4, 1, 12, p[1][12])
# weights and method specified
p = nx.shortest_path(self.cycle, weight='weight', method='dijkstra')
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = nx.shortest_path(self.cycle, weight='weight',
method='bellman-ford')
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle))
def test_all_pairs_shortest_path_length(self):
ans = dict(nx.shortest_path_length(self.cycle))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid))
assert ans[1][16] == 6
# now with weights
ans = dict(nx.shortest_path_length(self.cycle, weight='weight'))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid, weight='weight'))
assert ans[1][16] == 6
# weights and method specified
ans = dict(nx.shortest_path_length(self.cycle, weight='weight',
method='dijkstra'))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.cycle, weight='weight',
method='bellman-ford'))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert (ans ==
dict(nx.all_pairs_bellman_ford_path_length(self.cycle)))
def test_has_path(self):
G = nx.Graph()
nx.add_path(G, range(3))
nx.add_path(G, range(3, 5))
assert nx.has_path(G, 0, 2)
assert not nx.has_path(G, 0, 4)
def test_all_shortest_paths(self):
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert ([[0, 1, 2, 3], [0, 10, 20, 3]] ==
sorted(nx.all_shortest_paths(G, 0, 3)))
# with weights
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert ([[0, 1, 2, 3], [0, 10, 20, 3]] ==
sorted(nx.all_shortest_paths(G, 0, 3, weight='weight')))
# weights and method specified
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert ([[0, 1, 2, 3], [0, 10, 20, 3]] ==
sorted(nx.all_shortest_paths(G, 0, 3, weight='weight',
method='dijkstra')))
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert ([[0, 1, 2, 3], [0, 10, 20, 3]] ==
sorted(nx.all_shortest_paths(G, 0, 3, weight='weight',
method='bellman-ford')))
def test_all_shortest_paths_raise(self):
with pytest.raises(nx.NetworkXNoPath):
G = nx.path_graph(4)
G.add_node(4)
list(nx.all_shortest_paths(G, 0, 4))
def test_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
list(nx.all_shortest_paths(G, 0, 1, weight='weight', method='SPAM'))
class TestAverageShortestPathLength(object):
def test_cycle_graph(self):
ans = nx.average_shortest_path_length(nx.cycle_graph(7))
assert almost_equal(ans, 2)
def test_path_graph(self):
ans = nx.average_shortest_path_length(nx.path_graph(5))
assert almost_equal(ans, 2)
def test_weighted(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(G, weight='weight')
assert almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(G, weight='weight')
assert almost_equal(ans, 4)
def test_specified_methods(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='dijkstra')
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='bellman-ford')
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='floyd-warshall')
assert almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='dijkstra')
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='bellman-ford')
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='floyd-warshall')
assert almost_equal(ans, 4)
def test_disconnected(self):
g = nx.Graph()
g.add_nodes_from(range(3))
g.add_edge(0, 1)
pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g)
g = g.to_directed()
pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g)
def test_trivial_graph(self):
"""Tests that the trivial graph has average path length zero,
since there is exactly one path of length zero in the trivial
graph.
For more information, see issue #1960.
"""
G = nx.trivial_graph()
assert nx.average_shortest_path_length(G) == 0
def test_null_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.average_shortest_path_length(nx.null_graph())
def test_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
nx.average_shortest_path_length(G, weight='weight', method='SPAM')
class TestAverageShortestPathLengthNumpy(object):
@classmethod
def setup_class(cls):
global numpy
global npt
import pytest
numpy = pytest.importorskip('numpy')
npt = pytest.importorskip('numpy.testing')
def test_specified_methods_numpy(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='floyd-warshall-numpy')
npt.assert_almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(G,
weight='weight',
method='floyd-warshall-numpy')
npt.assert_almost_equal(ans, 4)
|
|
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from lxml import etree as ET
class project_indicators_task(osv.osv):
_inherit = 'project.task'
def add_value(self, cr, uid, fields=None, context=None):
action = self.pool.get('ir.actions.act_window').for_xml_id(
cr,
uid,
'project_indicators',
'project_indicators_values_new',
context=context
)
return action
_columns = {
'indicators_definitions': fields.one2many(
'project_indicators.indicators_definition',
'tasks_ids',
'Indicators definitions'),
}
class project_indicators_indicators_definition(osv.osv):
_name = 'project_indicators.indicators_definition'
_description = 'Indicators projects definitions'
_order = 'sequence'
_rec_name = 'field_name'
def fields_get(self, cr, uid, fields=None, context=None):
if not context:
context = {}
res = super(project_indicators_indicators_definition, self).\
fields_get(
cr,
uid,
fields,
context=context)
return res
def read(self, cr, uid, ids, fields=None, context=None,
load='_classic_read'):
if not context:
context = {}
res = super(project_indicators_indicators_definition, self).read(
cr,
uid,
ids,
fields,
context=context,
load=load)
infos = {}
for ind_id in ids:
infos[ind_id] = {}
definition = self.\
pool['project_indicators.indicators_definition'].browse(
cr,
uid,
ind_id,
context=context)
infos[ind_id]['mov'] = definition.obj_month_value
infos[ind_id]['moo'] = definition.obj_month_operator
infos[ind_id]['sov'] = definition.obj_sum_value
infos[ind_id]['soo'] = definition.obj_sum_operator
infos[ind_id]['dates'] = {}
values = definition.values_ids
for val in values:
date = str(val.year).zfill(4) + "-" + str(val.month).zfill(2)
infos[ind_id]['dates'][date] = val.value
        for line in res:
            current_sum = 0
            for date in infos[line['id']]['dates']:
                value = infos[line['id']]['dates'][date]
                line[date] = value
                # Keep summing only while every value so far was numeric; a
                # single non-numeric value marks the whole sum as '-'.
                if current_sum != '-' and value and value.isdigit():
                    current_sum += int(value)
                else:
                    current_sum = '-'
            line['sum'] = current_sum
line['objectives'] = ""
if infos[line['id']]['moo'] and infos[line['id']]['mov']:
line['objectives'] += ('Monthly: \n' +
str(infos[line['id']]['moo']) + ' ' +
str(infos[line['id']]['mov']) + '\n')
if infos[line['id']]['soo'] and infos[line['id']]['sov']:
line['objectives'] += ('Total: \n' +
str(infos[line['id']]['soo']) + ' ' +
str(infos[line['id']]['sov']))
return res
def delete_value(self, cr, uid, ids, context=None):
definition = self.pool['project_indicators.indicators_definition']\
.browse(
cr,
uid,
ids[0],
context=context)
values = definition.values_ids
year = int(context['date'][:4])
month = int(context['date'][5:])
for val in values:
if val.year == year and val.month == month:
val.unlink()
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def __getattr__(self, name, *args, **kwargs):
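        # Buttons in the dynamically generated tree view (see fields_view_get
        # below) are named 'delete_value_<YYYY-MM>'; route every such attribute
        # lookup to delete_value, which reads the actual date from the button's
        # context.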
if name[:13] == 'delete_value_':
date = name[13:]
self.date = date
return self.delete_value
else:
return super(project_indicators_indicators_definition, self).\
__getattr__(name, *args, **kwargs)
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
if not context:
context = {}
res = super(
project_indicators_indicators_definition,
self).fields_view_get(
cr,
uid,
view_id,
view_type,
context,
toolbar,
submenu)
task_id = context.get('id', False)
        if task_id is not False:
if view_type == 'tree':
task = self.pool['project.task'].browse(
cr,
uid,
task_id,
context=context)
definitions = task.indicators_definitions
months_str = ""
months = {}
for definition in definitions:
values = definition.values_ids
for value in values:
date = str(value.year).zfill(4) + "-" + \
str(value.month).zfill(2)
                        if date not in months:
months[date] = True
for key in months:
months_str += '<field string="%(key)s" name="%(key)s" />'\
% {'key': key}
months_str += '\
<button name="delete_value_%(key)s" type="object"\
icon="gtk-close"\
context="{\'date\': \'%(key)s\'}"/>' % {'key': key}
arch = """
<tree string="Indicators projects value">
<field string="Indicators" name="field_name"/>
<field string="Objectives" name="objectives"/>
%s
<field string="Sum" name="sum" />
</tree>
""" % months_str
res['arch'] = arch
return res
_columns = {
'field_name': fields.char('Field name', required="True"),
'field_type': fields.selection((
('number', 'Number'),
('text', 'Text')),
'Field type', required="True"),
        'obj_month_operator': fields.selection(
            (('==', 'Equal'), ('<', '<'), ('>', '>')),
            'Monthly objectives operator'),
        'obj_month_value': fields.char('Monthly objective value'),
        'obj_sum_operator': fields.selection(
            (('==', 'Equal'), ('<', '<'), ('>', '>')),
            'Total objectives operator'),
'obj_sum_value': fields.char('Total objective value'),
'values_ids': fields.one2many(
'project_indicators.indicators_value',
'definition_id',
'Values'),
'tasks_ids': fields.many2one(
'project.task',
'indicators_definitions',
'Task'),
'sequence': fields.integer('Sequence'),
}
class project_indicators_indicators_value(osv.osv):
_name = 'project_indicators.indicators_value'
_description = 'Indicators projects value'
_order = 'year,month'
def write(self, cr, uid, ids, values, context=None):
super(project_indicators_indicators_value, self).write(
cr,
uid,
ids,
values,
context=context
)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
_columns = {
'value': fields.char('Value', required="True"),
'month': fields.integer('Month', required="True"),
'year': fields.integer('Year', required="True"),
'definition_id': fields.many2one(
'project_indicators.indicators_definition',
'value_ids',
'Definition',
required="True"),
}
    _sql_constraints = [('unique_sheme_type',
                         'unique(month,year,definition_id)',
                         'Error! A value already exists for this month, '
                         'year and definition!')]
|
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import mock
import mox
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova.conductor import api as conductor_api
from nova import db
from nova import notifications
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fake_instance
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
def test_build_request_spec_without_image(self):
image = None
instance = {'uuid': 'fake-uuid'}
instance_type = {'flavorid': 'fake-id'}
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
db.flavor_extra_specs_get(self.context, mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
request_spec = scheduler_utils.build_request_spec(self.context, image,
[instance])
self.assertEqual({}, request_spec['image'])
@mock.patch.object(flavors, 'extract_flavor')
@mock.patch.object(db, 'flavor_extra_specs_get')
def test_build_request_spec_with_object(self, flavor_extra_specs_get,
extract_flavor):
instance_type = {'flavorid': 'fake-id'}
instance = fake_instance.fake_instance_obj(self.context)
extract_flavor.return_value = instance_type
flavor_extra_specs_get.return_value = []
request_spec = scheduler_utils.build_request_spec(self.context, None,
[instance])
self.assertIsInstance(request_spec['instance_properties'], dict)
def _test_set_vm_state_and_notify(self, request_spec,
expected_uuids):
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
self.mox.StubOutWithMock(compute_utils,
'add_instance_fault_from_exc')
self.mox.StubOutWithMock(notifications, 'send_update')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(rpc, 'get_notifier')
notifier = self.mox.CreateMockAnything()
rpc.get_notifier('conductor', CONF.host).AndReturn(notifier)
rpc.get_notifier(service).AndReturn(notifier)
old_ref = 'old_ref'
new_ref = 'new_ref'
for uuid in expected_uuids:
db.instance_update_and_get_original(
self.context, uuid, updates).AndReturn((old_ref, new_ref))
notifications.send_update(self.context, old_ref, new_ref,
service=service)
compute_utils.add_instance_fault_from_exc(
self.context,
mox.IsA(conductor_api.LocalAPI),
new_ref, exc_info, mox.IsA(tuple))
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
notifier.error(self.context, event_type, payload)
self.mox.ReplayAll()
scheduler_utils.set_vm_state_and_notify(self.context,
service,
method,
updates,
exc_info,
request_spec,
db)
def test_set_vm_state_and_notify_rs_uuids(self):
expected_uuids = ['1', '2', '3']
request_spec = dict(instance_uuids=expected_uuids)
self._test_set_vm_state_and_notify(request_spec, expected_uuids)
def test_set_vm_state_and_notify_uuid_from_instance_props(self):
expected_uuids = ['fake-uuid']
request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
self._test_set_vm_state_and_notify(request_spec, expected_uuids)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if not force_hosts and not force_nodes:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if with_retry and not force_hosts and not force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if with_retry and not force_hosts and not force_nodes:
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import errno
import shutil
import time
import core
import data_feeder
import executor
import framework
import io
# 'optimizer' is the same name as the parameter of Trainer.__init__, so the
# module is imported as opt_module
import optimizer as opt_module
import parallel_executor
from transpiler import distribute_transpiler
__all__ = [
'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent',
'EndStepEvent', 'CheckpointConfig'
]
class BeginEpochEvent(object):
"""
    The beginning of a training epoch.
Args:
epoch_id(int): The current epoch ID.
"""
def __init__(self, epoch_id):
self.epoch = epoch_id
class EndEpochEvent(object):
"""
The end of a training epoch.
Args:
epoch_id(int): The current epoch ID.
"""
def __init__(self, epoch_id):
self.epoch = epoch_id
class BeginStepEvent(object):
"""
    The beginning of a training step.
Args:
epoch_id(int): The current epoch ID.
step_id(int): The current step ID.
"""
def __init__(self, epoch_id, step_id):
self.epoch = epoch_id
self.step = step_id
self.fetch_metrics = True
"""
If fetch_metrics is true, the metrics will be fetched at the
EndStepEvent. Default is True.
"""
class EndStepEvent(object):
"""
The end of a training step.
Args:
epoch_id(int): The current epoch ID.
step_id(int): The current step ID.
        metrics(list): A list of fetched tensors. The order of this list is the
            same as the order of the values returned by :code:`train_func`.
"""
def __init__(self, epoch_id, step_id, metrics):
self.epoch = epoch_id
self.step = step_id
self.metrics = metrics
class CheckpointConfig(object):
"""
Parameter object for :code:`save_checkpoint` and
    :code:`fluid.Trainer`. Used to configure how checkpoints are saved.
Args:
checkpoint_dir(str): Directory path to save check point. Default is the
current directory.
        max_num_checkpoints(int): The maximum number of local checkpoints to keep.
        epoch_interval(int): Save a checkpoint every this many epochs.
        step_interval(int): Save a checkpoint every this many steps.
Examples:
>>> config = fluid.CheckpointConfig("./checkpoints")
>>> trainer = fluid.Trainer(train_func=train_program,
>>> place=place,
>>> optimizer_func=optimizer_func,
>>> checkpoint_config=config)
>>> trainer.train(...)
"""
def __init__(self,
checkpoint_dir=None,
max_num_checkpoints=3,
epoch_interval=1,
step_interval=10):
assert epoch_interval >= 1
assert step_interval >= 1
self.checkpoint_dir = checkpoint_dir \
if checkpoint_dir is not None else os.getcwd()
self.max_num_checkpoints = max_num_checkpoints
self.epoch_interval = epoch_interval
self.step_interval = step_interval
self.epoch_id = 0
self.step_id = 0
self.load_serial = None
self.pserver_id = None
self.lookup_table_name = None
def check_and_get_place(place):
"""
Check the type of place or get the default place
Args:
place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on.
Raises:
        TypeError: if the place is neither a CUDAPlace nor a CPUPlace.
Returns:
the original place if it is not None.
        If fluid is compiled with CUDA, returns CUDAPlace(0) by default.
Otherwise returns CPUPlace by default.
"""
if place is None:
if core.is_compiled_with_cuda():
return core.CUDAPlace(0)
else:
return core.CPUPlace()
else:
if not isinstance(place, core.CUDAPlace) and not isinstance(
place, core.CPUPlace):
raise TypeError("Place should be either CUDAPlace or CPUPlace")
return place
class Trainer(object):
"""
A trainer wraps MultiGPU/MultiNode training loops and can be used to train a
simple neural network easily.
    This API takes a :code:`train_func`. A :code:`train_func` is a function that
    returns loss as its first return value. The rest of the returned values can
    be fetched from EndStepEvent.metrics.
    This API also takes an :code:`optimizer_func` that will return an optimizer
instance.
For example, to train a MLP for MNIST dataset, the sample program is
>>> import paddle.fluid as fluid
>>>
>>> def mlp(image, layer_sizes=[200, 100], activation="relu", num_classes=10):
>>> hidden = image
>>> for layer_size in layer_sizes:
>>> hidden = fluid.layers.fc(input=hidden, size=layer_size, act=activation)
>>> return fluid.layers.fc(input=hidden, size=num_classes, act="softmax")
>>>
>>> def train_mnist_mlp():
>>> img = fluid.layers.data(name='image', shape=[784])
>>> label = fluid.layers.data(name='label', shape=[1], dtype='int64')
>>> prediction = mlp(img)
>>> return fluid.layers.mean(fluid.layers.cross_entropy(prediction, label))
>>>
>>> def optimizer():
>>> return fluid.optimizer.Adam()
>>>
>>> trainer = Trainer(train_func=train_mnist_mlp,
>>> optimizer_func=optimizer,
>>> place=fluid.CUDAPlace(0),
>>> parallel=True)
>>>
>>> def train_callback(event):
>>> if isinstance(event, fluid.EndStepEvent):
>>> print "Epoch ID", event.epoch, "Step ID",\
>>> event.step, "AvgLoss", event.metrics[0]
>>> elif isinstance(event, fluid.EndEpochEvent):
>>> trainer.save_params("./model_{0}".format(event.epoch))
>>>
>>> trainer.train(num_epochs=100, event_handler=train_callback)
For more example, please see :ref:`api_guide_high_level_api`.
Args:
train_func(callable): A function which will return loss. The loss must be
a scalar tensor.
optimizer_func(callable): A function that returns an Optimizer object.
        place(CUDAPlace|CPUPlace): The device place of this trainer. If
            :code:`parallel=True`, all CUDA places will be used if :code:`place`
            is a :code:`CUDAPlace`.
parallel(bool): True if use multiple devices.
checkpoint_config(CheckpointConfig): Configuration about how to save
checkpoints.
"""
def __init__(self,
train_func,
optimizer_func,
param_path=None,
place=None,
parallel=False,
checkpoint_config=None):
self.__stop = False
self.parallel = parallel
# config for checkpoint
# only chief worker will save variables
self.trainer_id = 0
self.checkpoint_cfg = checkpoint_config
if self.checkpoint_cfg:
assert isinstance(self.checkpoint_cfg, CheckpointConfig)
serial = _get_latest_checkpoint_serial(
self.checkpoint_cfg.checkpoint_dir)
self.checkpoint_cfg.load_serial = serial if serial >= 0 else None
self.scope = core.Scope()
# 1. we need to generate a framework.Program by calling
# program_func. Reference: fluid.program_guard in
# test_word2vec.py
self.startup_program = framework.Program()
self.train_program = framework.Program()
with framework.program_guard(self.train_program, self.startup_program):
program_func_outs = train_func()
self.train_func_outputs = program_func_outs if isinstance(
program_func_outs, list) else [program_func_outs]
self.test_program = self.train_program.clone(for_test=True)
# The first element of program_func_outs is loss.
loss = self.train_func_outputs[0]
optimizer = optimizer_func()
if not isinstance(optimizer, opt_module.Optimizer):
raise TypeError(
"The optimizer should be an instance of Optimizer")
optimize_ops, params_grads = optimizer.minimize(loss)
self.place = check_and_get_place(place)
self._dist_transpile_if_necessary(optimize_ops, params_grads)
# 2. move the default_main_program to self.program and run the
# default_startup program on an empty core.Scope()
# Run startup program
with self._prog_and_scope_guard():
exe = executor.Executor(place)
exe.run(self.startup_program)
if self.checkpoint_cfg and self.checkpoint_cfg.load_serial is not None:
self._load_checkpoint()
if param_path and os.path.isdir(param_path):
# load params from param_path into scope
io.load_persistables(
executor=exe,
dirname=param_path,
main_program=self.startup_program)
def _transpile_nccl2_dist(self):
# PADDLE_TRAINER_IPS
if "PADDLE_TRAINER_IPS" not in os.environ:
self.nccl_id_var = None
else:
self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
port = os.getenv("PADDLE_PSERVER_PORT")
worker_ips = os.getenv("PADDLE_TRAINER_IPS")
worker_endpoints = []
for ip in worker_ips.split(","):
worker_endpoints.append(':'.join([ip, port]))
self.num_trainers = len(worker_endpoints)
current_endpoint = os.getenv("PADDLE_CURRENT_IP") + ":" + port
worker_endpoints.remove(current_endpoint)
# TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id
# in ParallelExecutor to start
# distributed training using NCCL2
self.nccl_id_var = self.startup_program.global_block().create_var(
name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
self.startup_program.global_block().append_op(
type="gen_nccl_id",
inputs={},
outputs={"NCCLID": self.nccl_id_var},
attrs={
"endpoint": current_endpoint,
"endpoint_list": worker_endpoints,
"trainer_id": self.trainer_id
})
def _dist_transpile_if_necessary(self, optimize_ops, params_grads):
self._transpile_nccl2_dist()
        if self.nccl_id_var is not None:
return
if "PADDLE_TRAINING_ROLE" not in os.environ:
return
# the port of all pservers, needed by both trainer and pserver
port = os.getenv("PADDLE_PSERVER_PORT", "6174")
# comma separated ips of all pservers, needed by trainer and
# pserver
pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist)
# total number of workers/trainers in the job, needed by
# trainer and pserver
trainers = int(os.getenv("PADDLE_TRAINERS"))
# the IP of the local machine, needed by pserver only
current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
# the unique trainer id, starting from 0, needed by trainer
# only
self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
# the role, should be either PSERVER or TRAINER
training_role = os.getenv("PADDLE_TRAINING_ROLE")
with self._prog_and_scope_guard():
t = distribute_transpiler.DistributeTranspiler()
t.transpile(
self.trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
if self.checkpoint_cfg:
pserver_id = eplist.index(current_endpoint)
self.checkpoint_cfg.pserver_id = pserver_id
if t.has_distributed_lookup_table:
self.checkpoint_cfg.lookup_table_name = t.table_name
self.train_program = t.get_pserver_program(current_endpoint)
self.startup_program = t.get_startup_program(current_endpoint,
self.train_program)
elif training_role == "TRAINER":
self.train_program = t.get_trainer_program()
else:
                raise ValueError(
                    'PADDLE_TRAINING_ROLE environment variable must be '
                    'either TRAINER or PSERVER')
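    # Illustrative environment setup (hypothetical values; the variable names
    # are the ones read in _transpile_nccl2_dist/_dist_transpile_if_necessary):
    #
    #   # parameter server process
    #   PADDLE_TRAINING_ROLE=PSERVER PADDLE_PSERVER_PORT=6174 \
    #   PADDLE_PSERVER_IPS=192.168.0.2,192.168.0.3 PADDLE_TRAINERS=2 \
    #   PADDLE_CURRENT_IP=192.168.0.2 python train.py
    #
    #   # trainer process
    #   PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_PORT=6174 \
    #   PADDLE_PSERVER_IPS=192.168.0.2,192.168.0.3 PADDLE_TRAINERS=2 \
    #   PADDLE_TRAINER_ID=0 python train.py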
def stop(self):
"""
stop training
"""
self.__stop = True
def train(self, num_epochs, event_handler, reader=None, feed_order=None):
"""
Start the train loop to train the model.
Args:
            num_epochs(int): The number of epochs. An epoch processes all data in the reader.
event_handler(callable): The event handler. A function with type (ev:Event)->void
reader(callable): A reader creator object. See also
:ref:`api_guide_python_reader` .
            feed_order(list): Feeding order of reader. None will follow the
                defining order in the program.
Returns:
None
"""
training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
if training_role == "PSERVER":
with self._prog_and_scope_guard():
exe = executor.Executor(self.place)
exe.run()
return
if self.parallel:
self._train_by_parallel_executor(num_epochs, event_handler, reader,
feed_order)
else:
self._train_by_executor(num_epochs, event_handler, reader,
feed_order)
def test(self, reader, feed_order):
"""
Test the model on given test data
Args:
reader(callable): The reader that yields test data.
            feed_order(list): Feeding order of reader. None will follow the
                defining order in the program.
"""
return self._test_by_executor(reader, feed_order,
self.train_func_outputs)
def save_params(self, param_path):
"""
Save all parameters into :code:`param_path`.
Args:
param_path(str): The path to save parameters.
Returns:
None
"""
with self._prog_and_scope_guard():
exe = executor.Executor(self.place)
io.save_persistables(exe, dirname=param_path)
@contextlib.contextmanager
def _prog_and_scope_guard(self):
with framework.program_guard(
main_program=self.train_program,
startup_program=self.startup_program):
with executor.scope_guard(self.scope):
yield
def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
"""
Train by Executor and single device.
Args:
            num_epochs: The number of epochs to train.
            event_handler: A callable invoked with Begin/End epoch and step events.
            reader: A reader creator providing the training data.
            feed_order: Feeding order of the reader.
Returns:
"""
with self._prog_and_scope_guard():
feed_var_list = build_feed_var_list(self.train_program, feed_order)
feeder = data_feeder.DataFeeder(
feed_list=feed_var_list, place=self.place)
exe = executor.Executor(self.place)
reader = feeder.decorate_reader(reader, multi_devices=False)
self._train_by_any_executor(event_handler, exe, num_epochs, reader)
def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
if self.checkpoint_cfg:
epochs = [
epoch_id for epoch_id in range(num_epochs)
if epoch_id >= self.checkpoint_cfg.epoch_id
]
else:
epochs = [epoch_id for epoch_id in range(num_epochs)]
for epoch_id in epochs:
event_handler(BeginEpochEvent(epoch_id))
for step_id, data in enumerate(reader()):
if self.__stop:
if self.checkpoint_cfg:
self._clean_checkpoint()
return
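                # When resuming from a checkpoint, skip the steps of the
                # checkpointed epoch that were already processed.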
if self.checkpoint_cfg and self.checkpoint_cfg.load_serial \
and self.checkpoint_cfg.step_id >= step_id and self.checkpoint_cfg.epoch_id == epoch_id:
continue
begin_event = BeginStepEvent(epoch_id, step_id)
event_handler(begin_event)
if begin_event.fetch_metrics:
metrics = exe.run(feed=data,
fetch_list=[
var.name
for var in self.train_func_outputs
])
else:
metrics = exe.run(feed=data, fetch_list=[])
if self.checkpoint_cfg:
self._save_checkpoint(epoch_id, step_id)
event_handler(EndStepEvent(epoch_id, step_id, metrics))
event_handler(EndEpochEvent(epoch_id))
if self.checkpoint_cfg:
self._clean_checkpoint()
def _test_by_executor(self, reader, feed_order, fetch_list):
with executor.scope_guard(self.scope):
feed_var_list = build_feed_var_list(self.test_program, feed_order)
feeder = data_feeder.DataFeeder(
feed_list=feed_var_list, place=self.place)
exe = executor.Executor(self.place)
accumulated = len(fetch_list) * [0]
count = 0
for data in reader():
outs = exe.run(program=self.test_program,
feed=feeder.feed(data),
fetch_list=fetch_list)
accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
count += 1
return [x / count for x in accumulated]
def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
feed_order):
with self._prog_and_scope_guard():
pe = self._get_or_create_parallel_executor()
feed_var_list = build_feed_var_list(self.train_program, feed_order)
feeder = data_feeder.DataFeeder(
feed_list=feed_var_list, place=self.place)
reader = feeder.decorate_reader(reader, multi_devices=True)
self._train_by_any_executor(event_handler, pe, num_epochs, reader)
def _get_parallel_executor(self):
return getattr(self, 'parallel_executor', None)
def _get_or_create_parallel_executor(self):
if self._get_parallel_executor() is None:
self.parallel_executor = parallel_executor.ParallelExecutor(
use_cuda=isinstance(self.place, core.CUDAPlace),
loss_name=self.train_func_outputs[0].name)
return self._get_parallel_executor()
def _clean_checkpoint(self):
assert self.checkpoint_cfg
clean_checkpoint(checkpoint_dir=self.checkpoint_cfg.checkpoint_dir)
def _get_checkpoint_load_args(self):
"""
        epoch_id and step_id are runtime arguments; they are not variables, so they are loaded independently.
"""
return ["epoch_id", "step_id"]
def _get_checkpoint_save_args(self, epoch_id, step_id):
"""
        epoch_id and step_id are runtime arguments; they are not variables, so they are saved independently.
"""
trainer_args = {}
trainer_args["epoch_id"] = epoch_id
trainer_args["step_id"] = step_id
return trainer_args
def _save_checkpoint(self, epoch_id, step_id):
assert self.checkpoint_cfg
if epoch_id % self.checkpoint_cfg.epoch_interval == 0 \
and step_id % self.checkpoint_cfg.step_interval == 0:
exe = executor.Executor(self.place)
save_checkpoint(
executor=exe,
checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
trainer_id=self.trainer_id,
trainer_args=self._get_checkpoint_save_args(epoch_id, step_id),
main_program=self.train_program,
max_num_checkpoints=self.checkpoint_cfg.max_num_checkpoints)
def _load_checkpoint(self):
with self._prog_and_scope_guard():
exe = executor.Executor(self.place)
load_checkpoint(
executor=exe,
checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
main_program=self.startup_program)
            # pserver_id may legitimately be 0, so test against None rather
            # than relying on truthiness.
            if self.checkpoint_cfg.pserver_id is None:
load_trainer_args = self._get_checkpoint_load_args()
trainer_args = load_checkpoint(
executor=exe,
checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
main_program=self.startup_program,
role_id=self.trainer_id,
is_trainer=True,
load_trainer_args=load_trainer_args)
if len(trainer_args) != 2:
                    raise ValueError(
                        "the returned trainer_args length does not match "
                        "_get_checkpoint_load_args()")
self.checkpoint_cfg.epoch_id = int(trainer_args[0])
self.checkpoint_cfg.step_id = int(trainer_args[1])
else:
if self.checkpoint_cfg.lookup_table_name:
load_checkpoint(
executor=exe,
checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
main_program=self.startup_program,
role_id=self.checkpoint_cfg.pserver_id,
is_trainer=False,
load_trainer_args=None,
load_lookup_table=self.checkpoint_cfg.lookup_table_name)
def build_feed_var_list(program, feed_order):
if not isinstance(program, framework.Program):
raise TypeError("The 'program' should be an object of Program")
if isinstance(feed_order, list):
feed_var_list = [
program.global_block().var(var_name) for var_name in feed_order
]
else:
if not isinstance(feed_order, dict):
raise TypeError(
"The 'feed_order' should be either None, list or dict.")
        if sorted(feed_order.values()) != range(len(feed_order)):
raise ValueError(
"The values of 'feed_order' should be a permutation of [0, len(feed_order))"
)
sorted_pair_list = sorted(feed_order.items(), key=lambda item: item[1])
feed_var_list = [
program.global_block().var(pair[0]) for pair in sorted_pair_list
]
return feed_var_list
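# Illustrative usage (hypothetical variable names): build_feed_var_list accepts
# either a list of variable names in feeding order or a dict mapping names to
# their positions, e.g.
#
#     build_feed_var_list(prog, ['image', 'label'])
#     build_feed_var_list(prog, {'image': 0, 'label': 1})
#
# Both forms resolve the names against prog.global_block().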
# Checkpoint APIs were moved from io.py to trainer.py; all of them are private.
SUCCESS_MARK_FILENAME = "_SUCCESS"
CHECKPOINT_PREFIX = "checkpoint"
MODEL_DIR = "__model__"
LOOKUP_TABLE_DIR = "__lookup_table__"
TRAINER_PREFIX = "trainer"
CHECKPOINT_SEPARATOR = "_"
def save_checkpoint(executor,
checkpoint_dir,
trainer_id,
main_program,
trainer_args=None,
max_num_checkpoints=3,
lookup_table=None,
pserver_endpoints=None):
"""
    This function filters out all checkpoint variables from the given
    main_program and then saves these variables to the `checkpoint_dir`
    directory.
    In the training process, we generally save a checkpoint in each
    iteration, so there might be a lot of checkpoints in the
    `checkpoint_dir`. To avoid them taking too much disk space,
    `max_num_checkpoints` limits the total number of checkpoints.
    If the number of existing checkpoints is greater than
    `max_num_checkpoints`, the oldest ones are scroll-deleted.
A variable is a checkpoint variable and will be saved if it meets
all following conditions:
1. It's persistable.
    2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
    3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
Args:
executor(Executor): The executor to run for save checkpoint.
checkpoint_dir(str): The folder where to save checkpoints.
        trainer_id(int): The current trainer id. If the id is equal to 0,
            the trainer is the chief.
        trainer_args(dict|None): Current training arguments, such as 'epoch_id'
            and 'step_id'.
            Default: None
main_program(Program): The program whose checkpoint variables will
be saved.
max_num_checkpoints(int): The max number of total number of existing
checkpoints.
Default: 3
        lookup_table(string|None): The lookup table name. When using a
            distributed lookup table, the name can be obtained from
            DistributeTranspiler.table_name.
        pserver_endpoints(list|None): The parameter server ip:port list.
            When using a distributed lookup table, this list comes from the
            distributed training arguments.
Returns:
None
Raises:
ValueError: If `checkpoint_dir` is None.
AssertionError: If `trainer_args` is not a dict.
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
path = "./checkpoints"
prog = fluid.default_main_program()
trainer_args = {"epoch_id": 200,
"step_id": 20} # just an example
table_name = "share_w"
ps_endpoints = ["127.0.0.1:6000","127.0.0.1:6001"]
save_checkpoint(executor=exe,
checkpoint_dir=path,
trainer_id=0,
trainer_args=trainer_args,
main_program=prog,
max_num_checkpoints=3,
lookup_table=table_name,
pserver_endpoints = ps_endpoints)
"""
if checkpoint_dir is None:
raise ValueError("'checkpoint_dir' should not be None")
if main_program is None:
raise ValueError('main_program should not be None.')
if trainer_args:
assert isinstance(trainer_args, dict)
is_chief = trainer_id == 0
_make_chekcpoint_dirs(checkpoint_dir)
serial = _get_latest_checkpoint_serial(checkpoint_dir) + 1
cur_dir = _get_serial_dir(checkpoint_dir, serial)
_save_trainer_args(cur_dir, trainer_id, trainer_args)
if is_chief:
_save_persist_vars_without_grad(executor, cur_dir, main_program)
if is_chief and lookup_table and pserver_endpoints:
_save_pserver_vars_by_notify(executor, cur_dir, lookup_table,
pserver_endpoints)
_scroll_delete(checkpoint_dir, max_num_checkpoints)
def load_checkpoint(executor,
checkpoint_dir,
main_program,
role_id=0,
is_trainer=True,
load_trainer_args=None,
load_lookup_table=None):
"""
    This function filters out all checkpoint variables from the given
    main_program and then tries to load these variables from the
    `checkpoint_dir` directory.
    In the training process, we generally save a checkpoint in each
    iteration, so there may be more than one checkpoint in the
    `checkpoint_dir` (each checkpoint has its own sub folder); the
    checkpoint with the latest serial number is the one that is loaded.
A variable is a checkpoint variable and will be loaded if it meets
all following conditions:
1. It's persistable.
    2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
    3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
Args:
executor(Executor): The executor to run for loading checkpoint.
checkpoint_dir(str): The folder where all checkpoints are.
main_program(Program): The program whose checkpoint variables will
be loaded.
role_id(int): the trainer id or the parameter server id.
is_trainer(bool): trainer is True and parameter server is False.
load_trainer_args(list|None): list about load trainer args.
load_lookup_table(str|None): the lookup table name
Returns:
None
Raises:
ValueError: If `checkpoint_dir` is None.
ValueError: If `main_program` is None.
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
path = "./checkpoints"
prog = fluid.default_main_program()
            load_checkpoint(executor=exe, checkpoint_dir=path,
                            main_program=prog)
            # In this example, the `load_checkpoint` function
            # will first filter out all checkpoint variables in the default
            # main program, and then try to load these variables from the
            # "__model__" sub folder of the latest checkpoint under
            # "./checkpoints".
"""
if checkpoint_dir is None:
raise ValueError("'checkpoint_dir' should not be None")
serial = _get_latest_checkpoint_serial(checkpoint_dir)
    # there is nothing to load
if serial is None or serial < 0:
return
if main_program is None:
raise ValueError('main_program should not be None.')
if is_trainer and load_trainer_args is None:
cur_dir = _get_serial_dir(checkpoint_dir, serial)
_load_persist_vars_without_grad(executor, cur_dir, main_program, True)
return
if is_trainer and load_trainer_args:
return _load_trainer_args(checkpoint_dir, serial, role_id,
load_trainer_args)
if not is_trainer and load_lookup_table:
_load_lookup_table_vars(executor, checkpoint_dir, main_program, role_id,
load_lookup_table)
def clean_checkpoint(checkpoint_dir, delete_dir=False):
"""
    Clean the checkpoint dir. When training exits normally, the trainer calls
    clean_checkpoint to delete the checkpoints saved earlier.
    delete_dir only works when the directory is empty; otherwise, OSError is
    raised.
    :param checkpoint_dir: the directory where checkpoints are stored.
    :param delete_dir: whether to also remove the (empty) checkpoint_dir itself.
"""
if checkpoint_dir is None:
raise ValueError("'checkpoint_dir' should not be None")
_scroll_delete(checkpoint_dir, max_num_checkpoints=0)
if delete_dir and not os.listdir(checkpoint_dir):
os.rmdir(checkpoint_dir)
def _load_persist_vars_without_grad(executor,
dirname,
program,
has_model_dir=False):
"""
    This function filters out all checkpoint variables from the given
    program and then tries to load these variables from the given directory.
A variable is a checkpoint variable if it meets all following
conditions:
1. It's persistable.
    2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
    3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
Args:
executor(Executor): The executor to run for loading variables.
dirname(str): The directory path.
program(Program): The program whose checkpoint variables will
be loaded.
has_model_dir(bool): if True, the function loads variables
from a sub directory named '__model__'.
Default: False
Returns:
None
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
param_path = "./my_paddle_model"
prog = fluid.default_main_program()
_load_persist_vars_without_grad(executor=exe,
dirname=param_path, program=prog, has_model_dir=True)
# In this example, `_load_persist_vars_without_grad` function
            # will first filter out all checkpoint variables in the default
            # main program, and then try to load these variables from the
# folder "./my_paddle_model/__model__".
"""
if has_model_dir:
dirname = _get_model_dir(dirname)
io.load_vars(
executor,
dirname=dirname,
main_program=program,
predicate=_is_checkpoint_var,
filename=None)
def _load_lookup_table_vars(executor, dirname, program, pserver_id, table_name):
"""
The parameter server will load the lookup table's local file into
a SelectedRows variable.
Args:
executor(Executor): The executor to run for loading persistable variables
dirname(str): The directory path
program(Program): The program in which to find the variable named table_name.
pserver_id(int): the serial number in pserver_endpoints list
table_name(str): lookup table name
Returns:
None
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
dirname = "./checkpoints/checkpoint_9/"
prog = fluid.default_main_program()
pserver_id = 1
table_name = "share_w"
_load_lookup_table_vars(executor=exe,
dirname=dirname, program=prog, pserver_id=pserver_id,
table_name=table_name)
"""
lookup_table_var = None
for var in program.list_vars():
if var.name == table_name:
lookup_table_var = var
break
assert lookup_table_var is not None
lookup_table_dir = os.path.join(dirname, LOOKUP_TABLE_DIR)
table_file = table_name + CHECKPOINT_SEPARATOR + str(pserver_id)
load_prog = framework.Program()
load_block = load_prog.global_block()
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [lookup_table_var]},
attrs={'file_path': os.path.join(lookup_table_dir, table_file)})
executor.run(load_prog)
def _save_persist_vars_without_grad(executor, dirname, program):
"""
This function filters out all checkpoint variables from the given
program and then saves these variables to a sub-folder '__model__' of
the given directory.
A variable is a checkpoint variable if it meets all following
conditions:
1. It's persistable.
2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
Args:
executor(Executor): The executor to run for saving variables.
dirname(str): The directory path.
program(Program): The program whose checkpoint variables will
be saved.
Returns:
None
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
param_path = "./my_paddle_model"
prog = fluid.default_main_program()
_save_persist_vars_without_grad(executor=exe,
dirname=param_path, program=prog)
# In this example, `_save_persist_vars_without_grad` function
# will first filter out all checkpoint variables in the default
# main program, and then saves these variables to the folder
# "./my_paddle_model/__model__".
"""
cur_dir = _get_model_dir(dirname)
io.save_vars(
executor,
dirname=cur_dir,
main_program=program,
vars=None,
predicate=_is_checkpoint_var,
filename=None)
_write_success(cur_dir)
def _save_pserver_vars_by_notify(executor, dirname, lookup_table,
ps_endpoint_list):
"""
This function sends a checkpoint notify message from trainer 0
to all the parameter servers.
The checkpoint notify message contains the lookup table name and
the absolute path on the pserver where the lookup table is saved.
Args:
executor(Executor): The executor to run for sending the checkpoint notify.
dirname(str): The folder where to save checkpoints.
lookup_table(string): the lookup table name. When the distributed
lookup table is used, it can be obtained from
DistributeTranspiler.table_name.
ps_endpoint_list(list): the parameter server ip:port list.
When the distributed lookup table is used, it can be obtained from
the distribute arguments.
Return:
None
Examples:
.. code-block:: python
exe = fluid.Executor(fluid.CPUPlace())
param_path = "./my_paddle_model"
prog = fluid.default_main_program()
table_name = "share_w"
ps_endpoints = ["127.0.0.1:6000","127.0.0.1:6001"]
_save_pserver_vars_by_notify(executor=exe,
dirname=param_path, lookup_table=table_name,
ps_endpoint_list=ps_endpoints)
"""
cur_dir = _get_lookuptable_dir(dirname)
checkpoint_notify_program = framework.Program()
checkpoint_notify_block = checkpoint_notify_program.global_block()
attrs = {}
attrs['epmap'] = ps_endpoint_list
attrs['dir'] = cur_dir
attrs['lookup_table'] = lookup_table
checkpoint_notify_block.append_op(
type='checkpoint_notify', inputs={}, outputs={}, attrs=attrs)
executor.run(checkpoint_notify_program)
def _save_trainer_args(dirname, trainer_id, trainer_args):
assert isinstance(trainer_args, dict)
cur_dir = _get_trainer_dir(dirname, trainer_id)
for name, value in trainer_args.items():
args_file = os.path.join(cur_dir, name)
with open(args_file, 'w') as f:
f.write(str(value))
_write_success(cur_dir)
def _load_trainer_args(checkpoint_dir, serial, trainer_id, trainer_args):
"""
the trainer will load some args from its independent directory,
such as epoch_id and step_id.
Args:
checkpoint_dir(str): The folder where all checkpoints are.
serial(int): The serial of checkpoint you would like to load.
trainer_id(int): current trainer id.
trainer_args(list): the list of trainer argument names to load.
Return:
list: the loaded argument values, in the same order as trainer_args.
Examples:
.. code-block:: python
param_path = "./checkpoint/"
serial = 7
trainer_id = 2
trainer_args = ["epoch_id", "step_id"]
_load_trainer_args(checkpoint_dir=param_path, serial=serial,
trainer_id=trainer_id, trainer_args=trainer_args)
"""
assert isinstance(trainer_args, list)
cur_dir = _get_serial_dir(checkpoint_dir, serial)
cur_dir = _get_trainer_dir(cur_dir, trainer_id)
ret_values = []
for arg in trainer_args:
cur_file = os.path.join(cur_dir, arg)
with open(cur_file, 'r') as f:
contents = f.read()
ret_values.append(contents.strip())
return ret_values
def _is_checkpoint_var(var):
"""
the checkpoint will not save or load all the variables.
variables whose type is FEED_MINIBATCH/FETCH_LIST/RAW or whose name contains "@GRAD" are discarded.
: param var(Variable)
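Examples:
A minimal sketch; it assumes `fluid` has been imported and a main
program has already been built.
.. code-block:: python
prog = fluid.default_main_program()
ckpt_vars = [v for v in prog.list_vars() if _is_checkpoint_var(v)]
# gradient ("@GRAD"), distributed (".trainer_", ".block") and
# non-persistable variables are filtered out.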
"""
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.RAW:
return False
# "@GRAD" marks gradient variables; the checkpoint will not save them.
if "@GRAD" in var.name:
return False
# ".trainer_" marks distributed-training variables; the checkpoint will not save them.
if ".trainer_" in var.name:
return False
# ".block" marks distributed-training variables; the checkpoint will not save them.
if ".block" in var.name:
return False
return var.persistable
def _make_chekcpoint_dirs(dirs):
"""
_make_chekcpoint_dirs creates the local directory directly; if the directory already exists, it is ignored.
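Examples:
A minimal sketch with an illustrative path.
.. code-block:: python
_make_chekcpoint_dirs("./checkpoints/checkpoint_9/trainer_0")
# creates the directory (and any missing parents); an existing
# directory is silently ignored.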
"""
assert dirs is not None
if os.path.isfile(dirs):
raise OSError(errno.ENOTDIR, "dirs path should be a directory.", dirs)
if not os.path.isdir(dirs):
try:
os.makedirs(dirs)
except OSError as err:
if err.errno != errno.EEXIST:
raise err
def _get_dir_serial(dirname):
try:
_, serial = dirname.split(CHECKPOINT_SEPARATOR)
serial_num = int(serial)
except ValueError:
serial_num = -1
return serial_num
def _get_serial_dir(dirname, serial):
serial_folder = CHECKPOINT_PREFIX + CHECKPOINT_SEPARATOR + str(serial)
serial_dir = os.path.join(dirname, serial_folder)
_make_chekcpoint_dirs(serial_dir)
return serial_dir
def _get_model_dir(dirname):
model_dir = os.path.join(dirname, MODEL_DIR)
_make_chekcpoint_dirs(model_dir)
return model_dir
def _get_lookuptable_dir(dirname):
lookuptable_dir = os.path.join(dirname, LOOKUP_TABLE_DIR)
_make_chekcpoint_dirs(lookuptable_dir)
return lookuptable_dir
def _get_trainer_dir(dirname, trainer_id):
trainer_folder = TRAINER_PREFIX + CHECKPOINT_SEPARATOR + str(trainer_id)
trainer_dir = os.path.join(dirname, trainer_folder)
_make_chekcpoint_dirs(trainer_dir)
return trainer_dir
def _scroll_delete(dirname, max_num_checkpoints=3):
dirs = os.listdir(dirname)
serial_map = {}
for serial in dirs:
serial_num = _get_dir_serial(serial)
serial_map[serial_num] = serial
if len(serial_map) <= max_num_checkpoints:
return
serials = sorted(serial_map.keys(), reverse=True)
serials = serials[max_num_checkpoints:]
for serial in serials:
cur_dir = _get_serial_dir(dirname, serial)
try:
shutil.rmtree(cur_dir)
except OSError as err:
if err.errno != errno.ENOENT:
raise err
def _write_success(dirname):
"""
write a file named "_SUCCESS" (containing the current time) in the checkpoint dir to indicate this checkpoint is complete.
: param dirname
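Examples:
A minimal sketch; the path below is an illustrative assumption.
.. code-block:: python
model_dir = "./checkpoints/checkpoint_9/__model__"
_write_success(model_dir)
# appends the current time to "_SUCCESS" inside model_dir,
# creating the file if it does not exist yet.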
"""
success_file = os.path.join(dirname, SUCCESS_MARK_FILENAME)
with open(success_file, 'a') as f:
now = time.ctime()
f.write(now)
def _get_latest_checkpoint_serial(checkpoint_dir):
"""
get the serial of the latest checkpoint in the checkpoint directory; the _SUCCESS file must exist in that checkpoint's __model__ directory
: param checkpoint_dir
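Examples:
A minimal sketch; "./checkpoints" is an illustrative directory.
.. code-block:: python
serial = _get_latest_checkpoint_serial("./checkpoints")
# serial is -1 when no complete checkpoint exists, otherwise the
# largest serial whose __model__/_SUCCESS file is present.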
"""
if not checkpoint_dir:
return -1
def has_success(checkpoint_dir, cur_dir):
"""
is _SUCCESS in this dir
"""
serial = _get_dir_serial(cur_dir)
if serial == -1 or not os.path.isdir(
os.path.join(checkpoint_dir, cur_dir)):
return -1
success_path = os.path.join(
_get_serial_dir(checkpoint_dir, serial), MODEL_DIR,
SUCCESS_MARK_FILENAME)
if os.path.isfile(success_path):
return serial
return -1
if not os.path.isdir(checkpoint_dir):
return -1
current_dir = -1
dirs = os.listdir(checkpoint_dir)
for cur_dir in dirs:
success_num = has_success(checkpoint_dir, cur_dir)
if success_num > current_dir:
current_dir = success_num
return current_dir
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides base classes for working with storage
"""
# Backward compatibility for Python 2.5
from __future__ import with_statement
import os.path # pylint: disable-msg=W0404
import hashlib
from os.path import join as pjoin
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
import libcloud.utils.files
from libcloud.common.types import LibcloudError
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.storage.types import ObjectDoesNotExistError
__all__ = [
'Object',
'Container',
'StorageDriver',
'CHUNK_SIZE',
'DEFAULT_CONTENT_TYPE'
]
CHUNK_SIZE = 8096
# Default Content-Type which is sent when uploading an object if one is not
# supplied and can't be detected when using non-strict mode.
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
class Object(object):
"""
Represents an object (BLOB).
"""
def __init__(self, name, size, hash, extra, meta_data, container,
driver):
"""
:param name: Object name (must be unique per container).
:type name: ``str``
:param size: Object size in bytes.
:type size: ``int``
:param hash: Object hash.
:type hash: ``str``
:param container: Object container.
:type container: :class:`Container`
:param extra: Extra attributes.
:type extra: ``dict``
:param meta_data: Optional object meta data.
:type meta_data: ``dict``
:param driver: StorageDriver instance.
:type driver: :class:`StorageDriver`
"""
self.name = name
self.size = size
self.hash = hash
self.container = container
self.extra = extra or {}
self.meta_data = meta_data or {}
self.driver = driver
def get_cdn_url(self):
return self.driver.get_object_cdn_url(obj=self)
def enable_cdn(self, **kwargs):
return self.driver.enable_object_cdn(obj=self, **kwargs)
def download(self, destination_path, overwrite_existing=False,
delete_on_failure=True):
return self.driver.download_object(self, destination_path,
overwrite_existing,
delete_on_failure)
def as_stream(self, chunk_size=None):
return self.driver.download_object_as_stream(self, chunk_size)
def delete(self):
return self.driver.delete_object(self)
def __repr__(self):
return ('<Object: name=%s, size=%s, hash=%s, provider=%s ...>' %
(self.name, self.size, self.hash, self.driver.name))
class Container(object):
"""
Represents a container (bucket) which can hold multiple objects.
"""
def __init__(self, name, extra, driver):
"""
:param name: Container name (must be unique).
:type name: ``str``
:param extra: Extra attributes.
:type extra: ``dict``
:param driver: StorageDriver instance.
:type driver: :class:`StorageDriver`
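Example (a minimal usage sketch; ``driver`` and the names below are
illustrative assumptions, not part of this base class)::
container = driver.get_container(container_name='backups')
obj = container.upload_object(file_path='/tmp/backup.tar.gz',
object_name='backup.tar.gz')
container.download_object(obj, '/tmp/restored.tar.gz',
overwrite_existing=True)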
"""
self.name = name
self.extra = extra or {}
self.driver = driver
def iterate_objects(self):
return self.driver.iterate_container_objects(container=self)
def list_objects(self):
return self.driver.list_container_objects(container=self)
def get_cdn_url(self):
return self.driver.get_container_cdn_url(container=self)
def enable_cdn(self, **kwargs):
return self.driver.enable_container_cdn(container=self, **kwargs)
def get_object(self, object_name):
return self.driver.get_object(container_name=self.name,
object_name=object_name)
def upload_object(self, file_path, object_name, extra=None, **kwargs):
return self.driver.upload_object(
file_path, self, object_name, extra=extra, **kwargs)
def upload_object_via_stream(self, iterator, object_name, extra=None,
**kwargs):
return self.driver.upload_object_via_stream(
iterator, self, object_name, extra=extra, **kwargs)
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
return self.driver.download_object(
obj, destination_path, overwrite_existing=overwrite_existing,
delete_on_failure=delete_on_failure)
def download_object_as_stream(self, obj, chunk_size=None):
return self.driver.download_object_as_stream(obj, chunk_size)
def delete_object(self, obj):
return self.driver.delete_object(obj)
def delete(self):
return self.driver.delete_container(self)
def __repr__(self):
return ('<Container: name=%s, provider=%s>'
% (self.name, self.driver.name))
class StorageDriver(BaseDriver):
"""
A base StorageDriver to derive from.
"""
connectionCls = ConnectionUserAndKey
name = None
hash_type = 'md5'
supports_chunked_encoding = False
# When strict mode is used, exception will be thrown if no content type is
# provided and none can be detected when uploading an object
strict_mode = False
def iterate_containers(self):
"""
Return a generator of containers for the given account
:return: A generator of Container instances.
:rtype: ``generator`` of :class:`Container`
"""
raise NotImplementedError(
'iterate_containers not implemented for this driver')
def list_containers(self):
"""
Return a list of containers.
:return: A list of Container instances.
:rtype: ``list`` of :class:`Container`
"""
return list(self.iterate_containers())
def iterate_container_objects(self, container):
"""
Return a generator of objects for the given container.
:param container: Container instance
:type container: :class:`Container`
:return: A generator of Object instances.
:rtype: ``generator`` of :class:`Object`
"""
raise NotImplementedError(
'iterate_container_objects not implemented for this driver')
def list_container_objects(self, container):
"""
Return a list of objects for the given container.
:param container: Container instance.
:type container: :class:`Container`
:return: A list of Object instances.
:rtype: ``list`` of :class:`Object`
"""
return list(self.iterate_container_objects(container))
def get_container(self, container_name):
"""
Return a container instance.
:param container_name: Container name.
:type container_name: ``str``
:return: :class:`Container` instance.
:rtype: :class:`Container`
"""
raise NotImplementedError(
'get_object not implemented for this driver')
def get_container_cdn_url(self, container):
"""
Return a container CDN URL.
:param container: Container instance
:type container: :class:`Container`
:return: A CDN URL for this container.
:rtype: ``str``
"""
raise NotImplementedError(
'get_container_cdn_url not implemented for this driver')
def get_object(self, container_name, object_name):
"""
Return an object instance.
:param container_name: Container name.
:type container_name: ``str``
:param object_name: Object name.
:type object_name: ``str``
:return: :class:`Object` instance.
:rtype: :class:`Object`
"""
raise NotImplementedError(
'get_object not implemented for this driver')
def get_object_cdn_url(self, obj):
"""
Return an object CDN URL.
:param obj: Object instance
:type obj: :class:`Object`
:return: A CDN URL for this object.
:rtype: ``str``
"""
raise NotImplementedError(
'get_object_cdn_url not implemented for this driver')
def enable_container_cdn(self, container):
"""
Enable container CDN.
:param container: Container instance
:type container: :class:`Container`
:rtype: ``bool``
"""
raise NotImplementedError(
'enable_container_cdn not implemented for this driver')
def enable_object_cdn(self, obj):
"""
Enable object CDN.
:param obj: Object instance
:type obj: :class:`Object`
:rtype: ``bool``
"""
raise NotImplementedError(
'enable_object_cdn not implemented for this driver')
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
"""
Download an object to the specified destination path.
:param obj: Object instance.
:type obj: :class:`Object`
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash
mismatch / file size).
:type delete_on_failure: ``bool``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
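Example (a minimal usage sketch; ``driver`` and the container/object
names below are illustrative assumptions, not part of this base class)::
obj = driver.get_object(container_name='backups',
object_name='backup.tar.gz')
driver.download_object(obj, '/tmp/backup.tar.gz',
overwrite_existing=True, delete_on_failure=True)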
"""
raise NotImplementedError(
'download_object not implemented for this driver')
def download_object_as_stream(self, obj, chunk_size=None):
"""
Return a generator which yields object data.
:param obj: Object instance
:type obj: :class:`Object`
:param chunk_size: Optional chunk size (in bytes).
:type chunk_size: ``int``
"""
raise NotImplementedError(
'download_object_as_stream not implemented for this driver')
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True, headers=None):
"""
Upload an object currently located on a disk.
:param file_path: Path to the object on disk.
:type file_path: ``str``
:param container: Destination container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:param verify_hash: Verify hash
:type verify_hash: ``bool``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:param headers: (optional) Additional request headers,
such as CORS headers. For example:
headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'}
:type headers: ``dict``
:rtype: :class:`Object`
"""
raise NotImplementedError(
'upload_object not implemented for this driver')
def upload_object_via_stream(self, iterator, container,
object_name,
extra=None,
headers=None):
"""
Upload an object using an iterator.
If a provider supports it, chunked transfer encoding is used and you
don't need to know in advance the amount of data to be uploaded.
Otherwise, if a provider doesn't support it, the iterator will be
exhausted so the total size of the data to be uploaded can be determined.
Note: Exhausting the iterator means that all of the data must be
buffered in memory, which might result in memory exhaustion when
uploading a very large object.
If the file is located on disk you are advised to use the upload_object
function, which uses fs.stat to determine the file size and
doesn't need to buffer the whole object in memory.
:param iterator: An object which implements the iterator interface.
:type iterator: :class:`object`
:param container: Destination container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:param extra: (optional) Extra attributes (driver specific). Note:
This dictionary must contain a 'content_type' key which represents
a content type of the stored object.
:type extra: ``dict``
:param headers: (optional) Additional request headers,
such as CORS headers. For example:
headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'}
:type headers: ``dict``
:rtype: ``object``
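Example (a minimal usage sketch; ``driver`` and the values below are
illustrative assumptions, not part of this base class)::
container = driver.get_container(container_name='backups')
iterator = iter([b'first chunk', b'second chunk'])
obj = driver.upload_object_via_stream(
iterator=iterator, container=container, object_name='backup.bin',
extra={'content_type': 'application/octet-stream'})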
"""
raise NotImplementedError(
'upload_object_via_stream not implemented for this driver')
def delete_object(self, obj):
"""
Delete an object.
:param obj: Object instance.
:type obj: :class:`Object`
:return: ``bool`` True on success.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_object not implemented for this driver')
def create_container(self, container_name):
"""
Create a new container.
:param container_name: Container name.
:type container_name: ``str``
:return: Container instance on success.
:rtype: :class:`Container`
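Example (a minimal usage sketch; ``driver`` and the container name are
illustrative assumptions, not part of this base class)::
container = driver.create_container(container_name='backups')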
"""
raise NotImplementedError(
'create_container not implemented for this driver')
def delete_container(self, container):
"""
Delete a container.
:param container: Container instance
:type container: :class:`Container`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_container not implemented for this driver')
def _get_object(self, obj, callback, callback_kwargs, response,
success_status_code=None):
"""
Call the passed callback and start transfer of the object.
:param obj: Object instance.
:type obj: :class:`Object`
:param callback: Function which is called with the passed
callback_kwargs
:type callback: :class:`function`
:param callback_kwargs: Keyword arguments which are passed to the
callback.
:type callback_kwargs: ``dict``
:param response: Response instance.
:type response: :class:`Response`
:param success_status_code: Status code which represents a successful
transfer (defaults to httplib.OK)
:type success_status_code: ``int``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
success_status_code = success_status_code or httplib.OK
if response.status == success_status_code:
return callback(**callback_kwargs)
elif response.status == httplib.NOT_FOUND:
raise ObjectDoesNotExistError(object_name=obj.name,
value='', driver=self)
raise LibcloudError(value='Unexpected status code: %s' %
(response.status),
driver=self)
def _save_object(self, response, obj, destination_path,
overwrite_existing=False, delete_on_failure=True,
chunk_size=None):
"""
Save object to the provided path.
:param response: RawResponse instance.
:type response: :class:`RawResponse`
:param obj: Object instance.
:type obj: :class:`Object`
:param destination_path: Destination directory.
:type destination_path: ``str``
:param delete_on_failure: True to delete partially downloaded object if
the download fails.
:type delete_on_failure: ``bool``
:param overwrite_existing: True to overwrite a local path if it already
exists.
:type overwrite_existing: ``bool``
:param chunk_size: Optional chunk size
(defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)
:type chunk_size: ``int``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
chunk_size = chunk_size or CHUNK_SIZE
base_name = os.path.basename(destination_path)
if not base_name and not os.path.exists(destination_path):
raise LibcloudError(
value='Path %s does not exist' % (destination_path),
driver=self)
if not base_name:
file_path = pjoin(destination_path, obj.name)
else:
file_path = destination_path
if os.path.exists(file_path) and not overwrite_existing:
raise LibcloudError(
value='File %s already exists, but ' % (file_path) +
'overwrite_existing=False',
driver=self)
bytes_transferred = 0
with open(file_path, 'wb') as file_handle:
for chunk in response._response.iter_content(chunk_size):
file_handle.write(b(chunk))
bytes_transferred += len(chunk)
if int(obj.size) != int(bytes_transferred):
# Transfer failed, support retry?
if delete_on_failure:
try:
os.unlink(file_path)
except Exception:
pass
return False
return True
def _upload_object(self, object_name, content_type, request_path,
request_method='PUT',
headers=None, file_path=None, stream=None,
upload_func=None, upload_func_kwargs=None,
chunked=False, multipart=False):
"""
Helper function for setting common request headers and calling the
passed in callback which uploads an object.
"""
headers = headers or {}
if file_path and not os.path.exists(file_path):
raise OSError('File %s does not exist' % (file_path))
if stream is not None and not hasattr(stream, 'next') and not \
hasattr(stream, '__next__'):
raise AttributeError('iterator object must implement next() ' +
'method.')
if not content_type:
if file_path:
name = file_path
else:
name = object_name
content_type, _ = libcloud.utils.files.guess_file_mime_type(name)
if not content_type:
if self.strict_mode:
raise AttributeError('File content-type could not be '
'guessed and no content_type value '
'is provided')
else:
# Fallback to a content-type
content_type = DEFAULT_CONTENT_TYPE
headers['Content-Type'] = content_type
if stream:
response = self.connection.request(
request_path,
method=request_method, data=stream,
headers=headers, raw=True)
stream_hash, stream_length = self._hash_buffered_stream(
stream,
self._get_hash_function())
else:
with open(file_path, 'rb') as file_stream:
response = self.connection.request(
request_path,
method=request_method, data=file_stream,
headers=headers, raw=True)
with open(file_path, 'rb') as file_stream:
stream_hash, stream_length = self._hash_buffered_stream(
file_stream,
self._get_hash_function())
if not response.success():
raise LibcloudError(
value='Object upload failed. Perhaps a timeout?', driver=self)
if upload_func:
upload_func(**upload_func_kwargs)
return {'response': response,
'bytes_transferred': stream_length,
'data_hash': stream_hash}
def _hash_buffered_stream(self, stream, hasher, blocksize=65536):
total_len = 0
if hasattr(stream, '__next__'):
data = libcloud.utils.files.exhaust_iterator(iterator=stream)
hasher.update(b(data))
total_len = len(data)
return (hasher.hexdigest(), total_len)
if not hasattr(stream, '__exit__'):
for s in stream:
hasher.update(s)
total_len = total_len + len(s)
return (hasher.hexdigest(), total_len)
with stream:
buf = stream.read(blocksize)
while len(buf) > 0:
total_len = total_len + len(buf)
hasher.update(buf)
buf = stream.read(blocksize)
return (hasher.hexdigest(), total_len)
def _get_hash_function(self):
"""
Return instantiated hash function for the hash type supported by
the provider.
"""
try:
func = getattr(hashlib, self.hash_type)()
except AttributeError:
raise RuntimeError('Invalid or unsupported hash type: %s' %
(self.hash_type))
return func
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
non_numeric_dtypes = [types_pb2.DT_VARIANT,
types_pb2.DT_VARIANT_REF,
types_pb2.DT_INVALID,
types_pb2.DT_RESOURCE,
types_pb2.DT_RESOURCE_REF]
return datatype_enum not in non_numeric_dtypes
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(datatype_enum,
dtypes.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dt = dtypes.as_dtype(datatype_enum)
self.assertEqual(datatype_enum, dt.as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != dtypes.bfloat16:
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(
dtypes.as_dtype(datatype_enum).base_dtype,
dtypes.as_dtype(numpy_dtype))
def testInvalid(self):
with self.assertRaises(TypeError):
dtypes.DType(types_pb2.DT_INVALID)
with self.assertRaises(TypeError):
dtypes.as_dtype(types_pb2.DT_INVALID)
def testNumpyConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
self.assertIs(dtypes.string, dtypes.as_dtype(np.object_))
self.assertIs(dtypes.string,
dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool_))
with self.assertRaises(TypeError):
dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
class AnObject(object):
dtype = "f4"
self.assertIs(dtypes.float32, dtypes.as_dtype(AnObject))
class AnotherObject(object):
dtype = np.dtype(np.complex64)
self.assertIs(dtypes.complex64, dtypes.as_dtype(AnotherObject))
def testRealDtype(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64
]:
self.assertIs(dtype.real_dtype, dtype)
self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)
def testStringConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
self.assertIs(dtypes.string, dtypes.as_dtype("string"))
self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
with self.assertRaises(TypeError):
dtypes.as_dtype("not_a_type")
def testDTypesHaveUniqueNames(self):
dtypez = []
names = set()
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = dtypes.as_dtype(datatype_enum)
dtypez.append(dtype)
names.add(dtype.name)
self.assertEqual(len(dtypez), len(names))
def testIsInteger(self):
self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
self.assertEqual(dtypes.as_dtype("float").is_integer, False)
self.assertEqual(dtypes.as_dtype("double").is_integer, False)
self.assertEqual(dtypes.as_dtype("string").is_integer, False)
self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint32").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint16").is_integer, False)
def testIsFloating(self):
self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
self.assertEqual(dtypes.as_dtype("string").is_floating, False)
self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_floating, True)
self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)
def testIsComplex(self):
self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
self.assertEqual(dtypes.as_dtype("string").is_complex, False)
self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint32").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint16").is_complex, False)
def testIsUnsigned(self):
self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
# ignore types for which there are no minimum/maximum (or we cannot
# compute it, such as for the q* types)
if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
dtype.base_dtype == dtypes.string or
dtype.base_dtype == dtypes.complex64 or
dtype.base_dtype == dtypes.complex128):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
# check some values that are known
if numpy_dtype == np.bool_:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 1)
if numpy_dtype == np.int8:
self.assertEquals(dtype.min, -128)
self.assertEquals(dtype.max, 127)
if numpy_dtype == np.int16:
self.assertEquals(dtype.min, -32768)
self.assertEquals(dtype.max, 32767)
if numpy_dtype == np.int32:
self.assertEquals(dtype.min, -2147483648)
self.assertEquals(dtype.max, 2147483647)
if numpy_dtype == np.int64:
self.assertEquals(dtype.min, -9223372036854775808)
self.assertEquals(dtype.max, 9223372036854775807)
if numpy_dtype == np.uint8:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 255)
if numpy_dtype == np.uint16:
if dtype == dtypes.uint16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 65535)
elif dtype == dtypes.bfloat16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint32:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint64:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 18446744073709551615)
if numpy_dtype in (np.float16, np.float32, np.float64):
self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
if numpy_dtype == dtypes.bfloat16.as_numpy_dtype:
self.assertEquals(dtype.min, float.fromhex("-0x1.FEp127"))
self.assertEquals(dtype.max, float.fromhex("0x1.FEp127"))
def testRepr(self):
self.skipTest("b/142725777")
for enum, name in dtypes._TYPE_TO_STRING.items():
if enum > 100:
continue
dtype = dtypes.DType(enum)
self.assertEquals(repr(dtype), "tf." + name)
import tensorflow as tf
dtype2 = eval(repr(dtype))
self.assertEquals(type(dtype2), dtypes.DType)
self.assertEquals(dtype, dtype2)
def testEqWithNonTFTypes(self):
self.assertNotEqual(dtypes.int32, int)
self.assertNotEqual(dtypes.float64, 2.1)
def testPythonLongConversion(self):
self.assertIs(dtypes.int64, dtypes.as_dtype(np.array(2**32).dtype))
def testPythonTypesConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(float))
self.assertIs(dtypes.bool, dtypes.as_dtype(bool))
def testReduce(self):
for enum in dtypes._TYPE_TO_STRING:
dtype = dtypes.DType(enum)
ctor, args = dtype.__reduce__()
self.assertEquals(ctor, dtypes.as_dtype)
self.assertEquals(args, (dtype.name,))
reconstructed = ctor(*args)
self.assertEquals(reconstructed, dtype)
def testAsDtypeInvalidArgument(self):
with self.assertRaises(TypeError):
dtypes.as_dtype((dtypes.int32, dtypes.float32))
if __name__ == "__main__":
googletest.main()
|
|
"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setupegg.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c. Build python
2.5 and python 2.6 installers::
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksums into NOTES.txt and writes the Changelog,
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
# What needs to be installed to build everything on Mac OS X:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
# - paver + virtualenv
# - full texlive
import os
import sys
import shutil
import subprocess
import re
try:
from hashlib import md5
except ImportError:
from md5 import md5
import paver
from paver.easy import \
options, Bunch, task, call_task, sh, needs, cmdopts, dry
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.FULLVERSION
finally:
sys.path.pop(0)
DEFAULT_PYTHON = "2.6"
# Where to put the final installers, as put on sourceforge
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
virtualenv=Bunch(packages_to_install=["sphinx", "numpydoc"], no_site_packages=True),
sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
superpack=Bunch(builddir="build-superpack"),
installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),
doc=Bunch(doc_root="doc",
sdir=os.path.join("doc", "source"),
bdir=os.path.join("doc", "build"),
bdir_latex=os.path.join("doc", "build", "latex"),
destdir_pdf=os.path.join("build_doc", "pdf")
),
html=Bunch(builddir=os.path.join("build", "html")),
dmg=Bunch(python_version=DEFAULT_PYTHON),
bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
)
MPKG_PYTHON = {
"2.5": ["/Library/Frameworks/Python.framework/Versions/2.5/bin/python"],
"2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"]
}
SSE3_CFG = {'ATLAS': r'C:\local\lib\yop\sse3'}
SSE2_CFG = {'ATLAS': r'C:\local\lib\yop\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\yop\nosse', 'LAPACK': r'C:\local\lib\yop\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
if sys.platform =="darwin":
WINDOWS_PYTHON = {
"2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
"2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"]
}
WINDOWS_ENV = os.environ
WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
MAKENSIS = ["wine", "makensis"]
elif sys.platform == "win32":
WINDOWS_PYTHON = {
"2.6": ["C:\Python26\python.exe"],
"2.5": ["C:\Python25\python.exe"],
}
# XXX: find out which env variable is necessary to avoid the pb with python
# 2.6 and random module when importing tempfile
WINDOWS_ENV = os.environ
MAKENSIS = ["makensis"]
else:
WINDOWS_PYTHON = {
"2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
"2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"]
}
WINDOWS_ENV = os.environ
MAKENSIS = ["wine", "makensis"]
# Start/end of the log (from git)
LOG_START = 'svn/tags/1.4.0'
LOG_END = 'master'
RELEASE_NOTES = 'doc/release/1.5.0-notes.rst'
#-------------------
# Windows installers
#-------------------
def superpack_name(pyver, numver):
"""Return the filename of the superpack installer."""
return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
def internal_wininst_name(arch):
"""Return the name of the wininst as it will be inside the superpack (i.e.
with the arch encoded."""
ext = '.exe'
return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver):
"""Return the name of the installer built by wininst command."""
ext = '.exe'
return "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
def prepare_nsis_script(pyver, numver):
if not os.path.exists(SUPERPACK_BUILD):
os.makedirs(SUPERPACK_BUILD)
tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
source = open(tpl, 'r')
target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
installer_name = superpack_name(pyver, numver)
cnt = "".join(source.readlines())
cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
for arch in ['nosse', 'sse2', 'sse3']:
cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
internal_wininst_name(arch))
target.write(cnt)
def bdist_wininst_arch(pyver, arch):
"""Arch specific wininst build."""
if os.path.exists("build"):
shutil.rmtree("build")
_bdist_wininst(pyver, SITECFG[arch])
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
"""Build all arch specific wininst installers."""
pyver = options.python_version
def copy_bdist(arch):
# Copy the wininst in dist into the release directory
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
os.rename(source, target)
bdist_wininst_arch(pyver, 'nosse')
copy_bdist("nosse")
bdist_wininst_arch(pyver, 'sse2')
copy_bdist("sse2")
bdist_wininst_arch(pyver, 'sse3')
copy_bdist("sse3")
idirs = options.installers.installersdir
pyver = options.python_version
prepare_nsis_script(pyver, FULLVERSION)
subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
cwd=SUPERPACK_BUILD)
# Copy the superpack into installers dir
if not os.path.exists(idirs):
os.makedirs(idirs)
source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
shutil.copy(source, target)
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_nosse(options):
"""Build the nosse wininst installer."""
bdist_wininst_arch(options.python_version, 'nosse')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse2(options):
"""Build the sse2 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse2')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse3(options):
"""Build the sse3 wininst installer."""
bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_simple():
"""Simple wininst-based installer."""
pyver = options.bdist_wininst_simple.python_version
_bdist_wininst(pyver)
def _bdist_wininst(pyver, cfg_env=None):
cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
if cfg_env:
for k, v in WINDOWS_ENV.items():
cfg_env[k] = v
else:
cfg_env = WINDOWS_ENV
subprocess.check_call(cmd, env=cfg_env)
#----------------
# Bootstrap stuff
#----------------
@task
def bootstrap(options):
"""create virtualenv in ./bootstrap"""
try:
import virtualenv
except ImportError:
raise RuntimeError("virtualenv is needed for bootstrap")
bdir = options.bootstrap_dir
if not os.path.exists(bdir):
os.makedirs(bdir)
bscript = "boostrap.py"
options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
bscript)
options.virtualenv.no_site_packages = False
options.bootstrap.no_site_packages = False
call_task('paver.virtual.bootstrap')
sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'numpy.egg-info']
for i in d:
if os.path.exists(i):
shutil.rmtree(i)
bdir = os.path.join('doc', options.sphinx.builddir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
def clean_bootstrap():
bdir = os.path.join(options.bootstrap.bootstrap_dir)
if os.path.exists(bdir):
shutil.rmtree(bdir)
@task
@needs('clean', 'clean_bootstrap')
def nuke(options):
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
for d in [options.superpack.builddir, options.installers.releasedir]:
if os.path.exists(d):
shutil.rmtree(d)
#---------------------
# Documentation tasks
#---------------------
@task
def html(options):
"""Build numpy documentation and put it into build/docs"""
# Don't use paver html target because of numpy bootstrapping problems
bdir = os.path.join("doc", options.sphinx.builddir, "html")
if os.path.exists(bdir):
shutil.rmtree(bdir)
subprocess.check_call(["make", "html"], cwd="doc")
html_destdir = options.html.builddir
if os.path.exists(html_destdir):
shutil.rmtree(html_destdir)
shutil.copytree(bdir, html_destdir)
@task
def latex():
"""Build numpy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
sdir = options.doc.sdir
bdir = options.doc.bdir
bdir_latex = options.doc.bdir_latex
destdir_pdf = options.doc.destdir_pdf
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
dry("Build pdf doc", build_pdf)
if os.path.exists(destdir_pdf):
shutil.rmtree(destdir_pdf)
os.makedirs(destdir_pdf)
user = os.path.join(bdir_latex, "numpy-user.pdf")
shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
ref = os.path.join(bdir_latex, "numpy-ref.pdf")
shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
#------------------
# Mac OS X targets
#------------------
def dmg_name(fullversion, pyver):
return "numpy-%s-py%s-python.org.dmg" % (fullversion, pyver)
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i)
if m:
return m.groups()
def mpkg_name(pyver):
maj, min = macosx_version()[:2]
return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
def _build_mpkg(pyver):
ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
if pyver == "2.5":
sh("CC=gcc-4.0 LDFLAGS='%s' %s setupegg.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
else:
sh("LDFLAGS='%s' %s setupegg.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
@task
def simple_dmg():
pyver = "2.6"
src_dir = "dmg-source"
# Clean the source dir
if os.path.exists(src_dir):
shutil.rmtree(src_dir)
os.makedirs(src_dir)
# Build the mpkg
clean()
_build_mpkg(pyver)
# Build the dmg
shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
os.path.join(src_dir, mpkg_name(pyver)))
_create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
@task
def bdist_mpkg(options):
call_task("clean")
try:
pyver = options.bdist_mpkg.python_version
except AttributeError:
pyver = options.python_version
_build_mpkg(pyver)
def _create_dmg(pyver, src_dir, volname=None):
# Build the dmg
image_name = dmg_name(FULLVERSION, pyver)
if os.path.exists(image_name):
os.remove(image_name)
cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
if volname:
cmd.extend(["-volname", "'%s'" % volname])
sh(" ".join(cmd))
@task
@needs("pdf")
@cmdopts([("python-version=", "p", "python version")])
def dmg(options):
try:
pyver = options.dmg.python_version
except AttributeError:
pyver = DEFAULT_PYTHON
idirs = options.installers.installersdir
call_task("clean")
_build_mpkg(pyver)
macosx_installer_dir = "tools/numpy-macosx-installer"
dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
if os.path.exists(dmg):
os.remove(dmg)
# Clean the image source
content = os.path.join(macosx_installer_dir, 'content')
if os.path.exists(content):
shutil.rmtree(content)
os.makedirs(content)
# Copy mpkg into image source
mpkg_source = os.path.join("dist", mpkg_name(pyver))
mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
shutil.copytree(mpkg_source, mpkg_target)
# Copy docs into image source
pdf_docs = os.path.join(content, "Documentation")
if os.path.exists(pdf_docs):
shutil.rmtree(pdf_docs)
os.makedirs(pdf_docs)
user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
# Build the dmg
cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
"--volname", "numpy", os.path.basename(dmg), "./content"]
st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
source = dmg
target = os.path.join(idirs, os.path.basename(dmg))
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(type='gztar'):
root = 'numpy-%s' % FULLVERSION
if type == 'gztar':
return root + '.tar.gz'
elif type == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
# To be sure to bypass paver when building sdist... paver + numpy.distutils
# do not play well together.
sh('python setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
idirs = options.installers.installersdir
if not os.path.exists(idirs):
os.makedirs(idirs)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(idirs, tarball_name(t))
shutil.copy(source, target)
def compute_md5(idirs):
released = paver.path.path(idirs).listdir()
checksums = []
for f in released:
m = md5(open(f, 'rb').read())
checksums.append('%s %s' % (m.hexdigest(), f))
return checksums
def write_release_task(options, filename='NOTES.txt'):
idirs = options.installers.installersdir
source = paver.path.path(RELEASE_NOTES)
target = paver.path.path(filename)
if target.exists():
target.remove()
source.copy(target)
ftarget = open(str(target), 'a')
ftarget.writelines("""
Checksums
=========
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
def write_log_task(options, filename='Changelog'):
st = subprocess.Popen(
['git', 'svn', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0]
a = open(filename, 'w')
a.writelines(out)
a.close()
@task
def write_release(options):
write_release_task(options)
@task
def write_log(options):
write_log_task(options)
@task
def write_release_and_log(options):
rdir = options.installers.releasedir
write_release_task(options, os.path.join(rdir, 'NOTES.txt'))
write_log_task(options, os.path.join(rdir, 'Changelog'))
|
|
"""
HDMI CEC component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/hdmi_cec/
"""
import logging
import multiprocessing
import os
from collections import defaultdict
from functools import reduce
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (EVENT_HOMEASSISTANT_START, STATE_UNKNOWN,
EVENT_HOMEASSISTANT_STOP, STATE_ON,
STATE_OFF, CONF_DEVICES, CONF_PLATFORM,
STATE_PLAYING, STATE_IDLE,
STATE_PAUSED, CONF_HOST)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyCEC==0.4.13']
DOMAIN = 'hdmi_cec'
_LOGGER = logging.getLogger(__name__)
DEFAULT_DISPLAY_NAME = "HomeAssistant"
CONF_TYPES = 'types'
ICON_UNKNOWN = 'mdi:help'
ICON_AUDIO = 'mdi:speaker'
ICON_PLAYER = 'mdi:play'
ICON_TUNER = 'mdi:nest-thermostat'
ICON_RECORDER = 'mdi:microphone'
ICON_TV = 'mdi:television'
ICONS_BY_TYPE = {
0: ICON_TV,
1: ICON_RECORDER,
3: ICON_TUNER,
4: ICON_PLAYER,
5: ICON_AUDIO
}
CEC_DEVICES = defaultdict(list)
CMD_UP = 'up'
CMD_DOWN = 'down'
CMD_MUTE = 'mute'
CMD_UNMUTE = 'unmute'
CMD_MUTE_TOGGLE = 'toggle mute'
CMD_PRESS = 'press'
CMD_RELEASE = 'release'
EVENT_CEC_COMMAND_RECEIVED = 'cec_command_received'
EVENT_CEC_KEYPRESS_RECEIVED = 'cec_keypress_received'
ATTR_PHYSICAL_ADDRESS = 'physical_address'
ATTR_TYPE_ID = 'type_id'
ATTR_VENDOR_NAME = 'vendor_name'
ATTR_VENDOR_ID = 'vendor_id'
ATTR_DEVICE = 'device'
ATTR_COMMAND = 'command'
ATTR_TYPE = 'type'
ATTR_KEY = 'key'
ATTR_DUR = 'dur'
ATTR_SRC = 'src'
ATTR_DST = 'dst'
ATTR_CMD = 'cmd'
ATTR_ATT = 'att'
ATTR_RAW = 'raw'
ATTR_DIR = 'dir'
ATTR_ABT = 'abt'
ATTR_NEW = 'new'
ATTR_ON = 'on'
ATTR_OFF = 'off'
ATTR_TOGGLE = 'toggle'
_VOL_HEX = vol.Any(vol.Coerce(int), lambda x: int(x, 16))
SERVICE_SEND_COMMAND = 'send_command'
SERVICE_SEND_COMMAND_SCHEMA = vol.Schema({
vol.Optional(ATTR_CMD): _VOL_HEX,
vol.Optional(ATTR_SRC): _VOL_HEX,
vol.Optional(ATTR_DST): _VOL_HEX,
vol.Optional(ATTR_ATT): _VOL_HEX,
vol.Optional(ATTR_RAW): vol.Coerce(str)
}, extra=vol.PREVENT_EXTRA)
SERVICE_VOLUME = 'volume'
SERVICE_VOLUME_SCHEMA = vol.Schema({
vol.Optional(CMD_UP): vol.Any(CMD_PRESS, CMD_RELEASE, vol.Coerce(int)),
vol.Optional(CMD_DOWN): vol.Any(CMD_PRESS, CMD_RELEASE, vol.Coerce(int)),
vol.Optional(CMD_MUTE): vol.Any(ATTR_ON, ATTR_OFF, ATTR_TOGGLE),
}, extra=vol.PREVENT_EXTRA)
SERVICE_UPDATE_DEVICES = 'update'
SERVICE_UPDATE_DEVICES_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({})
}, extra=vol.PREVENT_EXTRA)
SERVICE_SELECT_DEVICE = 'select_device'
SERVICE_POWER_ON = 'power_on'
SERVICE_STANDBY = 'standby'
# pylint: disable=unnecessary-lambda
DEVICE_SCHEMA = vol.Schema({
vol.All(cv.positive_int):
vol.Any(lambda devices: DEVICE_SCHEMA(devices), cv.string)
})
CONF_DISPLAY_NAME = 'osd_name'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_DEVICES):
vol.Any(DEVICE_SCHEMA, vol.Schema({
vol.All(cv.string): vol.Any(cv.string)})),
vol.Optional(CONF_PLATFORM): vol.Any(SWITCH, MEDIA_PLAYER),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_DISPLAY_NAME): cv.string,
vol.Optional(CONF_TYPES, default={}):
vol.Schema({cv.entity_id: vol.Any(MEDIA_PLAYER, SWITCH)})
})
}, extra=vol.ALLOW_EXTRA)
def pad_physical_address(addr):
"""Right-pad a physical address."""
return addr + [0] * (4 - len(addr))
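# Illustrative example (not part of the original component): a two-field address
# is padded out to the four fields a CEC physical address expects, e.g.
#   pad_physical_address([1, 2])  # -> [1, 2, 0, 0]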
def parse_mapping(mapping, parents=None):
"""Parse configuration device mapping."""
if parents is None:
parents = []
for addr, val in mapping.items():
if isinstance(addr, (str,)) and isinstance(val, (str,)):
from pycec.network import PhysicalAddress
yield (addr, PhysicalAddress(val))
else:
cur = parents + [addr]
if isinstance(val, dict):
yield from parse_mapping(val, cur)
elif isinstance(val, str):
yield (val, pad_physical_address(cur))
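# A minimal sketch of how the mapping is flattened (illustrative values only):
#   parse_mapping({1: 'TV', 2: {1: 'Pi'}})
#     yields ('TV', [1, 0, 0, 0]) and ('Pi', [2, 1, 0, 0])
#   parse_mapping({'TV': '0.0.0.0'})
#     yields ('TV', PhysicalAddress('0.0.0.0'))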
def setup(hass: HomeAssistant, base_config):
"""Set up the CEC capability."""
from pycec.network import HDMINetwork
from pycec.commands import CecCommand, KeyReleaseCommand, KeyPressCommand
from pycec.const import KEY_VOLUME_UP, KEY_VOLUME_DOWN, KEY_MUTE_ON, \
KEY_MUTE_OFF, KEY_MUTE_TOGGLE, ADDR_AUDIOSYSTEM, ADDR_BROADCAST, \
ADDR_UNREGISTERED
from pycec.cec import CecAdapter
from pycec.tcp import TcpAdapter
# Parse configuration into a dict of device name to physical address
# represented as a list of four elements.
device_aliases = {}
devices = base_config[DOMAIN].get(CONF_DEVICES, {})
_LOGGER.debug("Parsing config %s", devices)
device_aliases.update(parse_mapping(devices))
_LOGGER.debug("Parsed devices: %s", device_aliases)
platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)
loop = (
        # Use hass.loop only on single-CPU hosts; otherwise None lets pyCEC run in its own thread
hass.loop if multiprocessing.cpu_count() < 2 else None)
host = base_config[DOMAIN].get(CONF_HOST, None)
display_name = base_config[DOMAIN].get(
CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)
if host:
adapter = TcpAdapter(host, name=display_name, activate_source=False)
else:
adapter = CecAdapter(name=display_name, activate_source=False)
hdmi_network = HDMINetwork(adapter, loop=loop)
def _volume(call):
"""Increase/decrease volume and mute/unmute system."""
mute_key_mapping = {ATTR_TOGGLE: KEY_MUTE_TOGGLE, ATTR_ON: KEY_MUTE_ON,
ATTR_OFF: KEY_MUTE_OFF}
for cmd, att in call.data.items():
if cmd == CMD_UP:
_process_volume(KEY_VOLUME_UP, att)
elif cmd == CMD_DOWN:
_process_volume(KEY_VOLUME_DOWN, att)
elif cmd == CMD_MUTE:
hdmi_network.send_command(
KeyPressCommand(mute_key_mapping[att],
dst=ADDR_AUDIOSYSTEM))
hdmi_network.send_command(
KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
_LOGGER.info("Audio muted")
else:
_LOGGER.warning("Unknown command %s", cmd)
def _process_volume(cmd, att):
if isinstance(att, (str,)):
att = att.strip()
if att == CMD_PRESS:
hdmi_network.send_command(
KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))
elif att == CMD_RELEASE:
hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
else:
att = 1 if att == "" else int(att)
for _ in range(0, att):
hdmi_network.send_command(
KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))
hdmi_network.send_command(
KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))
def _tx(call):
"""Send CEC command."""
data = call.data
if ATTR_RAW in data:
command = CecCommand(data[ATTR_RAW])
else:
if ATTR_SRC in data:
src = data[ATTR_SRC]
else:
src = ADDR_UNREGISTERED
if ATTR_DST in data:
dst = data[ATTR_DST]
else:
dst = ADDR_BROADCAST
if ATTR_CMD in data:
cmd = data[ATTR_CMD]
else:
_LOGGER.error("Attribute 'cmd' is missing")
return False
if ATTR_ATT in data:
if isinstance(data[ATTR_ATT], (list,)):
att = data[ATTR_ATT]
else:
att = reduce(lambda x, y: "%s:%x" % (x, y), data[ATTR_ATT])
else:
att = ""
command = CecCommand(cmd, dst, src, att)
hdmi_network.send_command(command)
def _standby(call):
hdmi_network.standby()
def _power_on(call):
hdmi_network.power_on()
def _select_device(call):
"""Select the active device."""
from pycec.network import PhysicalAddress
addr = call.data[ATTR_DEVICE]
if not addr:
_LOGGER.error("Device not found: %s", call.data[ATTR_DEVICE])
return
if addr in device_aliases:
addr = device_aliases[addr]
else:
entity = hass.states.get(addr)
_LOGGER.debug("Selecting entity %s", entity)
if entity is not None:
addr = entity.attributes['physical_address']
_LOGGER.debug("Address acquired: %s", addr)
if addr is None:
_LOGGER.error("Device %s has not physical address",
call.data[ATTR_DEVICE])
return
if not isinstance(addr, (PhysicalAddress,)):
addr = PhysicalAddress(addr)
hdmi_network.active_source(addr)
_LOGGER.info("Selected %s (%s)", call.data[ATTR_DEVICE], addr)
def _update(call):
"""
Update if device update is needed.
Called by service, requests CEC network to update data.
"""
hdmi_network.scan()
def _new_device(device):
"""Handle new devices which are detected by HDMI network."""
key = '{}.{}'.format(DOMAIN, device.name)
hass.data[key] = device
ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)
discovery.load_platform(
hass, ent_platform, DOMAIN, discovered={ATTR_NEW: [key]},
hass_config=base_config)
def _shutdown(call):
hdmi_network.stop()
def _start_cec(event):
"""Register services and start HDMI network to watch for devices."""
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))[DOMAIN]
hass.services.register(DOMAIN, SERVICE_SEND_COMMAND, _tx,
descriptions[SERVICE_SEND_COMMAND],
SERVICE_SEND_COMMAND_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME, _volume,
descriptions[SERVICE_VOLUME],
SERVICE_VOLUME_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UPDATE_DEVICES, _update,
descriptions[SERVICE_UPDATE_DEVICES],
SERVICE_UPDATE_DEVICES_SCHEMA)
hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)
hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)
hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)
hdmi_network.set_new_device_callback(_new_device)
hdmi_network.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
return True
class CecDevice(Entity):
"""Representation of a HDMI CEC device entity."""
def __init__(self, hass: HomeAssistant, device, logical):
"""Initialize the device."""
self._device = device
self.hass = hass
self._icon = None
self._state = STATE_UNKNOWN
self._logical_address = logical
self.entity_id = "%s.%d" % (DOMAIN, self._logical_address)
device.set_update_callback(self._update)
def update(self):
"""Update device status."""
self._update()
def _update(self, device=None):
"""Update device status."""
if device:
from pycec.const import STATUS_PLAY, STATUS_STOP, STATUS_STILL, \
POWER_OFF, POWER_ON
if device.power_status == POWER_OFF:
self._state = STATE_OFF
elif device.status == STATUS_PLAY:
self._state = STATE_PLAYING
elif device.status == STATUS_STOP:
self._state = STATE_IDLE
elif device.status == STATUS_STILL:
self._state = STATE_PAUSED
elif device.power_status == POWER_ON:
self._state = STATE_ON
else:
_LOGGER.warning("Unknown state: %d", device.power_status)
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the device."""
return (
"%s %s" % (self.vendor_name, self._device.osd_name)
if (self._device.osd_name is not None and
self.vendor_name is not None and self.vendor_name != 'Unknown')
else "%s %d" % (self._device.type_name, self._logical_address)
if self._device.osd_name is None
else "%s %d (%s)" % (self._device.type_name, self._logical_address,
self._device.osd_name))
@property
def vendor_id(self):
"""Return the ID of the device's vendor."""
return self._device.vendor_id
@property
def vendor_name(self):
"""Return the name of the device's vendor."""
return self._device.vendor
@property
def physical_address(self):
"""Return the physical address of device in HDMI network."""
return str(self._device.physical_address)
@property
def type(self):
"""Return a string representation of the device's type."""
return self._device.type_name
@property
def type_id(self):
"""Return the type ID of device."""
return self._device.type
@property
def icon(self):
"""Return the icon for device by its type."""
return (self._icon if self._icon is not None else
ICONS_BY_TYPE.get(self._device.type)
if self._device.type in ICONS_BY_TYPE else ICON_UNKNOWN)
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {}
if self.vendor_id is not None:
state_attr[ATTR_VENDOR_ID] = self.vendor_id
state_attr[ATTR_VENDOR_NAME] = self.vendor_name
if self.type_id is not None:
state_attr[ATTR_TYPE_ID] = self.type_id
state_attr[ATTR_TYPE] = self.type
if self.physical_address is not None:
state_attr[ATTR_PHYSICAL_ADDRESS] = self.physical_address
return state_attr
|
|
# Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import time
from datetime import datetime
from enum import Enum
import numpy as np
import pandas as pd
import pytz
from dateutil import parser
from pandas import Timestamp
current_milli_time = lambda: int(round(time.time() * 1000))
def get_epoch():
return parser.parse("1970-01-01 00:00:00+00:00")
def date_to_int(value):
epoch = get_epoch()
date = parser.parse(value)
delta = date.replace(tzinfo=pytz.utc) - epoch
return int(delta.total_seconds() * 1000.0)
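# Worked example (illustrative, not part of the original module): one day after
# the epoch is 86,400,000 ms, so
#   date_to_int("1970-01-02 00:00:00+00:00")  # -> 86400000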
def pandas_timestamp_to_int(value):
epoch = get_epoch()
date = value.to_pydatetime()
delta = date.replace(tzinfo=pytz.utc) - epoch
return int(delta.total_seconds() * 1000.0)
def datetime_to_number(value):
if isinstance(value, Timestamp):
return pandas_timestamp_to_int(value)
else:
return date_to_int(value)
def unix_time(dt):
if isinstance(dt, Timestamp):
j_object = {
'type': 'Date',
'timestamp': pandas_timestamp_to_int(dt)
}
return j_object
else:
return date_to_int(dt)
def date_time_2_millis(dt):
return unix_time(dt)
class BaseObject:
def __init__(self, **kwargs):
self.type = self.__class__.__name__
def transform(self):
model = json.dumps(self, cls=ObjectEncoder)
return json.loads(model)
def transformBack(self, dict):
self.__dict__ = dict
class Color:
white = None
WHITE = None
lightGray = None
LIGHT_GRAY = None
gray = None
GRAY = None
darkGray = None
DARK_GRAY = None
black = None
BLACK = None
red = None
RED = None
pink = None
PINK = None
orange = None
ORANGE = None
yellow = None
YELLOW = None
green = None
GREEN = None
darkGreen = None
DARK_GREEN = None
magenta = None
MAGENTA = None
cyan = None
CYAN = None
blue = None
BLUE = None
def __init__(self, r, g, b, a=255):
self.R = r
self.B = b
self.G = g
self.A = a
self.value = ((a & 0xFF) << 24) | ((r & 0xFF) << 16) | (
(g & 0xFF) << 8) | (b & 0xFF)
if self.value < 0:
self.value = 0xFFFFFFFF + self.value + 1
def hex(self):
return '#%02x' % self.value
def shorthex(self):
return '#%06x' % (self.value & 0x00FFFFFF)
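# Illustrative example (not in the original module): the packed ARGB value keeps
# the alpha channel, while shorthex() masks it off, e.g.
#   Color(255, 0, 0).hex()       # -> '#ffff0000'
#   Color(255, 0, 0).shorthex()  # -> '#ff0000'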
Color.white = Color(255, 255, 255)
Color.WHITE = Color.white
Color.lightGray = Color(192, 192, 192)
Color.LIGHT_GRAY = Color.lightGray
Color.gray = Color(128, 128, 128)
Color.GRAY = Color.gray
Color.darkGray = Color(64, 64, 64)
Color.DARK_GRAY = Color.darkGray
Color.black = Color(0, 0, 0)
Color.BLACK = Color.black
Color.red = Color(255, 0, 0)
Color.RED = Color.red
Color.pink = Color(255, 175, 175)
Color.PINK = Color.pink
Color.orange = Color(255, 200, 0)
Color.ORANGE = Color.orange
Color.yellow = Color(255, 255, 0)
Color.YELLOW = Color.yellow
Color.green = Color(0, 255, 0)
Color.GREEN = Color.green
Color.darkGreen = Color(0, 100, 0)
Color.DARK_GREEN = Color.darkGreen
Color.magenta = Color(255, 0, 255)
Color.MAGENTA = Color.magenta
Color.cyan = Color(0, 255, 255)
Color.CYAN = Color.cyan
Color.blue = Color(0, 0, 255)
Color.BLUE = Color.blue
def getValue(obj, value, defaultValue=None):
if value in obj:
return obj[value]
else:
return defaultValue
def getColor(color):
if isinstance(color, list):
values = []
for c in color:
values.append(getColor(c))
return values
elif isinstance(color, Color):
return color.hex()
else:
return color
def padYs(g, gMax):
currentSize = len(g.y)
maxSize = len(gMax.y)
diff = maxSize - currentSize
if (diff > 0):
lastY = g.y[currentSize - 1]
g.y = g.y + [lastY] * diff
g.x = g.x + gMax.x[currentSize:]
class ObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return self.default(date_time_2_millis(obj))
elif isinstance(obj, Enum):
return self.default(obj.value)
elif isinstance(obj, Color):
return self.default(obj.hex())
elif isinstance(obj, pd.Series):
return self.default(obj.tolist())
elif isinstance(obj, np.ndarray):
return self.default(obj.tolist())
elif isinstance(obj, (np.int64, np.bool_)):
return self.default(obj.item())
elif hasattr(obj, "__dict__"):
d = dict(
(key, value)
for key, value in inspect.getmembers(obj)
if value is not None
and not key == "Position"
and not key == "colorProvider"
and not key == "toolTipBuilder"
and not key == "parent"
and not key.startswith("__")
and not inspect.isabstract(value)
and not inspect.isbuiltin(value)
and not inspect.isfunction(value)
and not inspect.isgenerator(value)
and not inspect.isgeneratorfunction(value)
and not inspect.ismethod(value)
and not inspect.ismethoddescriptor(value)
and not inspect.isroutine(value)
)
return self.default(d)
return obj
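# Minimal usage sketch (assumption, not part of the original module): the encoder
# reduces known value types to JSON-friendly forms, e.g.
#   json.dumps(Color.red, cls=ObjectEncoder)  # -> '"#ffff0000"'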
class ColorUtils:
@staticmethod
def interpolateColor(color1, color2, fraction):
fraction = min(fraction, 1.0)
fraction = max(fraction, 0.0)
red1 = color1.R
green1 = color1.G
blue1 = color1.B
alpha1 = color1.A
red2 = color2.R
green2 = color2.G
blue2 = color2.B
alpha2 = color2.A
delta_red = red2 - red1
delta_green = green2 - green1
delta_blue = blue2 - blue1
delta_alpha = alpha2 - alpha1
red = red1 + (delta_red * fraction)
green = green1 + (delta_green * fraction)
blue = blue1 + (delta_blue * fraction)
alpha = alpha1 + (delta_alpha * fraction)
red = min(red, 255.0)
red = max(red, 0.0)
green = min(green, 255.0)
green = max(green, 0.0)
blue = min(blue, 255.0)
blue = max(blue, 0.0)
alpha = min(alpha, 255.0)
alpha = max(alpha, 0.0)
return Color(round(red), round(green), round(blue), round(alpha))
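# Worked example (illustrative only): each channel is moved 'fraction' of the way
# from color1 to color2 and clamped to 0-255, so interpolating a quarter of the
# way from black to white gives a dark gray:
#   ColorUtils.interpolateColor(Color.black, Color.white, 0.25)  # -> Color(64, 64, 64)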
class KeyboardCodes():
BACKSPACE = 'BACKSPACE'
TAB = 'TAB'
ENTER = 'ENTER'
SHIFT = 'SHIFT'
CTRL = 'CTRL'
ALT = 'ALT'
PAUSE_BREAK = 'PAUSE_BREAK'
CAPS_LOCK = 'CAPS_LOCK'
ESCAPE = 'ESCAPE'
SPACE = 'SPACE'
PAGE_UP = 'PAGE_UP'
PAGE_DOWN = 'PAGE_DOWN'
END = 'END'
HOME = 'HOME'
LEFT_ARROW = 'LEFT_ARROW'
UP_ARROW = 'UP_ARROW'
RIGHT_ARROW = 'RIGHT_ARROW'
DOWN_ARROW = 'DOWN_ARROW'
INSERT = 'INSERT'
DELETE = 'DELETE'
MULTIPLY = 'MULTIPLY'
ADD = 'ADD'
SUBTRACT = 'SUBTRACT'
DECIMAL_POINT = 'DECIMAL_POINT'
DIVIDE = 'DIVIDE'
F1 = 'F1'
F2 = 'F2'
F3 = 'F3'
F4 = 'F4'
F5 = 'F5'
F6 = 'F6'
F7 = 'F7'
F8 = 'F8'
F9 = 'F9'
F10 = 'F10'
F11 = 'F11'
F12 = 'F12'
NUM_LOCK = 'NUM_LOCK'
SCROLL_LOCK = 'SCROLL_LOCK'
EQUAL_SIGN = 'EQUAL_SIGN'
COMMA = 'COMMA'
DASH = 'DASH'
PERIOD = 'PERIOD'
FORWARD_SLASH = 'FORWARD_SLASH'
GRAVE_ACCENT = 'GRAVE_ACCENT'
OPEN_BRACKET = 'OPEN_BRACKET'
BACK_SLASH = 'BACK_SLASH'
CLOSE_BRAKET = 'CLOSE_BRAKET'
SINGLE_QUOTE = 'SINGLE_QUOTE'
|
|
## A script for finding the Cox coefficient and p-value for every mRNA in the KIRC Tier 3 data downloaded Feb. 2015
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This path lookup will only work if you are running python from the command line.
##If you are not running from the command line, manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains the most recent
## follow-up data. This code checks if the patient has already been loaded into the list and, if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[8]=='Alive':
clinical[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])]
if i[24]=='Alive':
clinical4.append([i[0],int(i[25]),'Alive'])
elif i[24]=='Dead':
clinical4.append([i[0],int(i[26]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
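## Worked example of the merge above (hypothetical patient): if the follow_up file
## reports 450 days and the clinical_patient file reports 300 days, 300 <= 450 so the
## 450-day follow_up record is kept; the record with the longer follow-up time wins.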
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
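## Illustrative example (hypothetical barcode): for a sample barcode like
## 'TCGA-XX-XXXX-01A-11R-...', the 4th field '01A' -> '01' marks a primary tumor and
## the join above rebuilds the patient ID 'TCGA-XX-XXXX' used as the dictionary key.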
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    if i[0] in TCGA_to_mrna:
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file,
    ## the following block averages the expression values across all of the files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
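## Worked example of the cutoff (illustrative numbers): with 400 patients, a gene is
## kept only if fewer than 100 of them (len(genes)/4) have an RSEM value of 0 and the
## median RSEM across all patients is greater than 1.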
## This will write the final genes to a large (100-300 MB) file, which could be useful for further analyses; this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes):
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
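    ## The R call above is a rank-based inverse normal transformation: each expression
    ## value x_i is replaced by qnorm((rank(x_i) - 0.5) / n), i.e. the standard-normal
    ## quantile of its rank, rounded to 5 digits.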
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
##grade4
grade4=[]
for ii in kaplan:
if ii[2]==4:
grade4.append(1)
else:
grade4.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['grade4']=ro.IntVector(grade4)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
# Parse the string of the result with python for the gene coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
|
|
from __future__ import absolute_import
import functools
import itertools
from sentry.models import (
GroupSubscription, GroupSubscriptionReason, UserOption, UserOptionValue
)
from sentry.testutils import TestCase
class SubscribeTest(TestCase):
def test_simple(self):
group = self.create_group()
user = self.create_user()
GroupSubscription.objects.subscribe(group=group, user=user)
assert GroupSubscription.objects.filter(
group=group,
user=user,
).exists()
# should not error
GroupSubscription.objects.subscribe(group=group, user=user)
def test_bulk(self):
group = self.create_group()
user_ids = []
for i in range(20):
user = self.create_user()
user_ids.append(user.id)
GroupSubscription.objects.bulk_subscribe(group=group, user_ids=user_ids)
assert len(GroupSubscription.objects.filter(
group=group,
)) == 20
one_more = self.create_user()
user_ids.append(one_more.id)
# should not error
GroupSubscription.objects.bulk_subscribe(group=group, user_ids=user_ids)
assert len(GroupSubscription.objects.filter(
group=group,
)) == 21
def test_bulk_dupes(self):
group = self.create_group()
user_ids = []
user = self.create_user()
user_ids.append(user.id)
user_ids.append(user.id)
GroupSubscription.objects.bulk_subscribe(group=group, user_ids=user_ids)
assert len(GroupSubscription.objects.filter(
group=group,
)) == 1
def test_actor_user(self):
group = self.create_group()
user = self.create_user()
GroupSubscription.objects.subscribe_actor(group=group, actor=user)
assert GroupSubscription.objects.filter(
group=group,
user=user,
).exists()
# should not error
GroupSubscription.objects.subscribe_actor(group=group, actor=user)
def test_actor_team(self):
org = self.create_organization()
group = self.create_group(organization=org)
user = self.create_user()
team = self.create_team(organization=org)
self.create_member(
user=user,
email='bar@example.com',
organization=org,
role='owner',
teams=[team],
)
GroupSubscription.objects.subscribe_actor(group=group, actor=team)
assert GroupSubscription.objects.filter(
group=group,
user=user,
).exists()
# should not error
GroupSubscription.objects.subscribe_actor(group=group, actor=team)
class GetParticipantsTest(TestCase):
def test_simple(self):
org = self.create_organization()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
group = self.create_group(project=project)
user = self.create_user('foo@example.com')
user2 = self.create_user('bar@example.com')
self.create_member(user=user, organization=org, teams=[team])
self.create_member(user=user2, organization=org)
# implicit membership
users = GroupSubscription.objects.get_participants(group=group)
assert users == {
user: GroupSubscriptionReason.implicit,
}
# unsubscribed
GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=False,
)
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
# not participating by default
GroupSubscription.objects.filter(
user=user,
group=group,
).delete()
UserOption.objects.set_value(
user=user,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
# explicitly participating
GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
users = GroupSubscription.objects.get_participants(group=group)
assert users == {
user: GroupSubscriptionReason.comment,
}
def test_no_conversations(self):
org = self.create_organization()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
group = self.create_group(project=project)
user = self.create_user()
self.create_member(user=user, organization=org, teams=[team])
user_option_sequence = itertools.count(300) # prevent accidental overlap with user id
def clear_workflow_options():
UserOption.objects.filter(
user=user,
key='workflow:notifications',
).delete()
get_participants = functools.partial(
GroupSubscription.objects.get_participants,
group,
)
# Implicit subscription, ensure the project setting overrides the
# default global option.
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.implicit},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
clear_workflow_options()
# Implicit subscription, ensure the project setting overrides the
# explicit global option.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.implicit},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
clear_workflow_options()
# Explicit subscription, overridden by the global option.
GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.comment},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
clear_workflow_options()
# Explicit subscription, overridden by the project option.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.comment},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
clear_workflow_options()
# Explicit subscription, overridden by the project option which also
# overrides the default option.
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.comment},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
def test_participating_only(self):
org = self.create_organization()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
group = self.create_group(project=project)
user = self.create_user()
self.create_member(user=user, organization=org, teams=[team])
user_option_sequence = itertools.count(300) # prevent accidental overlap with user id
def clear_workflow_options():
UserOption.objects.filter(
user=user,
key='workflow:notifications',
).delete()
get_participants = functools.partial(
GroupSubscription.objects.get_participants,
group,
)
# Implicit subscription, ensure the project setting overrides the
# default global option.
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.implicit},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
clear_workflow_options()
# Implicit subscription, ensure the project setting overrides the
# explicit global option.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.implicit},
after={}):
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.no_conversations,
)
clear_workflow_options()
# Ensure the global default is applied.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
with self.assertChanges(get_participants,
before={},
after={user: GroupSubscriptionReason.comment}):
subscription = GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
subscription.delete()
clear_workflow_options()
# Ensure the project setting overrides the global default.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=group.project,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
with self.assertChanges(get_participants,
before={},
after={user: GroupSubscriptionReason.comment}):
subscription = GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
subscription.delete()
clear_workflow_options()
# Ensure the project setting overrides the global setting.
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=group.project,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
with self.assertChanges(get_participants,
before={},
after={user: GroupSubscriptionReason.comment}):
subscription = GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
subscription.delete()
clear_workflow_options()
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=None,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
UserOption.objects.create(
id=next(user_option_sequence),
user=user,
project=group.project,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
with self.assertChanges(get_participants,
before={user: GroupSubscriptionReason.implicit},
after={user: GroupSubscriptionReason.comment}):
subscription = GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
def test_does_not_include_nonmember(self):
org = self.create_organization()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
group = self.create_group(project=project)
user = self.create_user('foo@example.com')
# implicit participation, included by default
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
GroupSubscription.objects.create(
user=user,
group=group,
project=project,
is_active=True,
reason=GroupSubscriptionReason.comment,
)
# explicit participation, included by default
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
UserOption.objects.set_value(
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
# explicit participation, participating only
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
GroupSubscription.objects.filter(
user=user,
group=group,
).delete()
# implicit participation, participating only
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
UserOption.objects.set_value(
user=user,
project=project,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
# explicit participation, explicit participating only
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
GroupSubscription.objects.filter(
user=user,
group=group,
).update(
reason=GroupSubscriptionReason.implicit,
)
# implicit participation, explicit participating only
users = GroupSubscription.objects.get_participants(group=group)
assert users == {}
|
|
'''
Kaveree Backup Program : Anjali Version
Command Line Input
Creator : M.Kumaran
Email : atv.kumar@gmail.com
ALL RIGHTS RESERVED 2013
'''
import os
import platform
import filecmp
import shutil
import hashlib
import logging , logging.handlers
#import sys
import datetime
import sqlite3
import zipfile
import argparse
__program__ = 'Anjali'
__version__ = 1.5
__fversions__ = 3
__logname__ = 'anjali.log'
def cacheFolder2(FolderPath,files = [],folders = []):
''' A recursive function to get directory content into arrays
Usage : x,y = cacheFolder2(directory/path)
Returns 2 arrays 1 files array and 1 folders array '''
try:
for eachitem in os.listdir(FolderPath):
filePath = os.path.join(FolderPath,eachitem)
if os.path.isdir(filePath) and not eachitem.startswith('.'):
folders.append(filePath)
cacheFolder2(filePath,files,folders)
elif os.path.isfile(filePath) and not eachitem.startswith('.'):
files.append(filePath)
elif os.path.islink(filePath):
pass
return files, folders
except OSError:
        logging.getLogger('anjali').warning('Error accessing :%s',str(FolderPath))
logging.getLogger('anjali').exception('Error:')
def checkOldBackup_hash(srcPath,filePath):
'''Function to check old backup files using hash
Usage : checkOldBackup_hash(sourcePath,backupPath)
Returns True if hash is matched False if hash is not matched'''
if os.path.isabs(filePath) and not os.path.basename(filePath).startswith('.') :
for index in xrange(__fversions__):
old_backup = '%s.%2.2d' % (filePath, index)
filename = os.path.basename(old_backup)
old_backup_dir = old_backup.replace(filename,'')
try:
if __fileExists(old_backup_dir,old_backup) and compareHash(srcPath, old_backup):
print 'BACKUP FOUND\n' + srcPath + '\n' + old_backup + '\n'
logging.getLogger('anjali').info('Backup Found %s , %s',str(srcPath),str(old_backup))
return True
except OSError:
logging.getLogger('anjali').warning('Error on :%s , %s',str(srcPath),str(old_backup))
logging.getLogger('anjali').exception('Error:')
return False
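# Illustrative example of the versioning scheme above (hypothetical path): with
# __fversions__ = 3, the candidates checked for '/backup/report.doc' are
# '/backup/report.doc.00', '/backup/report.doc.01' and '/backup/report.doc.02'.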
def checkOldBackup(srcPath,filePath):
'''Function to check old backup files
Usage : checkOldBackup(sourcePath,backupPath)
Returns True if file is found False if file is not found'''
if os.path.isabs(filePath) and not os.path.basename(filePath).startswith('.') :
for index in xrange(__fversions__):
old_backup = '%s.%2.2d' % (filePath, index)
filename = os.path.basename(old_backup)
old_backup_dir = old_backup.replace(filename,'')
try:
if __fileExists(old_backup_dir,old_backup) and filecmp.cmp(srcPath, old_backup, shallow=False):
print 'BACKUP FOUND\n' + srcPath + '\n' + old_backup + '\n'
logging.getLogger('anjali').info('Backup Found %s , %s',str(srcPath),str(old_backup))
return True
except OSError:
logging.getLogger('anjali').warning('Error on :%s , %s',str(srcPath),str(old_backup))
logging.getLogger('anjali').exception('Error:')
return False
def getBackupFileName_local(sourceFilePath,backupFilePath):
'''Function to get backup file's name from local storage
Usage : getBackupFileName_local(sourcePath,backupPath)
Returns Backup File's Name'''
if os.path.isabs(backupFilePath) and not os.path.basename(backupFilePath).startswith('.') :
for index in xrange(__fversions__):
old_backup = '%s.%2.2d' % (backupFilePath, index)
filename = os.path.basename(old_backup)
old_backup_dir = old_backup.replace(filename,'')
try:
if __fileExists(old_backup_dir,old_backup) and filecmp.cmp(sourceFilePath, old_backup, shallow=False):
#print 'BACKUP FOUND\n' + sourceFilePath + '\n' + old_backup + '\n'
logging.getLogger('anjali').info('Backup Found %s , %s',str(sourceFilePath),str(old_backup))
return old_backup
except OSError:
logging.getLogger('anjali').warning('Error on :%s , %s',str(sourceFilePath),str(old_backup))
logging.getLogger('anjali').exception('Error:')
return False #old_backup
def __calculateMD5(filepath):
'''Function to calculate binary hash value of a given file
Usage : x = calculateMD5(test/file.txt)
Returns : MD5 hexdigest of the given file'''
try:
md5 = hashlib.md5()
with open(filepath,'rb') as f:
for chunk in iter(lambda: f.read(16*1024*md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
except:
logging.getLogger('anjali').warning('Error in Calculating MD5 Hash')
logging.getLogger('anjali').exception('Error:')
def compareHash(src,dest):
'''Function to compare 2 files and their corresponding hash values
Usage : compareHash(source/file1.txt,backup/file1.txt.00)
Returns True if match is found False if match is not found'''
try:
srcHash = __calculateMD5(src)
destHash = __calculateMD5(dest)
#print src , srcHash
#print dest , destHash
if srcHash == destHash:
logging.getLogger('anjali').info('%s :%s , %s :%s', src, str(srcHash), dest, str(destHash))
return True
return False
except:
logging.getLogger('anjali').warning('Error in Comparing MD5 Hash')
logging.getLogger('anjali').exception('Error:')
def __fileExists(filePath,fileName):
'''Alternative function to the os.path.isfile function
    function created due to a bug in the isfile function
Usage : fileExists(rootFolder,file)
Returns True if file exists False if does not'''
try:
for dir, subdirs, files in os.walk(filePath):
if not os.path.exists(fileName):
return False
if os.path.exists(fileName):
return True
except:
logging.getLogger('anjali').warning('Error in Checking File Exists')
logging.getLogger('anjali').exception('Error:')
def checkBackupFiles(List):
'''Main function to check backup file versions of a given list of source files
Usage : x = checkBackupFiles(Files)
Returns a new 2 dimensional list of files that has NO backup files
[sourceFile,backupFile]'''
try:
newList = []
for index in xrange(len(List)):
if not checkOldBackup(List[index][0],List[index][1]):
x = [List[index][0],List[index][1]]
newList.append(x)
logging.getLogger('anjali').info('Source :%s Destination :%s',str(x[0]),str(x[1]))
if len(newList) > 0:
return newList
except:
logging.getLogger('anjali').warning('Error in Checking Backup Files')
logging.getLogger('anjali').exception('Error:')
def checkBackupFiles_hash(List):
'''Main function to check backup file versions of a given list of source files using Hash compare
Usage : x = checkBackupFiles_hash(Files)
Returns a new 2 dimensional list of files that has NO backup files
[sourceFile,backupFile]'''
try:
newList = []
for index in xrange(len(List)):
if not checkOldBackup_hash(List[index][0],List[index][1]):
x = [List[index][0],List[index][1]]
newList.append(x)
logging.getLogger('anjali').info('Source :%s Destination :%s',str(x[0]),str(x[1]))
if len(newList) > 0:
return newList
except:
logging.getLogger('anjali').warning('Error in Checking Backup Files Hash')
logging.getLogger('anjali').exception('Error:')
def createBackupFolders(List):
'''Function to create Backup Folders defined in the source directory
Usage : createBackupFolders(Folders)
Returns None'''
for folder in List:
try:
os.makedirs(folder)
print 'Creating Folder:'+folder
logging.getLogger('anjali').info('Creating Folder :%s',str(folder))
except OSError:
print 'Skipping Folder:'+folder
logging.getLogger('anjali').warning('Skipping Folder :%s',str(folder))
def createBackupFiles2(List):
'''Function to create Backup files defined in the list
Usage : createBackupFiles(Files) // Need to supply 2 dimension array
Returns List with backupFile'''
try:
newList = []
for item in List:
for x in xrange(__fversions__):
backup = '%s.%2.2d' % (item[1], x)
if not checkOldBackup(item[0],backup):
shutil.copy2(item[0],backup)
newList.append([item[0],backup])
print '\n Creating backup file :'+os.path.basename(backup)
logging.getLogger('anjali').info('Creating Backup File :%s',str(backup))
break
return newList
except:
logging.getLogger('anjali').warning('Error in Creating Backup Files')
logging.getLogger('anjali').exception('Error:')
def addBackupPath(rootDir,List, backupPath = '.backup'):
'''Function to add the backup path to the file path or folder path list given
Usage : Files = addBackupPath(sourceRootPath,Files,backupRootPath)
Returns : List of Files or List of Folders depending on List given'''
newList = []
for f in set(List):
FolderName = os.path.basename(os.path.dirname(f))
FileName = os.path.basename(f)
if os.path.isabs(backupPath):
if os.path.isfile(f):
filePath = f.replace(os.path.dirname(rootDir),'')
if platform.system() == 'Darwin' : newBackupPath = os.path.join(backupPath,filePath[1::])
if platform.system() == 'Windows': newBackupPath = os.path.join(backupPath,filePath).replace("/","\\")
z = [f,newBackupPath]
elif os.path.isdir(f) and rootDir != backupPath:
newBackupPath = backupPath + f.replace(os.path.dirname(rootDir),'')
z = newBackupPath
elif os.path.isdir(f) and rootDir == backupPath:
newBackupPath = os.path.join(f,'.backup')
z = newBackupPath
else:
FolderName = os.path.dirname(f)
if os.path.isfile(f):
newBackupPath = os.path.join(FolderName,backupPath,FileName)
z = [f,newBackupPath]
elif os.path.isdir(f) and os.path.isabs(backupPath):
newBackupPath = os.path.join(FolderName,backupPath)
z = newBackupPath
elif os.path.isdir(f) and not os.path.isabs(backupPath):
newBackupPath = os.path.join(f,backupPath)
z = newBackupPath
try:
newList.append(z)
logging.getLogger('anjali').info('Source :%s Destination :%s', str(z[0]),str(z[1]))
        except Exception:
            logging.getLogger('anjali').critical('Incorrect File or Link :%s',str(f))
            logging.getLogger('anjali').exception('Error:')
            raise ValueError('List given contains either links or is wrong')
if newList != []:
return newList
else:
return
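# Minimal sketch of the default (relative) case, assuming the file exists on disk:
#   addBackupPath('/data/src', ['/data/src/docs/a.txt'])
#     -> [['/data/src/docs/a.txt', '/data/src/docs/.backup/a.txt']]
# i.e. each file is backed up into a '.backup' folder next to it; with an absolute
# backupPath the source tree is mirrored under that backup root instead.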
def backup_hash(var,files,folders):
'''Main backup Function Hash Version
Usage : backup_hash('.backup',files,folders)
Returns None'''
try:
databaseConnection = startupCheckDB()
databaseCursor = databaseConnection.cursor()
foldersToCreate = addBackupPath(folders[0],folders,var)
filesToCreate = checkBackupFiles_hash(addBackupPath(folders[0],files,var))
if filesToCreate:
createBackupFolders(foldersToCreate)
x = createBackupFiles2(filesToCreate)
updateDatabase(x,databaseCursor)
databaseConnection.close()
except:
logging.getLogger('anjali').warning('Error in Main Backup Hash Loop')
logging.getLogger('anjali').exception('Error:')
def backup(var,files,folders):
'''Main backup Function
Usage : backup('.backup',files,folders)
Returns None'''
try:
databaseConnection = startupCheckDB()
databaseCursor = databaseConnection.cursor()
foldersToCreate = addBackupPath(folders[0],folders,var)
filesToCreate = checkBackupFiles(addBackupPath(folders[0],files,var))
if filesToCreate:
createBackupFolders(foldersToCreate)
x = createBackupFiles2(filesToCreate)
updateDatabase(x,databaseCursor)
databaseConnection.close()
except:
logging.getLogger('anjali').warning('Error in Main Backup Loop')
logging.getLogger('anjali').exception('Error:')
def setupLogger(logname=__logname__,size=20,days=7):
'''Function to setup the logger
Usage : setupLogger(myLog.log,50,15)
Returns None - Creates a blank log'''
logging_format = logging.Formatter('%(lineno)d - %(funcName)s - %(asctime)s - %(levelname)s - %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p')
    handler = logging.handlers.RotatingFileHandler(logname, maxBytes=size*1024*1024, backupCount=days)
#handler = logging.handlers.TimedRotatingFileHandler(__logname__,when='S',interval=1,backupCount=days,encoding=None, delay=False,utc=False)
handler.setFormatter(logging_format)
#logging.basicConfig(filename=__logname__,format='%(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
logger = logging.getLogger('anjali')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
#for x in xrange(200000):
# logger.info('is when this event was logged.')
def calculateSize(bytes,blocks):
'''Function to calculate size given bytes and blocks i.e Folder or Directory
Usage :calculateSize(total_bytes,total_blocks)
Windows: calculateSize(total_bytes,0)
Returns Size String in KB/MB/GB/TB'''
try:
if platform.system() == 'Darwin':
if bytes < 1024 * 1024: #KB
return str(int(round(blocks * 512e-3,1))) + ' KB'
elif bytes < 1024 * 1024 * 1024: #MB
return str(int(round(blocks * 512e-6,2))) + ' MB'
elif bytes < 1024 * 1024 * 1024 * 1024: #GB
return str(round(blocks * 512e-6 / 1000,2)) + ' GB'
elif bytes < 1024 * 1024 * 1024 * 1024 * 1024 : #TB
return str(round(blocks * 512e-6 / 1000 / 1000,2)) + ' TB'
elif platform.system() == 'Windows':
if bytes < 1024 * 1024: #KB
return str(int(round(bytes / 1024.0,1))) + ' KB'
elif bytes < 1024 * 1024 * 1024: #MB
return str(int(round(bytes / 1024 / 1024.0,2))) + ' MB'
elif bytes < 1024 * 1024 * 1024 * 1024: #GB
return str(round(bytes / 1024 / 1024 / 1024.0,2)) + ' GB'
elif bytes < 1024 * 1024 * 1024 * 1024 * 1024 : #TB
return str(round(bytes / 1024 / 1024 / 1024 / 1024.0,2)) + ' TB'
except:
logging.getLogger('anjali').warning('Error in calculate size')
logging.getLogger('anjali').exception('Error:')
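# Worked example (illustrative figures): for a 5 MiB file, the Windows branch returns
# int(round(5242880 / 1024 / 1024.0, 2)) = '5 MB'; the Darwin branch converts 512-byte
# blocks with decimal factors (~10240 blocks * 512e-6 ~= 5.2 -> '5 MB'), so the two
# platforms can differ slightly for the same file.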
def calculateSize2(filepath):
'''Function to calculate size of a file
Usage :calculateSize('filename.txt')
Returns Size String in KB/MB/GB/TB'''
try:
if platform.system() == 'Darwin' : file_blocks = os.stat(filepath).st_blocks
if platform.system() == 'Windows': file_blocks = 0
file_bytes = os.stat(filepath).st_size
if platform.system() == 'Darwin':
if file_bytes < 1024 * 1024: #KB
return str(round(file_blocks * 512e-3,1)) + ' KB'
elif file_bytes < 1024 * 1024 * 1024: #MB
return str(round(file_blocks * 512e-6,1)) + ' MB'
elif file_bytes < 1024 * 1024 * 1024 * 1024: #GB
return str(round(file_blocks * 512e-6 / 1000,2)) + ' GB'
elif file_bytes < 1024 * 1024 * 1024 * 1024 * 1024 : #TB
return str(round(file_blocks * 512e-6 / 1000 / 1000,2)) + ' TB'
elif platform.system() == 'Windows':
if file_bytes < 1024 * 1024: #KB
return str(round(file_bytes / 1024.0,1)) + ' KB'
elif file_bytes < 1024 * 1024 * 1024: #MB
return str(round(file_bytes / 1024 / 1024.0,1)) + ' MB'
elif file_bytes < 1024 * 1024 * 1024 * 1024: #GB
return str(round(file_bytes / 1024 / 1024 / 1024.0,2)) + ' GB'
elif file_bytes < 1024 * 1024 * 1024 * 1024 * 1024 : #TB
return str(round(file_bytes / 1024 / 1024 / 1024 / 1024.0,2)) + ' TB'
except:
logging.getLogger('anjali').warning('Error in calculate size 2')
logging.getLogger('anjali').exception('Error:')
def printTree(directory,padding,print_files=False,dump=False,fobject=None):
'''Function to print tree structure in console or dump to file
Usage : printTree('Path/To/Folder',' ',True,True,file)
printTree('Path/To/Folder',' ',True)
Returns None'''
try:
item = [directory]
if dump:
x = padding[:1] + '+-' + os.path.basename(os.path.abspath(directory)) + '/' + '\n'
fobject.write(str(x))
print padding[:1] + '+-' + os.path.basename(os.path.abspath(directory)) + '/'
padding = padding + ' '
treefiles = []
if print_files:
treefiles = os.listdir(directory)
else:
treefiles = [x for x in os.listdir(directory) if os.path.isdir(directory+os.sep+x)]
count = 0
for afile in treefiles:
count += 1
if dump:
x = padding + '|' + '\n'
fobject.write(str(x))
print padding + '|'
path = directory + os.sep + afile
if os.path.isdir(path):
if count == len(treefiles):
printTree(path,padding + ' ',print_files,dump,fobject) #Changed to False(dump)
else:
printTree(path,padding + '|',print_files,dump,fobject) #Changed to False(dump)
else:
item.append(str(afile))
if dump:
x = padding + '+-' + afile + '\n'
                    fobject.write(str(x))
print padding + '+-' + afile
except:
logging.getLogger('anjali').warning('Error in Print Tree')
logging.getLogger('anjali').exception('Error:')
def printFiles(folderPath,dump = False,dumpFile = None):
'''Function to print out a list of files in a given folder
with total files and total size
Usage :printFiles('/Path/To/Folder',True,'test.txt')
printFiles('/Path/To/Folder')
Returns None'''
try:
filesList , FolderList = cacheFolder2(folderPath)
if dump:
f = open(dumpFile,'w')
totalBytes = 0
totalBlocks = 0
for eachfile in filesList:
if dump:
f.write(os.path.normcase(eachfile) + '\n')
print os.path.normcase(eachfile)
if platform.system() == 'Darwin' : totalBlocks += os.stat(eachfile).st_blocks
totalBytes += os.stat(eachfile).st_size
if dump:
f.write('Total files :'+str(len(filesList))+'\n')
f.write('Total size :'+str(calculateSize(totalBytes,totalBlocks))+'\n')
f.close()
print 'Total files :', len(filesList)
print 'Total size :', calculateSize(totalBytes,totalBlocks)
except:
logging.getLogger('anjali').warning('Error in Print Files')
logging.getLogger('anjali').exception('Error:')
def printFullList(sourceDir,dump = False, dumpFile = None):
'''Function to print out a full list of files in a given folder
with total files and total size
Usage :fullList('/Path/To/Folder',True,'test.txt')
fullList('/Path/To/Folder')
Returns None'''
try:
totalBytes = 0
totalBlocks = 0
filesList , FolderList = cacheFolder2(sourceDir)
if dump:
f = open(dumpFile,'w')
f.write('-------------------------------------------------------------------------------------\n')
f.write('File Name | File Size | Modified Date\n')
f.write('-------------------------------------------------------------------------------------\n')
print '-------------------------------------------------------------------------------------'
print 'File Name | File Size | Modified Date'
print '-------------------------------------------------------------------------------------'
largestFileLen = max([len(os.path.basename(eachfile)) for eachfile in filesList ])
for eachfile in filesList:
if platform.system() == 'Darwin' :fileSize = calculateSize(os.stat(eachfile).st_size,os.stat(eachfile).st_blocks)
if platform.system() == 'Windows':fileSize = calculateSize(os.stat(eachfile).st_size,0)
fileModifiedDate = getModifiedDate(eachfile)
if platform.system() == 'Darwin' : totalBlocks += os.stat(eachfile).st_blocks
totalBytes += os.stat(eachfile).st_size
conFmtStr = '{0:'+str(largestFileLen)+'s} {1:8s} {2:17s}'
x = conFmtStr.format(os.path.basename(eachfile), fileSize,fileModifiedDate)
print conFmtStr.format(os.path.basename(eachfile), fileSize,fileModifiedDate)
if dump: f.write(str(x + '\n'))
print '-------------------------------------------------------------------------------------'
print 'Total files :', len(filesList)
print 'Total size :', calculateSize(totalBytes,totalBlocks)
print '-------------------------------------------------------------------------------------'
if dump:
f.write('-------------------------------------------------------------------------------------\n')
f.write(str('Total files :' + str(len(filesList)) + '\n'))
f.write(str('Total size :' + str(calculateSize(totalBytes,totalBlocks)))+ '\n')
f.write('-------------------------------------------------------------------------------------\n')
f.close()
except:
logging.getLogger('anjali').warning('Error in Print Full List')
logging.getLogger('anjali').exception('Error:')
def zipBackup(files_List, basedir, archivename):
try:
print 'Starting to Zip...'
databaseConnection = startupCheckDB()
databaseCursor = databaseConnection.cursor()
newList = []
z = zipfile.ZipFile(archivename, "w", zipfile.ZIP_DEFLATED)
for afile in files_List:
newList.append([afile,archivename])
zfn = afile[len(basedir)+len(os.sep):] #XXX: relative path
z.write(afile, zfn)
logging.getLogger('anjali').info('file %s added to zip %s' % (afile,archivename))
print 'Zipping %s' % zfn
z.close()
updateDatabase(newList,databaseCursor)
databaseConnection.close()
print 'Zip Done...'
except:
logging.getLogger('anjali').warning('Error in Zip Backup')
logging.getLogger('anjali').exception('Error:')
def getModifiedDate(filepath):
''' Function to get the Modified Date of the file
Usage : getModifiedDate('filename.txt')
Returns : String formatted from the date tuple into dd/mm/yyyy HH:MM:SS AM/PM'''
try:
return datetime.datetime.fromtimestamp(os.stat(filepath).st_mtime).strftime("%d/%m/%Y %H:%M:%S %p")
except:
logging.getLogger('anjali').warning('Error in Get Modified Date')
logging.getLogger('anjali').exception('Error:')
def string2DateTime(dateString):
''' Function to get the Date tuple from String
Usage : string2DateTime(file's datestring)
Returns : DateTime tuple from string dd/mm/yyyy HH:MM:SS AM/PM'''
try:
return datetime.datetime.strptime(dateString,"%d/%m/%Y %H:%M:%S %p")
except:
logging.getLogger('anjali').warning('Error in String 2 DateTime')
logging.getLogger('anjali').exception('Error:')
def datetime2String(date):
''' Function to get Date String from tuple
Usage : datetime2String(datetime tuple)
Returns : String formatted from the date tuple into dd/mm/yyyy HH:MM:SS AM/PM'''
try:
return date.strftime("%d/%m/%Y %H:%M:%S %p")
except:
logging.getLogger('anjali').warning('Error in DateTime 2 String')
logging.getLogger('anjali').exception('Error:')
def createTable(databaseCursor,tableName):
''' Function to create Table using the given database cursor
Usage : createTable(databaseCursor,'Table Name')
Returns : None'''
try:
databaseCursor.execute('''CREATE TABLE "main"."%s" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"filename" text NOT NULL,
"source_path" text NOT NULL,
"size" text NOT NULL,
"modified_date" text NOT NULL,
"backup_path" text NULL);''' % tableName)
except:
logging.getLogger('anjali').warning('Error in Create Table')
logging.getLogger('anjali').exception('Error:')
def connectDatabase(filename):
''' Function to connect to a Database with given filename
Usage : connectDatabase('anjali.db')
Returns : Database Connection without isolation level'''
try:
if os.path.isfile(filename):
logging.getLogger('anjali').info('Database file found %s' % filename)
databaseConnection = sqlite3.connect(filename)
elif not os.path.isfile(filename):
logging.getLogger('anjali').critical('Database file not found %s' % filename)
databaseConnection = sqlite3.connect(filename)
databaseCursor = databaseConnection.cursor()
databaseConnection.isolation_level = None # Autocommit Mode
return databaseConnection
except:
logging.getLogger('anjali').warning('Error in Connect Database')
logging.getLogger('anjali').exception('Error:')
def startupCheckDB():
''' Function to connect and do startup Checks with a database
Usage : startupCheckDB()
Returns : Database Connection without isolation level'''
try:
databaseConnection = connectDatabase('anjali.db')
databaseCursor = databaseConnection.cursor()
todayDate = datetime.datetime.today().strftime("%d-%m-%Y")
databaseCursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
databaseTableNames = databaseCursor.fetchall()
if databaseTableNames == []:
createTable(databaseCursor,todayDate)
logging.getLogger('anjali').info('Empty Database, Created Table %s' % todayDate)
return databaseConnection
for databaseTable in databaseTableNames:
if todayDate in databaseTable:
logging.getLogger('anjali').info('Table %s found' % todayDate)
return databaseConnection
createTable(databaseCursor,todayDate)
logging.getLogger('anjali').info('Created Table %s' % todayDate)
return databaseConnection
except:
logging.getLogger('anjali').warning('Error in Startup Check Database')
logging.getLogger('anjali').exception('Error:')
def updateDatabase(fileList,databaseCursor):
''' Function to update Database with a given file List
Usage : updateDatabase(fileList,databaseCursor)
Returns : None'''
try:
todayDate = datetime.datetime.today().strftime("%d-%m-%Y")
for afile in fileList:
fileName = os.path.basename(afile[0])
sourcePath = afile[0]
filesize = calculateSize2(afile[0])
modifiedDate = getModifiedDate(afile[0])
backup_path = afile[1]
databaseCursor.execute("""insert into "%s" values(NULL,?,?,?,?,?)""" % todayDate,(fileName,sourcePath,filesize,modifiedDate,backup_path))
logging.getLogger('anjali').info('Updating into database %s' % fileName)
except:
logging.getLogger('anjali').warning('Error in Update Database')
logging.getLogger('anjali').exception('Error:')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=str(__program__), description=str(__program__) + ' ' + str(__version__), epilog='Copyright @2013 M.Kumaran atv.kumar@gmail.com')
parser.add_argument('-s','--source',action='store', dest='source', required=True, help='Your specified SOURCE directory path')
parser.add_argument('-d','--destination',action='store', dest='destination', required=False, help='Your specified DESTINATION directory path')
parser.add_argument('-z','--zip',action='store', dest='zipBackup', required=False, help='If you want to zip files at DESTINATION')
parser.add_argument('-f','--fulllist',action='store_true', dest='fullList', required=False, help='If you want to view FULL DETAILS in SOURCE')
parser.add_argument('-l','--listFiles',action='store_true', dest='listFiles', required=False, help='If you want to list files specified in the SOURCE')
parser.add_argument('-t','--listTree',action='store_true', dest='listTree', required=False, help='If you want to tree view the SOURCE directory')
parser.add_argument('-w','--dump',action='store', dest='dumpToFile', required=False, help='File you want to dump output to file')
results = parser.parse_args()
setupLogger()
logging.getLogger('anjali').info('Started Main Loop')
if results.source and results.destination:
files_List, folders_List = [],[]
folders_List.append(results.source)
files_List,folders_List = cacheFolder2(results.source,files_List,folders_List)
backup(results.destination,files_List,folders_List)
if results.source and results.destination == '.':
files_List, folders_List = [],[]
folders_List.append(results.source)
files_List,folders_List = cacheFolder2(results.source,files_List,folders_List)
backup(results.source,files_List,folders_List)
if results.source and len(str(results.destination)) > 1 and str(results.destination).startswith('.'):
files_List, folders_List = [],[]
folders_List.append(results.source)
files_List,folders_List = cacheFolder2(results.source,files_List,folders_List)
backup(results.destination,files_List,folders_List)
if results.source and results.zipBackup:
files_List, folders_List = [],[]
files_List,folders_List = cacheFolder2(results.source,files_List,folders_List)
zipBackup(files_List, results.source ,results.zipBackup)
if results.source and results.fullList:
printFullList(results.source)
if results.dumpToFile:
printFullList(results.source, True, results.dumpToFile)
if results.source and results.listFiles:
printFiles(results.source)
if results.dumpToFile:
printFiles(results.source, True, results.dumpToFile)
if results.source and results.listTree:
printTree(results.source,' ',True)
if results.dumpToFile:
f = open(results.dumpToFile,'w')
printTree(results.source,' ',True,True,f)
f.close()
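# Illustrative command-line invocations (assuming the script is saved as anjali.py;
# the paths and file names below are placeholders, not part of the original source):
#   python anjali.py -s /path/to/source -d /path/to/destination   # plain backup
#   python anjali.py -s /path/to/source -z backup.zip             # zip the source tree
#   python anjali.py -s /path/to/source -f -w listing.txt         # full listing, dumped to a file
#   python anjali.py -s /path/to/source -t                        # tree view of the source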
|
|
# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_db.sqlalchemy.compat import utils as compat_utils
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import utils as oslodbutils
from sqlalchemy import Integer, String
from sqlalchemy import MetaData, Table, Column
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import sql
from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import utils
from nova import exception
SA_VERSION = compat_utils.SQLA_VERSION
class CustomType(UserDefinedType):
"""Dummy column type for testing unsupported types."""
def get_col_spec(self):
return "CustomType"
class TestMigrationUtilsSQLite(test_base.DbTestCase):
"""Class for testing utils that are used in db migrations."""
def setUp(self):
super(TestMigrationUtilsSQLite, self).setUp()
self.meta = MetaData(bind=self.engine)
def test_delete_from_select(self):
table_name = "__test_deletefromselect_table__"
uuidstrs = []
for unused in range(10):
uuidstrs.append(uuid.uuid4().hex)
conn = self.engine.connect()
test_table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True,
nullable=False, autoincrement=True),
Column('uuid', String(36), nullable=False))
test_table.create()
# Add 10 rows to table
for uuidstr in uuidstrs:
ins_stmt = test_table.insert().values(uuid=uuidstr)
conn.execute(ins_stmt)
# Delete 4 rows in one chunk
column = test_table.c.id
query_delete = sql.select([column],
test_table.c.id < 5).order_by(column)
delete_statement = utils.DeleteFromSelect(test_table,
query_delete, column)
result_delete = conn.execute(delete_statement)
# Verify we delete 4 rows
self.assertEqual(result_delete.rowcount, 4)
query_all = sql.select([test_table])\
.where(test_table.c.uuid.in_(uuidstrs))
rows = conn.execute(query_all).fetchall()
# Verify we still have 6 rows in table
self.assertEqual(len(rows), 6)
def test_check_shadow_table(self):
table_name = 'test_check_shadow_table'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('c', String(256)))
table.create()
# check missing shadow table
self.assertRaises(NoSuchTableError,
utils.check_shadow_table, self.engine, table_name)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
Column('id', Integer),
Column('a', Integer))
shadow_table.create()
# check missing column
self.assertRaises(exception.NovaException,
utils.check_shadow_table, self.engine, table_name)
# check when all is ok
c = Column('c', String(256))
shadow_table.create_column(c)
self.assertTrue(utils.check_shadow_table(self.engine, table_name))
# check extra column
d = Column('d', Integer)
shadow_table.create_column(d)
self.assertRaises(exception.NovaException,
utils.check_shadow_table, self.engine, table_name)
def test_check_shadow_table_different_types(self):
table_name = 'test_check_shadow_table_different_types'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer))
table.create()
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', String(256)))
shadow_table.create()
self.assertRaises(exception.NovaException,
utils.check_shadow_table, self.engine, table_name)
@test_base.backend_specific('sqlite')
def test_check_shadow_table_with_unsupported_sqlite_type(self):
table_name = 'test_check_shadow_table_with_unsupported_sqlite_type'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('c', CustomType))
table.create()
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('c', CustomType))
shadow_table.create()
self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_table_by_table_instance(self):
table_name = 'test_create_shadow_table_by_table_instance'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('b', String(256)))
table.create()
utils.create_shadow_table(self.engine, table=table)
self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_table_by_name(self):
table_name = 'test_create_shadow_table_by_name'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('b', String(256)))
table.create()
utils.create_shadow_table(self.engine, table_name=table_name)
self.assertTrue(utils.check_shadow_table(self.engine, table_name))
@test_base.backend_specific('sqlite')
def test_create_shadow_table_not_supported_type(self):
table_name = 'test_create_shadow_table_not_supported_type'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', CustomType))
table.create()
# reflection of custom types has been fixed upstream
if SA_VERSION < (0, 9, 0):
self.assertRaises(oslodbutils.ColumnError,
utils.create_shadow_table,
self.engine, table_name=table_name)
utils.create_shadow_table(self.engine,
table_name=table_name,
a=Column('a', CustomType()))
self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_both_table_and_table_name_are_none(self):
self.assertRaises(exception.NovaException,
utils.create_shadow_table, self.engine)
def test_create_shadow_both_table_and_table_name_are_specified(self):
table_name = ('test_create_shadow_both_table_and_table_name_are_'
'specified')
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer))
table.create()
self.assertRaises(exception.NovaException,
utils.create_shadow_table,
self.engine, table=table, table_name=table_name)
def test_create_duplicate_shadow_table(self):
table_name = 'test_create_duplicate_shadow_table'
table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer))
table.create()
utils.create_shadow_table(self.engine, table_name=table_name)
self.assertRaises(exception.ShadowTableExists,
utils.create_shadow_table,
self.engine, table_name=table_name)
class TestMigrationUtilsPostgreSQL(TestMigrationUtilsSQLite,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestMigrationUtilsMySQL(TestMigrationUtilsSQLite,
test_base.MySQLOpportunisticTestCase):
pass
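# For reference (illustrative, mirroring the tests above): outside the test harness,
# creating and verifying a shadow table boils down to
#   utils.create_shadow_table(engine, table_name='instances')
#   utils.check_shadow_table(engine, 'instances')
# where 'instances' is a hypothetical table name and `engine` an existing SQLAlchemy engine.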
|
|
class Cosmology(object):
"""
A class that nicely deals with cosmological parameters.
Most cosmological parameters are merely input and are made available as
attributes in the class. However, more complicated relations such as
the interrelation of omegab, omegac, omegam, omegav for example are dealt
with in a more robust manner.
The secondary purpose of this class is to provide simple mappings of the
parameters to common python cosmology libraries (for now just `cosmolopy`
and `pycamb`). It has been found by the authors that using more than one
library becomes confusing in terms of naming all the parameters, so this
class helps deal with that.
.. note :: Currently, only several combinations of the density parameters
are valid:
1. ``omegab`` and ``omegac``
#. ``omegam``
#. ``omegab_h2`` and ``omegac_h2``
#. None of them
To this one may add ``omegav`` (dark-energy density) at will. More
combinations will be added in the future.
Parameters
----------
default : str, {``None``, ``"planck1_base"``}
Defines a set of default parameters, based on a published set from WMAP
or Planck. These defaults are applied in a smart way, so as not to
override any user-set parameters.
Current options are
1. ``None``
#. ``"planck1_base"``: The cosmology of first-year PLANCK mission (with no lensing or WP)
force_flat : bool, default ``False``
If ``True``, enforces a flat cosmology :math:`(\Omega_m+\Omega_\lambda=1)`.
This will modify ``omegav`` only, never ``omegam``.
\*\*kwargs :
The list of available keyword arguments is as follows:
1. ``sigma_8``: The normalisation. Mass variance in top-hat spheres with :math:`R=8Mpc h^{-1}`
#. ``n``: The spectral index
#. ``w``: The dark-energy equation of state
#. ``cs2_lam``: The constant comoving sound speed of dark energy
#. ``t_cmb``: Temperature of the CMB
#. ``y_he``: Helium fraction
#. ``N_nu``: Number of massless neutrino species
#. ``N_nu_massive``: Number of massive neutrino species
#. ``z_reion``: Redshift of reionization
#. ``tau``: Optical depth at reionization
#. ``delta_c``: The critical overdensity for collapse
#. ``h``: The hubble parameter
#. ``H0``: The hubble constant
#. ``omegan``: The normalised density of neutrinos
#. ``omegam``: The normalised density of matter
#. ``omegav``: The normalised density of dark energy
#. ``omegab``: The normalised baryon density
#. ``omegac``: The normalised CDM density
#. ``omegab_h2``: The normalised baryon density by ``h**2``
#. ``omegac_h2``: The normalised CDM density by ``h**2``
.. note :: The reason these are implemented as `kwargs` rather than the
usual arguments, is because the code can't tell *a priori* which
combination of density parameters the user will input.
"""
# A dictionary of bounds for each parameter
# This also forms a list of all parameters possible
# Note that just because a parameter is within these bounds doesn't mean
# it will actually work in say CAMB.
_bounds = {"sigma_8":[0.1, 10],
"n":[-3, 4],
"w":[-1.5, 0],
"cs2_lam":[-1, 2],
"t_cmb":[0, 10.0],
"y_he":[0, 1],
"N_nu":[1, 10],
"N_nu_massive":[0, 3],
"z_reion":[2, 1000],
"tau":[0, 1],
"omegan":[0, 1],
"h":[0.05, 5],
"H0":[5, 500],
"omegab":[0.0001, 1],
"omegac":[0, 2],
"omegav":[0, 2],
"omegam":[0.0001, 3],
"omegab_h2":[0.0001, 1],
"omegac_h2":[0, 2]}
def __init__(self, default=None, force_flat=False, **kwargs):
# Map the 'default' cosmology to its dictionary
self.default = default
if default == "planck1_base":
self.__base = dict(planck1_base, **extras)
# Set some simple parameters
self.force_flat = force_flat
self.crit_dens = 27.755e10
#=======================================================================
# Check values in kwargs
#=======================================================================
for k in kwargs:
if k not in Cosmology._bounds:
raise ValueError(k + " is not a valid parameter for Cosmology")
#=======================================================================
# First set the "easy" values (no dependence on anything else
#=======================================================================
easy_params = ["sigma_8", "n", 'w', 'cs2_lam', 't_cmb', 'y_he', "N_nu",
"z_reion", "tau", "omegan", 'delta_c', "N_nu_massive"]
for p in easy_params:
if p in kwargs:
self.__dict__.update({p:kwargs.pop(p)})
elif default is not None:
self.__dict__.update({p:self.__base[p]})
#=======================================================================
# Now the hard parameters (multi-dependent)
#=======================================================================
################### h/H0 ###############################################
if "h" in kwargs and "H0" in kwargs:
if kwargs['h'] != kwargs["H0"] / 100.0:
print "h and H0 specified inconsistently, using h"
if "H0" in kwargs:
self.H0 = kwargs.pop("H0")
self.h = self.H0 / 100.0
if "h" in kwargs:
self.h = kwargs.pop("h")
self.H0 = 100 * self.h
if not hasattr(self, "h") and default is not None:
self.H0 = self.__base["H0"]
self.h = self.H0 / 100.0
################### The omegas #########################################
if "omegav" in kwargs:
self.omegav = kwargs.pop("omegav")
if len(kwargs) == 0:
if self.force_flat and hasattr(self, "omegav"):
self.omegam = 1 - self.omegav
self.omegak = 0.0
elif default is not None:
self.omegab_h2 = self.__base["omegab_h2"]
self.omegac_h2 = self.__base["omegac_h2"]
self.omegab = self.omegab_h2 / self.h ** 2
self.omegac = self.omegac_h2 / self.h ** 2
self.omegam = self.omegab + self.omegac
elif "omegab" in kwargs and "omegac" in kwargs and len(kwargs) == 2:
self.omegab = kwargs["omegab"]
self.omegac = kwargs["omegac"]
self.omegam = self.omegab + self.omegac
if hasattr(self, "h"):
self.omegab_h2 = self.omegab * self.h ** 2
self.omegac_h2 = self.omegac * self.h ** 2
elif "omegam" in kwargs and len(kwargs) == 1:
self.omegam = kwargs["omegam"]
elif "omegab_h2" in kwargs and "omegac_h2" in kwargs and len(kwargs) == 2:
if not hasattr(self, 'h'):
raise AttributeError("You need to specify h as well")
self.omegab_h2 = kwargs["omegab_h2"]
self.omegac_h2 = kwargs["omegac_h2"]
self.omegab = self.omegab_h2 / self.h ** 2
self.omegac = self.omegac_h2 / self.h ** 2
self.omegam = self.omegab + self.omegac
else:
raise AttributeError("your input omegaXXX arguments were invalid" + str(kwargs))
if hasattr(self, "omegam"):
self.mean_dens = self.crit_dens * self.omegam
if self.force_flat:
self.omegav = 1 - self.omegam
self.omegak = 0.0
elif default is not None and not hasattr(self, "omegav"):
self.omegav = self.__base["omegav"]
if hasattr(self, "omegav") and not self.force_flat:
self.omegak = 1 - self.omegav - self.omegam
# Check all their values
for k, v in Cosmology._bounds.iteritems():
if k in self.__dict__:
self._check_bounds(k, v[0], v[1])
def pycamb_dict(self):
"""
Collect parameters into a dictionary suitable for pycamb.
Returns
-------
dict
Dictionary of values appropriate for pycamb
"""
map = {"w":"w_lam",
"t_cmb":"TCMB",
"y_he":"yhe",
# "tau":"reion__optical_depth",
"z_reion":"reion__redshift",
"N_nu":"Num_Nu_massless",
"omegab":"omegab",
"omegac":"omegac",
"H0":"H0",
"omegav":"omegav",
"omegak":"omegak",
"omegan":"omegan",
"cs2_lam":"cs2_lam",
"n":"scalar_index",
"N_nu_massive":"Num_Nu_massive"
}
return_dict = {}
for k, v in self.__dict__.iteritems():
if k in map:
return_dict.update({map[k]: v})
return return_dict
def cosmolopy_dict(self):
"""
Collect parameters into a dictionary suitable for cosmolopy.
Returns
-------
dict
Dictionary of values appropriate for cosmolopy
"""
map = {"tau":"tau",
"z_reion":"z_reion",
"omegab":"omega_b_0",
"h":"h",
"omegav":"omega_lambda_0",
"omegak":"omega_k_0",
"sigma_8":"sigma_8",
"omegam":"omega_M_0",
"n":"n",
"omegan":"omega_n_0",
"N_nu_massive":"N_nu",
"w":"w"}
return_dict = {}
for k, v in self.__dict__.iteritems():
if k in map:
return_dict.update({map[k]: v})
return return_dict
def _check_bounds(self, item, low=None, high=None):
if low is not None and high is not None:
if self.__dict__[item] < low or self.__dict__[item] > high:
raise ValueError(item + " must be between " + str(low) + " and " + str(high))
elif low is not None:
if self.__dict__[item] < low:
raise ValueError(item + " must be less than " + str(low))
elif high is not None:
if self.__dict__[item] > high:
raise ValueError(item + " must be greater than " + str(high))
#===============================================================================
# SOME BASE COSMOLOGIES
#===============================================================================
# The extras dict has common parameter defaults between all bases
extras = {"w" :-1,
"omegan" : 0.0,
'cs2_lam' : 1,
't_cmb' : 2.725,
'y_he' : 0.24,
'N_nu' : 3.04,
"delta_c" : 1.686,
"N_nu_massive":0.0,
}
# # Base Planck (no extra things like lensing and WP)
planck1_base = {"omegab_h2" : 0.022068,
"omegac_h2" : 0.12029,
"omegav" : 0.6825,
"H0" : 67.11,
'z_reion': 11.35,
'tau': 0.0925,
"sigma_8":0.8344,
"n":0.9624,
}
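# A minimal usage sketch (not part of the original module); the parameter values
# below are illustrative only.
if __name__ == "__main__":
    cosmo = Cosmology(default="planck1_base", force_flat=True, sigma_8=0.8)
    # omegam is derived from the base omegab_h2/omegac_h2 and h; force_flat then
    # sets omegav = 1 - omegam and omegak = 0.
    print cosmo.omegam, cosmo.omegav
    # Same parameters mapped to cosmolopy-style names:
    print cosmo.cosmolopy_dict()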
|
|
# -*- coding: utf-8 -*-
import marshmallow as ma
from marshmallow.exceptions import ValidationError
from marshmallow.compat import iteritems, PY2
from .fields import BaseRelationship, Meta, _META_LOAD_FROM
from .exceptions import IncorrectTypeError
from .utils import resolve_params
TYPE = 'type'
ID = 'id'
def plain_function(f):
"""Ensure that ``callable`` is a plain function rather than an unbound method."""
if PY2 and f:
return f.im_func
# Python 3 doesn't have bound/unbound methods, so don't need to do anything
return f
class SchemaOpts(ma.SchemaOpts):
def __init__(self, meta, *args, **kwargs):
super(SchemaOpts, self).__init__(meta, *args, **kwargs)
self.type_ = getattr(meta, 'type_', None)
self.inflect = plain_function(getattr(meta, 'inflect', None))
self.self_url = getattr(meta, 'self_url', None)
self.self_url_kwargs = getattr(meta, 'self_url_kwargs', None)
self.self_url_many = getattr(meta, 'self_url_many', None)
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use for `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use for `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop('include_data', ())
super(Schema, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
if field_name in self.include_data:
if not isinstance(field, BaseRelationship):
raise ValueError('Can only include relationships. "{}" is a "{}"'
.format(field_name, field.__class__.__name__))
field.include_data = True
elif isinstance(field, BaseRelationship):
field.include_data = False
for field_name in self.include_data:
if field_name not in self.fields:
raise ValueError('Unknown field "{}"'.format(field_name))
if not self.opts.type_:
raise ValueError('Must specify type_ class Meta option')
if 'id' not in self.fields:
raise ValueError('Must have an `id` field')
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError('Must specify `self_url` Meta option when '
'`self_url_kwargs` is specified')
self.included_data = {}
OPTIONS_CLASS = SchemaOpts
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data['included'] = list(self.included_data.values())
return data
def unwrap_item(self, item):
if 'type' not in item:
raise ma.ValidationError([
{
'detail': '`data` object must include `type` key.',
'source': {
'pointer': '/data'
}
}
])
if item['type'] != self.opts.type_:
raise IncorrectTypeError(actual=item['type'], expected=self.opts.type_)
payload = self.dict_class()
if 'id' in item:
payload['id'] = item['id']
if 'meta' in item:
payload[_META_LOAD_FROM] = item['meta']
for key, value in iteritems(item.get('attributes', {})):
payload[key] = value
for key, value in iteritems(item.get('relationships', {})):
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many):
if 'data' not in data:
raise ma.ValidationError([{
'detail': 'Object must include `data` key.',
'source': {
'pointer': '/',
},
}])
data = data['data']
if many:
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set load_from to the
inflected form of field_name.
"""
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
return None
# overrides ma.Schema._do_load so that we can format errors as JSON API Error objects.
def _do_load(self, data, many=None, **kwargs):
many = self.many if many is None else bool(many)
try:
result, errors = super(Schema, self)._do_load(data, many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
error_messages = errors
if '_schema' in error_messages:
error_messages = error_messages['_schema']
formatted_messages = self.format_errors(error_messages, many=many)
return result, formatted_messages
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many:
for index, errors in iteritems(errors):
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message, index=index)
for message in field_errors
])
else:
for field_name, field_errors in iteritems(errors):
formatted_errors.extend([
self.format_error(field_name, message)
for message in field_errors
])
return {'errors': formatted_errors}
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship)
if relationship:
container = 'relationships'
else:
container = 'attributes'
inflected_name = self.inflect(field_name)
if index:
pointer = '/data/{}/{}/{}'.format(index, container, inflected_name)
else:
pointer = '/data/{}/{}'.format(container, inflected_name)
if relationship:
pointer = '{}/data'.format(pointer)
return {
'detail': message,
'source': {
'pointer': pointer
}
}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump_to` values exist
attributes = {
(self.fields[field].dump_to or field): field
for field in self.fields
}
for field_name, value in iteritems(item):
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], Meta):
if 'meta' not in ret:
ret['meta'] = self.dict_class()
ret['meta'].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if 'relationships' not in ret:
ret['relationships'] = self.dict_class()
ret['relationships'][self.inflect(field_name)] = value
else:
if 'attributes' not in ret:
ret['attributes'] = self.dict_class()
ret['attributes'][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret['links'] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get('links', {}).get('self', None)
return {'self': self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret['self'] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {'data': data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links['self']:
ret['links'] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format(**kwargs) if link else None
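# A minimal usage sketch (not part of the original module). ArticleSchema is a
# hypothetical schema built with plain marshmallow fields; with marshmallow 2.x,
# dump() returns a (data, errors) MarshalResult.
if __name__ == '__main__':  # pragma: no cover
    class ArticleSchema(Schema):
        id = ma.fields.Str(dump_only=True)
        title = ma.fields.Str()

        class Meta:
            type_ = 'articles'

    result = ArticleSchema().dump({'id': '1', 'title': 'Hello'})
    # Expected shape:
    # {'data': {'type': 'articles', 'id': '1', 'attributes': {'title': 'Hello'}}}
    print(result.data)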
|
|
from __future__ import print_function
import json
import os
import socket
import time
class Chillog:
"""
Python library for building a logging data structure based on GELF.
For more info see http://docs.graylog.org/en/2.1/pages/gelf.html
"""
LOG_MESSAGE_VERSION = 1
LOG_ALERT = 1
LOG_CRITICAL = 2
LOG_ERROR = 3
LOG_WARNING = 4
LOG_NOTICE = 5
LOG_INFO = 6
LOG_DEBUG = 7
def __init__(self, service_name=None, hostname=None, prettify_log=False):
"""
Initialize the logger.
Create this once and reuse the object, e.g. `logger = Chillog()`, then call its level methods for logging.
:param service_name: Service the log is coming from. Defaults to the value of the 'SERVICE_NAME' environment variable.
:param hostname: Host the log is coming from. Defaults to `socket.gethostname()`.
:param prettify_log: If True, pretty-print the JSON log output with indentation.
"""
self.__service_name = service_name if service_name else os.environ.get('SERVICE_NAME')
self.__hostname = hostname if hostname else socket.gethostname()
self.__prettify_log = prettify_log
@staticmethod
def __get_current_millis():
"""
Get current time in milliseconds.
:return: Current time in milliseconds
"""
return int(round(time.time() * 1000))
@staticmethod
def __add_optional_fields(dict_to_add, **kwargs):
"""
Add optional fields to a dict.
Each optional field name is prefixed with an underscore.
:param dict_to_add: Dict to which the optional field(s) are added
:param kwargs: Optional field(s)
:return: Dict with optional field(s)
"""
for key, value in kwargs.items():
key = '_' + str(key)
dict_to_add[key] = value
return dict_to_add
def __print_log(self, formatted_log): # pragma: no cover
"""
Print formatted log
:param formatted_log: Formatted JSON log
:return: Print to stdout
"""
if self.__prettify_log:
print(json.dumps(formatted_log, indent=4, sort_keys=True))
else:
print(json.dumps(formatted_log, sort_keys=True))
def build_log_message(self, log_level, short_message, **kwargs):
"""
Build log message in Chillog format
:param log_level: Level of log
:param short_message: Short message about the event
:param kwargs: Additional field(s)
:return: Dict of formatted log
"""
expected_level = [
self.LOG_ALERT,
self.LOG_CRITICAL,
self.LOG_ERROR,
self.LOG_WARNING,
self.LOG_NOTICE,
self.LOG_INFO,
self.LOG_DEBUG
]
if log_level not in expected_level:
log_level = self.LOG_INFO
formatted_log = {
'version': self.LOG_MESSAGE_VERSION,
'host': self.__hostname,
'service': self.__service_name,
'short_message': short_message,
'full_message': kwargs.get('full_message'),
'timestamp': self.__get_current_millis(),
'level': log_level
}
if kwargs.get('full_message'):
del kwargs['full_message']
formatted_log = self.__add_optional_fields(formatted_log, **kwargs)
return formatted_log
def debug(self, short_message, **kwargs): # pragma: no cover
"""
Format log with debug level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_DEBUG,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def info(self, short_message, **kwargs): # pragma: no cover
"""
Format log with info level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_INFO,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def notice(self, short_message, **kwargs): # pragma: no cover
"""
Format log with notice level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_NOTICE,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def warning(self, short_message, **kwargs): # pragma: no cover
"""
Format log with warning level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_WARNING,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def error(self, short_message, **kwargs): # pragma: no cover
"""
Format log with error level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_ERROR,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def critical(self, short_message, **kwargs): # pragma: no cover
"""
Format log with critical level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_CRITICAL,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
def alert(self, short_message, **kwargs): # pragma: no cover
"""
Format log with alert level
:param short_message: Short log message
:param kwargs: Additional param(s)
:return: Print formatted log to stdout
"""
formatted_log = self.build_log_message(log_level=self.LOG_ALERT,
short_message=short_message,
**kwargs)
self.__print_log(formatted_log)
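# A minimal usage sketch (not part of the original module); the service name and
# extra fields below are illustrative only.
if __name__ == '__main__':  # pragma: no cover
    logger = Chillog(service_name='example-service', prettify_log=True)
    # Extra keyword arguments become underscore-prefixed fields, e.g. "_user_id".
    logger.info('User logged in', user_id=42, full_message='login via web form')
    logger.error('Payment failed', order_id='A-123')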
|
|
import unittest
from conans.test.utils.tools import TestClient, TestBufferConanOutput
import os
import zipfile
from conans.test.utils.test_files import temp_folder
from conans.util.files import load, save_files, save
from conans.client.remote_registry import RemoteRegistry, Remote
from mock import patch
from conans.client.rest.uploader_downloader import Downloader
from conans import tools
from conans.client.conf import ConanClientConfigParser
import shutil
win_profile = """[settings]
os: Windows
"""
linux_profile = """[settings]
os: Linux
"""
remotes = """myrepo1 https://myrepourl.net False
my-repo-2 https://myrepo2.com True
"""
registry = """myrepo1 https://myrepourl.net False
Pkg/1.0@user/channel myrepo1
"""
settings_yml = """os:
Windows:
Linux:
arch: [x86, x86_64]
"""
conan_conf = """
[log]
run_to_output = False # environment CONAN_LOG_RUN_TO_OUTPUT
level = 10 # environment CONAN_LOGGING_LEVEL
[general]
compression_level = 6 # environment CONAN_COMPRESSION_LEVEL
cpu_count = 1 # environment CONAN_CPU_COUNT
[proxies]
# Empty section will try to use system proxies.
# If don't want proxy at all, remove section [proxies]
# As documented in http://docs.python-requests.org/en/latest/user/advanced/#proxies
http = http://user:pass@10.10.1.10:3128/
no_proxy = mylocalhost
https = None
# http = http://10.10.1.10:3128
# https = http://10.10.1.10:1080
"""
myfuncpy = """def mycooladd(a, b):
return a + b
"""
def zipdir(path, zipfilename):
with zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_DEFLATED) as z:
for root, _, files in os.walk(path):
for f in files:
file_path = os.path.join(root, f)
if file_path == zipfilename:
continue
relpath = os.path.relpath(file_path, path)
z.write(file_path, relpath)
class ConfigInstallTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
registry_path = self.client.client_cache.registry
save(registry_path, """my-repo-2 https://myrepo2.com True
conan-center https://conan-center.com
MyPkg/0.1@user/channel my-repo-2
Other/1.2@user/channel conan-center
""")
save(os.path.join(self.client.client_cache.profiles_path, "default"), "#default profile empty")
save(os.path.join(self.client.client_cache.profiles_path, "linux"), "#empty linux profile")
def _create_profile_folder(self, folder=None):
folder = folder or temp_folder(path_with_spaces=False)
save_files(folder, {"settings.yml": settings_yml,
"remotes.txt": remotes,
"profiles/linux": linux_profile,
"profiles/windows": win_profile,
"config/conan.conf": conan_conf,
"pylintrc": "#Custom pylint",
"python/myfuncs.py": myfuncpy,
"python/__init__.py": ""})
return folder
def _create_zip(self, zippath=None):
folder = self._create_profile_folder()
zippath = zippath or os.path.join(folder, "myconfig.zip")
zipdir(folder, zippath)
return zippath
def _check(self, install_path):
settings_path = self.client.client_cache.settings_path
self.assertEqual(load(settings_path).splitlines(), settings_yml.splitlines())
registry_path = self.client.client_cache.registry
registry = RemoteRegistry(registry_path, TestBufferConanOutput())
self.assertEqual(registry.remotes,
[Remote("myrepo1", "https://myrepourl.net", False),
Remote("my-repo-2", "https://myrepo2.com", True),
])
self.assertEqual(registry.refs, {"MyPkg/0.1@user/channel": "my-repo-2"})
self.assertEqual(sorted(os.listdir(self.client.client_cache.profiles_path)),
sorted(["default", "linux", "windows"]))
self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "linux")).splitlines(),
linux_profile.splitlines())
self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "windows")).splitlines(),
win_profile.splitlines())
conan_conf = ConanClientConfigParser(self.client.client_cache.conan_conf_path)
self.assertEqual(conan_conf.get_item("log.run_to_output"), "False")
self.assertEqual(conan_conf.get_item("log.run_to_file"), "False")
self.assertEqual(conan_conf.get_item("log.level"), "10")
self.assertEqual(conan_conf.get_item("general.compression_level"), "6")
self.assertEqual(conan_conf.get_item("general.sysrequires_sudo"), "True")
self.assertEqual(conan_conf.get_item("general.cpu_count"), "1")
self.assertEqual(conan_conf.get_item("general.config_install"), install_path)
self.assertEqual(conan_conf.get_item("proxies.no_proxy"), "mylocalhost")
self.assertEqual(conan_conf.get_item("proxies.https"), "None")
self.assertEqual(conan_conf.get_item("proxies.http"), "http://user:pass@10.10.1.10:3128/")
self.assertEqual("#Custom pylint",
load(os.path.join(self.client.client_cache.conan_folder, "pylintrc")))
self.assertEqual("",
load(os.path.join(self.client.client_cache.conan_folder, "python",
"__init__.py")))
def reuse_python_test(self):
zippath = self._create_zip()
self.client.run('config install "%s"' % zippath)
conanfile = """from conans import ConanFile
from myfuncs import mycooladd
a = mycooladd(1, 2)
assert a == 3
class Pkg(ConanFile):
def build(self):
self.output.info("A is %s" % a)
"""
self.client.save({"conanfile.py": conanfile})
self.client.run("create . Pkg/0.1@user/testing")
self.assertIn("A is 3", self.client.out)
def install_file_test(self):
""" should install from a file in current dir
"""
zippath = self._create_zip()
self.client.run('config install "%s"' % zippath)
self._check(zippath)
self.assertTrue(os.path.exists(zippath))
def test_without_profile_folder(self):
shutil.rmtree(self.client.client_cache.profiles_path)
zippath = self._create_zip()
self.client.run('config install "%s"' % zippath)
self.assertEqual(sorted(os.listdir(self.client.client_cache.profiles_path)),
sorted(["linux", "windows"]))
self.assertEqual(load(os.path.join(self.client.client_cache.profiles_path, "linux")).splitlines(),
linux_profile.splitlines())
def install_url_test(self):
""" should install from a URL
"""
def my_download(obj, url, filename, **kwargs): # @UnusedVariable
self._create_zip(filename)
with patch.object(Downloader, 'download', new=my_download):
self.client.run("config install http://myfakeurl.com/myconf.zip")
self._check("http://myfakeurl.com/myconf.zip")
# repeat the process to check
self.client.run("config install http://myfakeurl.com/myconf.zip")
self._check("http://myfakeurl.com/myconf.zip")
def install_repo_test(self):
""" should install from a git repo
"""
folder = self._create_profile_folder()
with tools.chdir(folder):
self.client.runner('git init .')
self.client.runner('git add .')
self.client.runner('git config user.name myname')
self.client.runner('git config user.email myname@mycompany.com')
self.client.runner('git commit -m "mymsg"')
self.client.run('config install "%s/.git"' % folder)
self._check("%s/.git" % folder)
def reinstall_test(self):
""" should use configured URL in conan.conf
"""
zippath = self._create_zip()
self.client.run('config set general.config_install="%s"' % zippath)
self.client.run("config install")
self._check(zippath)
def reinstall_error_test(self):
""" should use configured URL in conan.conf
"""
error = self.client.run("config install", ignore_error=True)
self.assertTrue(error)
self.assertIn("Called config install without arguments", self.client.out)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
"""
Helper methods to load, save and pre-process data used in MLlib.
.. versionadded:: 1.0.0
"""
@staticmethod
def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in range(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values
@staticmethod
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in range(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in range(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
@staticmethod
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
.. versionadded:: 1.0.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context
path : str
file or directory path in any Hadoop-supported file system URI
numFeatures : int, optional
number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
features may not be present in certain files,
which leads to inconsistent feature
dimensions.
minPartitions : int, optional
min number of partitions
Returns
-------
:py:class:`pyspark.RDD`
labeled data stored as an RDD of LabeledPoint
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
@staticmethod
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
.. versionadded:: 1.0.0
Parameters
----------
data : :py:class:`pyspark.RDD`
an RDD of LabeledPoint to be saved
dir : str
directory to save the data
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
@staticmethod
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
.. versionadded:: 1.0.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context
path : str
file or directory path in any Hadoop-supported file system URI
minPartitions : int, optional
min number of partitions
Returns
-------
:py:class:`pyspark.RDD`
labeled data stored as an RDD of LabeledPoint
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
@staticmethod
@since("1.5.0")
def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0))
@staticmethod
@since("1.5.0")
def loadVectors(sc, path):
"""
Loads vectors saved using `RDD[Vector].saveAsTextFile`
with the default number of partitions.
"""
return callMLlibFunc("loadVectors", sc, path)
@staticmethod
def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with old vector columns converted to the
new vector type
Examples
--------
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
@staticmethod
def convertVectorColumnsFromML(dataset, *cols):
"""
Converts vector columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Vector` type from the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Vector columns to be converted.
Old vector columns will be ignored. If unspecified, all new
vector columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with new vector columns converted to the
old vector type
Examples
--------
>>> import pyspark
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
@staticmethod
def convertMatrixColumnsToML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Matrix` type to the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Matrix columns to be converted.
New matrix columns will be ignored. If unspecified, all old
matrix columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with old matrix columns converted to the
new matrix type
Examples
--------
>>> import pyspark
>>> from pyspark.mllib.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
@staticmethod
def convertMatrixColumnsFromML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Matrix` type from the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Matrix columns to be converted.
Old matrix columns will be ignored. If unspecified, all new
matrix columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with new matrix columns converted to the
old matrix type
Examples
--------
>>> import pyspark
>>> from pyspark.ml.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
"""
Mixin for models and transformers which may be saved as files.
.. versionadded:: 1.3.0
"""
def save(self, sc, path):
"""
Save this model to the given path.
This saves:
* human-readable (JSON) model metadata to path/metadata/
* Parquet formatted data to path/data/
The model may be loaded using :py:meth:`Loader.load`.
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context used to save model data.
path : str
Path specifying the directory in which to save
this model. If the directory already exists,
this method throws an exception.
"""
raise NotImplementedError
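    # For illustration only (hypothetical path; the exact files depend on the
    # concrete model implementation): a model saved to "/tmp/my_model" would
    # typically leave behind
    #
    #   /tmp/my_model/metadata/   human-readable JSON metadata
    #   /tmp/my_model/data/       Parquet-formatted model data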
@inherit_doc
class JavaSaveable(Saveable):
"""
Mixin for models that provide save() through their Scala
implementation.
.. versionadded:: 1.3.0
"""
@since("1.3.0")
def save(self, sc, path):
"""Save this model to the given path."""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, str):
raise TypeError("path should be a string, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
"""
Mixin for classes which can load saved models from files.
.. versionadded:: 1.3.0
"""
@classmethod
def load(cls, sc, path):
"""
Load a model from the given path. The model should have been
saved using :py:meth:`Saveable.save`.
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context used for loading model files.
path : str
Path specifying the directory to which the model was saved.
Returns
-------
object
model instance
"""
raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
"""
Mixin for classes which can load saved models using its Scala
implementation.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
"""
Returns the full class name of the Java loader. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = cls.__module__.replace("pyspark", "org.apache.spark")
return ".".join([java_package, cls.__name__])
@classmethod
def _load_java(cls, sc, path):
"""
Load a Java model from the given path.
"""
java_class = cls._java_loader_class()
java_obj = sc._jvm
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj.load(sc._jsc.sc(), path)
@classmethod
@since("1.3.0")
def load(cls, sc, path):
"""Load a model from the given path."""
java_model = cls._load_java(sc, path)
return cls(java_model)
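# A minimal usage sketch of the two mixins above (MyModel is a hypothetical
# wrapper class that mixes in JavaSaveable and JavaLoader and whose
# constructor accepts the loaded Java model; the path is illustrative):
#
#   model.save(sc, "/tmp/my_model")                 # via JavaSaveable.save
#   same_model = MyModel.load(sc, "/tmp/my_model")  # via JavaLoader.load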
class LinearDataGenerator(object):
"""Utils for generating linear data.
.. versionadded:: 1.5.0
"""
@staticmethod
def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
.. versionadded:: 1.5.0
Parameters
----------
intercept : float
bias factor, the term c in X'w + c
weights : :py:class:`pyspark.mllib.linalg.Vector` or convertible
feature vector, the term w in X'w + c
xMean : :py:class:`pyspark.mllib.linalg.Vector` or convertible
Point around which the data X is centered.
xVariance : :py:class:`pyspark.mllib.linalg.Vector` or convertible
Variance of the given data
nPoints : int
Number of points to be generated
seed : int
Random Seed
eps : float
            Used to scale the noise. Higher values of eps
            add more Gaussian noise.
Returns
-------
list
            of :py:class:`pyspark.mllib.regression.LabeledPoint` of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps)))
@staticmethod
@since("1.5.0")
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
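# A minimal usage sketch for LinearDataGenerator (argument values are
# illustrative, not taken from the source):
#
#   points = LinearDataGenerator.generateLinearInput(
#       intercept=0.0, weights=[1.0, 2.0], xMean=[0.0, 0.0],
#       xVariance=[1.0, 1.0], nPoints=50, seed=42, eps=0.1)
#   rdd = LinearDataGenerator.generateLinearRDD(sc, nexamples=100,
#                                               nfeatures=3, eps=0.1)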
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.util tests")\
.getOrCreate()
globs['spark'] = spark
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testAsSerializedGraph(self):
dataset = dataset_ops.Dataset.range(10)
graph = graph_pb2.GraphDef().FromString(
self.evaluate(dataset._as_serialized_graph()))
self.assertTrue(any(node.op == "RangeDataset" for node in graph.node))
def testAsSerializedGraphStateful(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda _: random_ops.random_uniform(()))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(
dataset._as_serialized_graph(external_state_policy=distribute_options
.ExternalStatePolicy.FAIL))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(init_from_file=[True, False])))
def testLookupTableGraphSerialization(self, init_from_file):
if init_from_file:
file = os.path.join(self.get_temp_dir(), "lookup_table_graph_serialize")
with open(file, "w") as f:
f.write("10\n11\n")
initializer = lookup_ops.TextFileInitializer(
file, dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER,
dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE)
else:
keys_tensor = constant_op.constant([0, 1], dtype=dtypes.int64)
vals_tensor = constant_op.constant([10, 11])
initializer = lookup_ops.KeyValueTensorInitializer(
keys_tensor, vals_tensor)
table = lookup_ops.StaticHashTable(initializer, -1)
dataset = dataset_ops.Dataset.range(3)
dataset = dataset.map(table.lookup)
self.evaluate(lookup_ops.tables_initializer())
round_tripped = self.graphRoundTrip(dataset)
del table
del dataset
self.assertDatasetProduces(
round_tripped, [10, 11, -1], requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testAsFunctionWithMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = dataset_ops._VariantDataset(
variant, original_dataset.element_spec)
self.assertDatasetProduces(revived_dataset, range(0, 10, 2))
@combinations.generate(test_base.default_test_combinations())
def testAsFunctionWithMapInFlatMap(self):
if not context.executing_eagerly():
self.skipTest("Only works executing eagerly")
with ops.device("CPU"):
original_dataset = dataset_ops.Dataset.range(5).flat_map(
lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2))
fn = original_dataset._trace_variant_creation()
variant = fn()
revived_dataset = dataset_ops._VariantDataset(
variant, original_dataset.element_spec)
self.assertDatasetProduces(revived_dataset, list(original_dataset))
def _testNumInputs(self, dataset, num_inputs):
self.assertLen(dataset._inputs(), num_inputs)
@combinations.generate(test_base.default_test_combinations())
def testFixedLengthRecordInputs(self):
dataset = readers.FixedLengthRecordDataset("", 42)
self._testNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorInputs(self):
def gen():
yield 42
dataset = dataset_ops.Dataset.from_generator(gen, dtypes.int32)
self._testNumInputs(dataset, 1)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsInputs(self):
dataset = dataset_ops.Dataset.from_tensors([42])
self._testNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testRangeInputs(self):
dataset = dataset_ops.Dataset.range(10)
self._testNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testTextLineInputs(self):
dataset = readers.TextLineDataset("")
self._testNumInputs(dataset, 0)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordInputs(self):
dataset = readers.TFRecordDataset("")
self._testNumInputs(dataset, 1)
@combinations.generate(
combinations.combine(tf_api_version=1, mode=["eager", "graph"]))
def testDatasetComplexSourceInputs(self):
dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(
sparse_tensor.SparseTensor(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])))
self.assertEmpty(dataset_fn._inputs())
def _testUnaryInputs(self, dataset_fn):
input_dataset = dataset_ops.Dataset.range(0)
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testBatchInputs(self):
self._testUnaryInputs(lambda x: x.batch(10))
@combinations.generate(test_base.default_test_combinations())
def testCacheInputs(self):
self._testUnaryInputs(lambda x: x.cache())
@combinations.generate(test_base.default_test_combinations())
def testFilterInputs(self):
self._testUnaryInputs(lambda x: x.filter(lambda x: True))
@combinations.generate(test_base.default_test_combinations())
def testFlatMapInputs(self):
self._testUnaryInputs(
lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)))
@combinations.generate(test_base.default_test_combinations())
def testMapInputs(self):
self._testUnaryInputs(lambda x: x.map(lambda x: x))
@combinations.generate(test_base.default_test_combinations())
def testPaddedBatchInputs(self):
self._testUnaryInputs(lambda x: x.padded_batch(10, []))
@combinations.generate(test_base.default_test_combinations())
def testParallelMapInputs(self):
self._testUnaryInputs(lambda x: x.map(lambda x: x, num_parallel_calls=2))
@combinations.generate(test_base.default_test_combinations())
def testRepeatInputs(self):
self._testUnaryInputs(lambda x: x.repeat())
@combinations.generate(test_base.default_test_combinations())
def testShuffleInputs(self):
self._testUnaryInputs(lambda x: x.shuffle(10))
@combinations.generate(test_base.default_test_combinations())
def testSkipInputs(self):
self._testUnaryInputs(lambda x: x.skip(1))
@combinations.generate(test_base.default_test_combinations())
def testTakeInputs(self):
self._testUnaryInputs(lambda x: x.take(1))
@combinations.generate(test_base.default_test_combinations())
def testWindowInputs(self):
self._testUnaryInputs(lambda x: x.window(10))
@combinations.generate(test_base.default_test_combinations())
def testUnaryTransformationInputsApply(self):
input_dataset = dataset_ops.Dataset.range(0)
dataset = input_dataset.apply(lambda dataset: dataset.cache())
self.assertEqual([input_dataset], dataset._inputs())
def _testInputsWithInterleaveFn(self, dataset_fn, interleave_parallelism):
input_dataset = dataset_ops.Dataset.range(0)
dataset = input_dataset.interleave(
lambda x: dataset_ops.Dataset.range(0),
cycle_length=2,
num_parallel_calls=interleave_parallelism)
self.assertEqual([input_dataset], dataset._inputs())
@combinations.generate(test_base.default_test_combinations())
def testParallelInterleaveInputs(self):
self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), 2)
@combinations.generate(test_base.default_test_combinations())
def testInterleaveInputs(self):
self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), None)
@combinations.generate(test_base.default_test_combinations())
def testNoWarnings(self):
with test.mock.patch.object(warnings, "warn") as mock_log:
dataset_ops.Dataset.range(0).interleave(
lambda x: dataset_ops.Dataset.range(0), cycle_length=2)
self.assertEmpty(mock_log.call_args_list)
def _testBinaryInputs(self, dataset_fn):
input1 = dataset_ops.Dataset.range(0)
input2 = dataset_ops.Dataset.range(1)
self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testConcatenateInputs(self):
self._testBinaryInputs(lambda x, y: x.concatenate(y))
def _testVariadicInputs(self, dataset_fn, input_datasets):
self.assertEqual(
nest.flatten(input_datasets),
dataset_fn(input_datasets)._inputs())
@combinations.generate(test_base.default_test_combinations())
def testZipOneInputs(self):
input_datasets = dataset_ops.Dataset.range(0)
self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testZipNestInputs(self):
input_datasets = (dataset_ops.Dataset.range(0),
(dataset_ops.Dataset.range(1),
dataset_ops.Dataset.range(2)))
self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testZipTupleInputs(self):
input_datasets = (dataset_ops.Dataset.range(0),
dataset_ops.Dataset.range(1))
self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
@combinations.generate(test_base.default_test_combinations())
def testFunctions(self):
dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
self.assertLen(dataset._functions(), 1)
@combinations.generate(test_base.default_test_combinations())
def testCollectInputs(self):
ds1 = dataset_ops.Dataset.range(0)
ds2 = ds1.concatenate(ds1)
ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
inputs = []
queue = [ds3]
while queue:
ds = queue[0]
queue = queue[1:]
queue.extend(ds._inputs())
inputs.append(ds)
self.assertEqual(5, inputs.count(ds1))
self.assertEqual(2, inputs.count(ds2))
self.assertEqual(1, inputs.count(ds3))
def _testDatasetSpec(self, tf_value, expected_element_structure):
dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value)
dataset_structure = structure.type_spec_from_value(dataset)
self.assertIsInstance(dataset_structure, dataset_ops.DatasetSpec)
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset), expected_element_structure))
self.assertEqual([dtypes.variant],
structure.get_flat_tensor_types(dataset_structure))
self.assertEqual([tensor_shape.TensorShape([])],
structure.get_flat_tensor_shapes(dataset_structure))
# Assert that the `Dataset` survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_dataset = dataset_structure._from_tensor_list(
dataset_structure._to_tensor_list(dataset))
value = tf_value
if isinstance(value, dataset_ops.Dataset):
self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x))
elif isinstance(value, optional_ops.Optional):
self.assertDatasetProduces(
round_trip_dataset.map(lambda opt: opt.get_value()),
[self.evaluate(value.get_value())],
requires_initialization=True)
else:
self.assertDatasetProduces(
round_trip_dataset, [self.evaluate(tf_value)],
requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testTensorDatasetSpec(self):
self._testDatasetSpec(
constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32))
@combinations.generate(test_base.default_test_combinations())
def testSparseTensorDatasetSpec(self):
self._testDatasetSpec(
sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32))
@combinations.generate(test_base.default_test_combinations())
def testNestDatasetSpec(self):
self._testDatasetSpec(
{
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (
tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string),
)
})
@combinations.generate(test_base.default_test_combinations())
def testDatasetDatasetSpec(self):
self._testDatasetSpec(
dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3])),
dataset_ops.DatasetSpec(tensor_spec.TensorSpec([], dtypes.int32)))
@combinations.generate(test_base.default_test_combinations())
def testOptionalDatasetSpec(self):
self._testDatasetSpec(
optional_ops.Optional.from_value(37.0),
optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32)))
@combinations.generate(test_base.graph_only_combinations())
def testSameGraphError(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "must be from the same graph"):
dataset = dataset.batch(2)
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testSameGraphErrorOneShot(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError, "Please ensure that all datasets in the pipeline are "
"created in the same graph as the iterator."):
_ = dataset_ops.make_one_shot_iterator(dataset)
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testSameGraphErrorInitializable(self):
dataset = dataset_ops.Dataset.range(10)
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError, "Please ensure that all datasets in the pipeline are "
"created in the same graph as the iterator."):
_ = dataset_ops.make_initializable_iterator(dataset)
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(execution_mode=[context.ASYNC, context.SYNC])))
def testEagerIteration(self, execution_mode):
with context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
for foo in dataset:
self.assertEqual(val, foo.numpy())
val += 1
@combinations.generate(test_base.default_test_combinations())
def testDatasetAsFunctionArgument(self):
@def_function.function
def _uses_dataset(d):
accumulator = array_ops.zeros([], dtype=dtypes.int64)
for value in d:
accumulator += value
return accumulator
with ops.device("CPU"):
first_dataset = dataset_ops.Dataset.range(10)
self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset)))
second_dataset = dataset_ops.Dataset.range(11)
self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset)))
first_concrete = _uses_dataset.get_concrete_function(first_dataset)
# The dataset should not be a captured input
self.assertEmpty(first_concrete.graph.captures)
# The two datasets have the same structure and so should re-use a trace.
self.assertIs(first_concrete,
_uses_dataset.get_concrete_function(second_dataset))
# With a different structure we should use a different trace.
self.assertIsNot(
first_concrete,
_uses_dataset.get_concrete_function(
dataset_ops.Dataset.zip((first_dataset, second_dataset))))
@combinations.generate(test_base.default_test_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(ds):
trace_count[0] += 1
counter = np.int64(0)
for elem in ds:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
for _ in range(10):
self.assertEqual(self.evaluate(f(dataset)), 10)
self.assertEqual(self.evaluate(f(dataset2)), 45)
self.assertEqual(trace_count[0], 1)
# pylint: disable=g-long-lambda,unnecessary-lambda
@combinations.generate(test_base.default_test_combinations())
def testLegacyStructureAPI(self):
components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]),
np.array([6., 7.])),
np.array([8, 9, 10], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.shuffle(10, 10)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.repeat(-1)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(lambda x, y, z: True)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.take(5)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([3], ([2], [2]), [3]),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEqual(
((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(lambda x, y: dataset_ops.Dataset.from_tensors(
((x[0], x[1]), (y[0], y[1]))))
self.assertEqual(
((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual((([3], [3]), ([2], [2])),
dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.batch(32)
self.assertEqual(
((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
dataset_ops.get_legacy_output_types(dataset))
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual(
(([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(
dataset_output_shapes,
[s.as_list() for s in nest.flatten(dataset_output_shapes)]))
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3],
dtype=np.int64), (np.array([4., 5., 6.]),
np.array([7., 8., 9.])),
np.array([10, 11, 12], dtype=np.int64))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEqual(
(dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual(([], ([], []), []),
dataset_ops.get_legacy_output_shapes(dataset))
@combinations.generate(test_base.default_test_combinations())
def testNoneComponent(self):
dataset = dataset_ops.Dataset.from_tensors((42, None))
if context.executing_eagerly():
self.assertDatasetProduces(dataset, expected_output=[(42, None)])
else:
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_first, next_second = iterator.get_next()
self.assertEqual(next_second, None)
with self.cached_session() as sess:
self.assertEqual(sess.run(next_first), 42)
@combinations.generate(test_base.default_test_combinations())
def testNoneComponentInFunction(self):
@def_function.function
def fn(ds):
total = 0
it = iter(ds)
for elem in it:
x, _ = elem
total += x
return total
dataset = dataset_ops.Dataset.range(
10, output_type=dtypes.int32).map(lambda x: (x, None))
self.assertEqual(self.evaluate(fn(dataset)), 45)
@combinations.generate(test_base.default_test_combinations())
def testIncorrectPythonStructure(self):
# Tests that an exception is raised (as opposed to a segfault) when the
# Python structure assigned to a dataset is incorrect.
dataset = dataset_ops.Dataset.range(10)
spec = tensor_spec.TensorSpec([], dtypes.int64)
new_structure = (spec, spec)
dataset = dataset_ops._RestructuredDataset(dataset, new_structure)
dataset = dataset.map(lambda x, y: y)
with self.assertRaisesOpError(""):
self.getDatasetOutput(dataset)
@combinations.generate(test_base.default_test_combinations())
def testNamedTupleStructure(self):
Foo = collections.namedtuple("Foo", ["a", "b"])
x = Foo(a=3, b="test")
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset_ops.Dataset.from_tensor_slices([dataset, dataset])
self.assertEqual(
str(dataset.element_spec),
"DatasetSpec(Foo(a=TensorSpec(shape=(), dtype=tf.int32, name=None), "
"b=TensorSpec(shape=(), dtype=tf.string, name=None)), TensorShape([]))")
@combinations.generate(test_base.eager_only_combinations())
def testDebugModeEagerExecution(self):
dataset_ops.toggle_debug_mode(True)
counter = []
ds = dataset_ops.Dataset.range(10)
def map_fn(x):
counter.append(1)
return x
ds = ds.map(map_fn)
self.assertDatasetProduces(ds, list(range(10)))
# The body of `map_fn` will be executed 11 times since the implementation
# traces the function to figure out what the types and shapes of its
# outputs are.
self.assertLen(counter, 11)
dataset_ops.toggle_debug_mode(False)
@combinations.generate(test_base.eager_only_combinations())
def testDebugModeSequentialExecution(self):
dataset_ops.toggle_debug_mode(True)
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
testing.assert_next(["Interleave", "Map", "Batch", "FiniteTake"]))
ds = ds.interleave(
lambda x: dataset_ops.Dataset.from_tensors(x),
cycle_length=10,
num_parallel_calls=10)
ds = ds.map(lambda x: x * x, num_parallel_calls=10)
ds = ds.batch(batch_size=5, num_parallel_calls=2)
ds = ds.prefetch(buffer_size=2)
ds = ds.take(2)
self.assertDatasetProduces(ds, [[0, 1, 4, 9, 16], [25, 36, 49, 64, 81]])
dataset_ops.toggle_debug_mode(False)
if __name__ == "__main__":
test.main()
|
|
###########################################################################################
# Author: Josh Joseph joshmd@bu.edu
# 4/29/16
# This is the main server file for PCR hero....
from bottle import route, run, template, get, post, request, response, redirect, static_file
import m3
import os
pcrDB = m3.get_db("pcrhero")
HASHWORD = 'applesauce'
HOSTIP = 'http://www.pcrhero.org:8000'
HOMEDIR = '/home/ubuntu/pythonproject/'
###########################################################################################
### File get path functions -- This section can be cleaned up if all file requests are listed
### with their appropriate file path after the root directory... #TODO
############################################################################################
@get('/static/<filename:path>')
def static(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/static/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/badges/<filename:path>')
def badge(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/badges/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/issuers/<filename:path>')
def issuer(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/issuers/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/users/<filename:path>')
def user(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/users/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/images/<filename:path>')
def image(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/images/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/criteria/<filename:path>')
def criteria(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/criteria/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
@get('/awardedbadges/<filename:path>')
def awardedbadge(filename):
return static_file(filename, root='/home/ubuntu/pythonproject/awardedbadges/')
##This is a filepath to static addresses on the site. You will need to use an appropriate
##address (or a system link for security purposes) when using on a different host
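## NOTE (suggested refactor, not part of the original code): once every file
## request is served relative to HOMEDIR, the repeated handlers above could be
## collapsed into a single route, e.g. (sketch, untested):
##
##   @get('/<subdir:re:static|badges|issuers|users|images|criteria|awardedbadges>/<filename:path>')
##   def serve_file(subdir, filename):
##       return static_file(filename, root=os.path.join(HOMEDIR, subdir))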
##########################################################################################
#### MAIN ROUTING FUNCTIONS
##########################################################################################
@route('/')
def home():
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h1>PCR Hero - your journey to achievement begins here!</h1>
</body>
'''
####################### TO DO - put remainder of register logic into a tpl file rather than expanding here
@get('/register')
def show_registration():
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h1>Thanks for registering with PCR Hero - your journey to achievement begins here!</h1>
<form action="" method="POST">
<p>
<label for="name">What is your name?</label>
<input type="text" name="name"/> </p>
<p>
<label for="email">What is your email?</label>
<input type="email" name="email"/> </p>
<p>
<label for="password">Enter a strong password:</label>
<input type="password" name="password"/> </p>
<p>
<label for="password">Reenter that strong password:</label>
<input type="password" name="passwordcheck"/> </p>
<input type="submit"/>
</form>
</body>
'''
@post('/register')
def show_name():
name = request.params.name
email = request.params.email
password = request.params.password
passwordcheck = request.params.passwordcheck
if(password != passwordcheck):
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h1>Thanks for registering with PCR Hero - your journey to achievement begins here!</h1>
<form action="" method="POST">
<p>
<label for="name">What is your name?</label>
<input type="text" name="name" required/> </p>
<p>
<label for="email">What is your email?</label>
<input type="email" name="email" required/> </p>
<p>
<label for="password">Enter a strong password:</label>
<input type="password" name="password" required/> </p>
<p>
<label for="password">Reenter that strong password:
<input type="password" name="passwordcheck" required/>
<div style = "color: red; display: inline;"> Passwords need to match! </div> </label></p>
<input type="submit"/>
</form>
</body>
'''
elif(m3.get_person(pcrDB, email) != None):
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h1>Thanks for registering with PCR Hero - your journey to achievement begins here!</h1>
<form action="" method="POST">
<p>
<label for="name">What is your name?
<input type="text" name="name"/>
</label></p>
<p>
<label for="email">What is your email?</label>
<input type="email" name="email" required/>
<div style = "color: red; display: inline;"> That email is taken! </div></p>
<p>
<label for="password">Enter a strong password:</label>
<input type="password" name="password" required/> </p>
<p>
<label for="password">Reenter that strong password:</label>
<input type="password" name="passwordcheck" required/>
</p>
<input type="submit"/>
</form>
</body>
'''
else:
## It worked!
## Hash the password
hashword = m3.shaHash(password, "deadsea")
## create the new user object
newUser = m3.PCRUser(email, name, hashword)
m3.add_person(pcrDB, newUser)
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h2>Hello, {}!</h2><p>Thanks for registering.</p>
</body>
</html>
'''.format(request.POST.name)
########## END TODO (reminder, putting this in a tpl will save like ~70 lines of code :)
@get('/myprofile')
def profile():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
userapps = m3.get_users_apps(pcrDB, useremail)
applist = {}
for appname in userapps:
applist[appname] = (m3.get_app(pcrDB, appname))
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero - {}</h1>
'''.format(useremail) + template('profile.tpl', badges=userbadges, apps=applist) + "</body>"
else:
redirect("/login")
@get('/login')
def show_registration():
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + '''\
<h1>Welcome to PCR Hero - please login here!</h1>
<form action="" method="POST">
<p>
<label for="email">Email:</label>
<input type="email" name="email" required/> </p>
<p>
<label for="password">Password:</label>
<input type="password" name="password" required/> </p>
<p>
<input type="submit"/> </p>
</form>
</body>
'''
@post('/login')
def show_name():
email = request.params.email
password = request.params.password
hashword = m3.shaHash(password, "deadsea")
### need to begin with checking for username (email) - otherwise we'll get a keyerror
if(m3.get_person(pcrDB, email) == None):
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + "Sorry - this username is not registered!"
else:
### need to load up the user's hashword for comparison purposes
loginHashword = m3.get_user_hashword(pcrDB, email)
if(hashword != loginHashword):
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + "Sorry - your password is incorrect!"
elif(hashword == loginHashword):
response.set_cookie('loggedin', email, max_age= 600, secret='applesauce', path='/')
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce')) + "<h2>Hello, {}!<p>Welcome back!</p></h2>".format(request.POST.email)
else:
return template('base.tpl', title='PCR Hero', email=request.get_cookie('loggedin', secret='applesauce'))+ "Sorry, something went wrong!"
@get('/admin-badge')
def badge_menu():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
issuers = m3.get_issuers(pcrDB)
image_path = "/home/ubuntu/pythonproject/images"
available_images = os.listdir(image_path)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-badge.tpl', badges=userbadges, issuers=issuers, images=available_images) + "</body>"
else:
redirect("/login")
@post('/admin-badge')
def badge_submit():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
issuers = m3.get_issuers(pcrDB)
image_path = "/home/ubuntu/pythonproject/images"
available_images = os.listdir(image_path)
## return args
name = request.params.name
if(m3.find_badge(pcrDB, name) != None):
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:red">A badge with that name already exists!</h2>
'''.format(useremail) + template('admin-badge.tpl', badges=userbadges, issuers=issuers, images=available_images) + "</body>"
else:
description = request.params.description
image = request.params.image
criteria = request.params.criteria
tags = request.params.tags
issuer = request.params.issuer
newBadge = m3.OpenBadge(name, description, image, criteria, tags, issuer)
newBadge.establish_here()
newBadge.add_badge(pcrDB)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:blue">Your badge was successfully created!</h2>
'''.format(useremail) + template('admin-badge.tpl', badges=userbadges, issuers=issuers, images=available_images) + "</body>"
else:
redirect("/login")
@get('/admin-issuer')
def issuer_create_menu():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
issuers = m3.get_issuers(pcrDB)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-issuer.tpl', badges=userbadges, issuers=issuers) + "</body>"
else:
redirect("/login")
@post('/admin-issuer')
def issuer_create_submit():
name = request.params.name
description = request.params.description
url = request.params.url
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
issuers = m3.get_issuers(pcrDB)
if(m3.find_issuer(pcrDB, name) != None):
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1> <p style="color:red;">Sorry, that issuer is taken!</p>
'''.format(useremail) + template('admin-issuer.tpl', badges=userbadges, issuers=issuers) + "</body>"
else:
newIssuer = m3.PCRIssuer(name, description, url)
m3.add_issuer(pcrDB, newIssuer)
newIssuer.establish_here()
issuers = m3.get_issuers(pcrDB)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1> <p style="color:blue;">Your issuer has been created!</p>
'''.format(useremail) + template('admin-issuer.tpl', badges=userbadges, issuers=issuers) + "</body>"
else:
redirect("/login")
@get('/admin-awards')
def badge_award_menu():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
badge_list = m3.get_badges(pcrDB)
user_list = m3.get_users(pcrDB)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-award.tpl', badges=badge_list, users=user_list) + "</body>"
else:
redirect("/login")
@post('/admin-awards')
def badge_award_submit():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
badge_list = m3.get_badges(pcrDB) # list of all badges
user_list = m3.get_users(pcrDB) # list of all users
current_user = request.params.user
current_user_badges = m3.get_users_badges(pcrDB, current_user)
current_badge = request.params.badge
## check that the user doesn't already have the badge
# if so, send back to the menu
if(current_badge in current_user_badges):
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:red;">That user already has that badge!</h2>
'''.format(useremail) + template('admin-award.tpl', badges=badge_list, users=user_list) + "</body>"
# if not, award the badge
## awarding badge magic
else:
m3.award_badge_to_user(pcrDB, current_badge, current_user)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:blue;">Badge successfully awarded!<h2>
'''.format(useremail) + template('admin-award.tpl', badges=badge_list, users=user_list) + "</body>"
else:
redirect("/login")
@get('/admin-images')
def images_menu():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
image_path = "/home/ubuntu/pythonproject/images"
available_images = os.listdir(image_path)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-images.tpl', badges=userbadges, images=available_images, image_path=image_path) + "</body>"
else:
redirect("/login")
@post('/admin-images')
def upload_image():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
userbadges = m3.get_users_badges(pcrDB, useremail)
image_path = "/home/ubuntu/pythonproject/images"
available_images = os.listdir(image_path)
upload = request.files.image
name, ext = os.path.splitext(upload.filename)
        if ext not in ('.png',):
return "File extension not allowed."
save_path = "/home/ubuntu/pythonproject/images"
file_path = "{path}/{file}".format(path=save_path, file=upload.filename)
upload.save(file_path)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:blue">Image successfully uploaded!</h2>
'''.format(useremail) + template('admin-images.tpl', badges=userbadges, images=available_images, image_path=image_path) + "</body>"
else:
redirect("/login")
@get('/admin-tasks')
def tasks_menu():
if(request.get_cookie('loggedin')):
useremail = request.get_cookie('loggedin', secret='applesauce')
badge_list = m3.get_badges(pcrDB)
user_list = m3.get_users(pcrDB)
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-tasks.tpl', badges=badge_list, users=user_list, typeselection = 0) + "</body>"
else:
redirect("/login")
@post('/admin-tasks')
def tasks_menu_post():
if(request.get_cookie('loggedin')):
submitted = request.params.flag
typeselection = request.params.typeselection
badge_list = m3.get_badges(pcrDB)
user_list = m3.get_users(pcrDB)
app_list = m3.get_all_apps(pcrDB)
useremail = request.get_cookie('loggedin', secret='applesauce')
if(submitted == "False"):
if(typeselection != 0):
app = request.params.app
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
'''.format(useremail) + template('admin-tasks.tpl', badges=badge_list, users=user_list, app_list=app_list, typeselection = typeselection, app = app) + "</body>"
else:
user = request.params.user
badge = request.params.badge
app = request.params.app
print("typeselection = %s " % typeselection)
### type handling for task assignment:
if(typeselection == "percent"):
circuit = request.params.circuit
score = float(request.params.score)
percent = int(request.params.percent)
NewTask = m3.PercentTask(user, badge, app, circuit, score, percent)
elif(typeselection == "repeat"):
circuit = request.params.circuit
repeat = int(request.params.repeat)
NewTask = m3.RepeatTask(user, badge, app, circuit, repeat)
elif(typeselection == "unique"):
unique = request.params.unique
NewTask = m3.UniqueTask(user, badge, app, unique)
elif(typeselection == "timetrial"):
days = int(request.params.days)
hours = int(request.params.hours)
minutes = int(request.params.minutes)
circuit = request.params.circuit
tasknum = int(request.params.tasknum)
NewTask = m3.TimeTrialTask(user, badge, app, days, hours, minutes, circuit, tasknum)
else: #performance
circuit = request.params.circuit
targetyield = int(request.params.targetyield)
cost = int(request.params.cost)
NewTask = m3.PerformanceTask(user, badge, app, circuit, targetyield, cost)
### task is assigned, now time to see if it's unique...
print(NewTask.output())
result = NewTask.assign(pcrDB)
if(result):
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:blue;">Task successfully started...</h2>
'''.format(useremail) + template('admin-tasks.tpl', badges=badge_list, users=user_list, typeselection = 0) + "</body>"
else:
return template('base.tpl', title='PCR Hero', email= useremail) + '''\
<h1>Welcome to PCR Hero's Admin Menu - {}</h1>
<h2 style="color:red;">Task already assigned to user...</h2>
'''.format(useremail) + template('admin-tasks.tpl', badges=badge_list, users=user_list, typeselection = 0) + "</body>"
else:
redirect("/login")
@post('/submit')
def submit():
username = request.params.user
appname = request.params.app
submittedcircuit = request.params.circuit
tasks = m3.get_users_tasks_for_app(pcrDB, username, appname)
taskarray = []
for task in tasks:
taskarray.append(task)
print('TaskList----')
for task in taskarray:
print(task)
print('\n')
# Step 1 - evaluate for tasks that have expired and remove them (time trials)
print('Check for timetrials...')
for task in taskarray:
if(task['type'] == 'timetrial'):
if(m3.check_task_datetime(pcrDB, task)):
## check_task_datetime returns True if time's up
print("%s's time is up!" % task['badge'])
m3.remove_task_by_id(pcrDB, task['_id']) ## delete task now that badge has been awarded
taskarray.remove(task) ## remove from taskarray
print("Task removed...")
# # Step 2 - evaluate badges and award them if completed
# ### Step 3 - evaluate for tasks that need unique submissions or multiple tasks (unique, repeat, timetrial)
for task in taskarray:
if(task['type'] == 'unique'):
pass ## This is the one circuit type that is going to require a little more work
## What is needed is for a mongodb call to $find the {circuit: circuit name} in the
elif(task['type'] == 'repeat'):
if(task['circuit'] == submittedcircuit):
m3.increment_task_by_id(pcrDB, task['_id'], "count")
## check if criteria met...
if(task['count'] >= task['repeatTarget']):
m3.award_badge_to_user(pcrDB, task['badge'], task['user'])
print("A new badge was awarded to %s!" % task['user'])
m3.remove_task_by_id(pcrDB, task['_id']) ## delete task now that badge has been awarded
taskarray.remove(task) ## remove from taskarray
print("Task removed...")
elif(task['type'] == 'timetrial'):
if(task['circuit'] == submittedcircuit):
m3.increment_task_by_id(pcrDB, task['_id'], "tasksDone")
## check if criteria met...
if(task['tasksDone'] >= task['tasknumGoal']):
m3.award_badge_to_user(pcrDB, task['badge'], task['user'])
print("A new badge was awarded to %s!" % task['user'])
m3.remove_task_by_id(pcrDB, task['_id']) ## delete task now that badge has been awarded
taskarray.remove(task) ## remove from taskarray
print("Task removed...")
### Step 4 - compare percentage scores
elif(task['type'] == 'percent'):
if(task['circuit'] == submittedcircuit):
                newScore = request.params.score
## check if criteria met...
if(newScore >= task['goalScore']):
m3.award_badge_to_user(pcrDB, task['badge'], task['user'])
print("A new badge was awarded to %s!" % task['user'])
m3.remove_task_by_id(pcrDB, task['_id']) ## delete task now that badge has been awarded
taskarray.remove(task) ## remove from taskarray
print("Task removed...")
## else, check if this is an improvement - this will be useful once the tasks badge is implemented
if(newScore >= task['score']):
m3.update_task_by_id(pcrDB, task['_id'], "score", newScore)
print("Score improved! Getting closer!")
### Step 5 - check cost/performance scores
elif(task['type'] == 'performance'):
if(task['circuit'] == submittedcircuit):
                newScore = request.params.score
newCost = request.params.cost
## check if criteria met...
if(newScore >= task['targetyield']):
if(newCost <= task['cost']):
m3.award_badge_to_user(pcrDB, task['badge'], task['user'])
print("A new badge was awarded to %s!" % task['user'])
m3.remove_task_by_id(pcrDB, task['_id']) ## delete task now that badge has been awarded
taskarray.remove(task) ## remove from taskarray
print("Task removed...")
else:
pass ## can always add new task types
@get('/logout')
def logout():
response.set_cookie('loggedin', '', path='/')
redirect("/")
run(host='172.31.57.1', port=8000, debug=True)
|
|
#!/usr/bin/env python
#
# Three balls in a spinning bowl, with gravity and a ground
# OpenCascade contactors
#
# specification of center of mass & moments of inertia
#
from __future__ import print_function
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_io import Hdf5
from siconos import numerics
# for osi specification:
# from siconos import kernel
from OCC.BRepAlgoAPI import BRepAlgoAPI_Cut
from OCC.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeSphere
from OCC.gp import gp_Pnt, gp_Ax1, gp_Dir
from OCC.GProp import GProp_GProps
from OCC.BRepGProp import brepgprop_VolumeProperties
from math import pi
# original implementation with occ backend
import siconos.io.mechanics_io
siconos.io.mechanics_io.set_backend('occ')
# ball shape
ball = BRepPrimAPI_MakeSphere(.15).Shape()
ball_props = GProp_GProps()
brepgprop_VolumeProperties(ball, ball_props)
ball_mass = ball_props.Mass()
ball_com = ball_props.CentreOfMass()
ball_inertia = ball_props.MatrixOfInertia()
ball_I1 = ball_props.MomentOfInertia(gp_Ax1(ball_com, gp_Dir(1, 0, 0)))
ball_I2 = ball_props.MomentOfInertia(gp_Ax1(ball_com, gp_Dir(0, 1, 0)))
ball_I3 = ball_props.MomentOfInertia(gp_Ax1(ball_com, gp_Dir(0, 0, 1)))
print('ball mass:', ball_mass)
print('ball center of mass:', (ball_com.Coord(1),
                               ball_com.Coord(2),
                               ball_com.Coord(3)))
print('ball moment of inertia:', (ball_I1, ball_I2, ball_I3))
# the ground
ground = BRepPrimAPI_MakeBox(gp_Pnt(-20, -20, 0), 40., 40., .5).Shape()
# bowl shape
hspherei = BRepPrimAPI_MakeSphere(.9, pi).Shape()
hsphereo = BRepPrimAPI_MakeSphere(1., pi).Shape()
bowl = BRepAlgoAPI_Cut(hsphereo, hspherei).Shape()
bowl_props = GProp_GProps()
brepgprop_VolumeProperties(bowl, bowl_props)
bowl_mass = bowl_props.Mass()
bowl_com = bowl_props.CentreOfMass()
bowl_inertia = bowl_props.MatrixOfInertia()
bowl_I1 = bowl_props.MomentOfInertia(gp_Ax1(bowl_com, gp_Dir(1, 0, 0)))
bowl_I2 = bowl_props.MomentOfInertia(gp_Ax1(bowl_com, gp_Dir(0, 1, 0)))
bowl_I3 = bowl_props.MomentOfInertia(gp_Ax1(bowl_com, gp_Dir(0, 0, 1)))
print('bowl mass:', bowl_mass)
print('bowl center of mass:', (bowl_com.Coord(1),
                               bowl_com.Coord(2),
                               bowl_com.Coord(3)))
print('bowl moment of inertia:', (bowl_I1, bowl_I2, bowl_I3))
# Creation of the hdf5 file for input/output
with Hdf5() as io:
io.addOccShape('Contact', bowl)
io.addOccShape('Ground', ground)
io.addOccShape('Ball', ball)
io.addObject('bowl',
[Contactor('Contact',
contact_type='Face',
contact_index=0,
),
Contactor('Contact',
contact_type='Face',
contact_index=3,
),
Contactor('Contact',
contact_type='Edge',
contact_index=0,
)],
mass=bowl_mass,
orientation=([1, 0, 0], -pi / 2),
translation=[0, 0, 2],
velocity=[0, 0, 0, 0, 2, 0],
center_of_mass=[bowl_com.Coord(1),
bowl_com.Coord(2),
bowl_com.Coord(3)],
inertia=[bowl_I1, bowl_I2, bowl_I3])
#
# balls
#
io.addObject('ball1',
[Contactor('Ball',
instance_name='Ball1',
contact_type='Face',
contact_index=0)],
translation=[0, .3, 2],
mass=.1,
inertia=[ball_I1, ball_I2, ball_I3])
io.addObject('ball2',
[Contactor('Ball',
instance_name='Ball2',
contact_type='Face',
contact_index=0)],
translation=[0, 0, 2], mass=.1,
inertia=[ball_I1, ball_I2, ball_I3])
io.addObject('ball3',
[Contactor('Ball',
instance_name='Ball3',
contact_type='Face',
contact_index=0)],
translation=[0, -.3, 2],
mass=.1,
inertia=[ball_I1, ball_I2, ball_I3])
#
# ground, static object (mass=0)
#
io.addObject('ground',
[Contactor('Ground',
contact_type='Face',
contact_index=5)],
mass=0,
translation=[0, 0, 0])
#
# interactions, order ball -> bowl is important
# ball -> ground if some balls are ejected
io.addInteraction('bowl-ground',
'bowl', 'Contact-0',
'ground', 'Ground-0',
distance_calculator='cadmbtb',
offset=0.01)
io.addInteraction('bowl-ball1',
'ball1', 'Ball1',
'bowl', 'Contact-1',
distance_calculator='cadmbtb',
offset=0.05)
io.addInteraction('bowl-ball2',
'ball2', 'Ball2',
'bowl', 'Contact-1',
distance_calculator='cadmbtb',
offset=0.05)
io.addInteraction('bowl-ball3',
'ball3', 'Ball3',
'bowl', 'Contact-1',
distance_calculator='cadmbtb',
offset=0.05)
io.addInteraction('ball1-ball2',
'ball1', 'Ball1',
'ball2', 'Ball2',
distance_calculator='cadmbtb',
offset=0.05)
io.addInteraction('ball1-ball3',
'ball1', 'Ball1',
'ball3', 'Ball3',
distance_calculator='cadmbtb',
offset=0.05)
io.addInteraction('ball2-ball3',
'ball2', 'Ball2',
'ball3', 'Ball3',
distance_calculator='cadmbtb',
offset=0.05)
io.addNewtonImpactFrictionNSL('contact', mu=0.3, e=0.)
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
with Hdf5(mode='r+') as io:
io.run(with_timer=False,
gravity_scale=1,
t0=0,
T=10,
h=0.0005,
theta=0.50001,
Newton_max_iter=20,
set_external_forces=None,
solver=numerics.SICONOS_FRICTION_3D_NSGS,
itermax=100000,
tolerance=1e-8,
numerics_verbose=False,
output_frequency=None
# osi=kernel.MoreauJeanCombinedProjectionOSI
)
|
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic RHF spin-spin coupling (SSC) constants
Ref.
Chem. Rev., 99, 293
JCP, 113, 3530
JCP, 113, 9402
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import tools
from pyscf.lib import logger
from pyscf.scf import cphf
from pyscf.scf import _response_functions
from pyscf.ao2mo import _ao2mo
from pyscf.dft import numint
from pyscf.data import nist
from pyscf.data.gyro import get_nuc_g_factor
NUMINT_GRIDS = 30
def make_dso(sscobj, mol, dm0, nuc_pair=None):
'''orbital diamagnetic term'''
if nuc_pair is None: nuc_pair = sscobj.nuc_pair
ssc_dia = []
for ia, ja in nuc_pair:
h11 = dso_integral(mol, mol.atom_coord(ia), mol.atom_coord(ja))
a11 = -numpy.einsum('xyij,ji->xy', h11, dm0)
a11 = a11 - a11.trace() * numpy.eye(3)
ssc_dia.append(a11)
mf = sscobj._scf
if getattr(mf, 'with_x2c', None):
raise NotImplementedError('X2C for SSC')
if getattr(mf, 'with_qmmm', None):
raise NotImplementedError('SSC with QM/MM')
if getattr(mf, 'with_solvent', None):
raise NotImplementedError('SSC with Solvent')
return numpy.asarray(ssc_dia) * nist.ALPHA**4
def dso_integral(mol, orig1, orig2):
'''Integral of vec{r}vec{r}/(|r-orig1|^3 |r-orig2|^3)
Ref. JCP, 73, 5718'''
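    # The Gauss-Legendre nodes t on (-1, 1) generated below are mapped to
    # a in (0, inf) via a = 0.8*(1+t)/(1-t); the weights are rescaled by the
    # Jacobian da/dt = 0.8*2/(1-t)**2 of this substitution.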
t, w = numpy.polynomial.legendre.leggauss(NUMINT_GRIDS)
a = (1+t)/(1-t) * .8
w *= 2/(1-t)**2 * .8
fakemol = gto.Mole()
fakemol._atm = numpy.asarray([[0, 0, 0, 0, 0, 0]], dtype=numpy.int32)
fakemol._bas = numpy.asarray([[0, 1, NUMINT_GRIDS, 1, 0, 3, 3+NUMINT_GRIDS, 0]],
dtype=numpy.int32)
p_cart2sph_factor = 0.488602511902919921
fakemol._env = numpy.hstack((orig2, a**2, a**2*w*4/numpy.pi**.5/p_cart2sph_factor))
fakemol._built = True
pmol = mol + fakemol
pmol.set_rinv_origin(orig1)
    # <nabla i, j | k> k is a fictitious basis for numerical integration
mat1 = pmol.intor(mol._add_suffix('int3c1e_iprinv'), comp=3,
shls_slice=(0, mol.nbas, 0, mol.nbas, mol.nbas, pmol.nbas))
# <i, j | nabla k>
mat = pmol.intor(mol._add_suffix('int3c1e_iprinv'), comp=3,
shls_slice=(mol.nbas, pmol.nbas, 0, mol.nbas, 0, mol.nbas))
mat += mat1.transpose(0,3,1,2) + mat1.transpose(0,3,2,1)
return mat
# Note mo10 is the imaginary part of MO^1
def make_pso(sscobj, mol, mo1, mo_coeff, mo_occ, nuc_pair=None):
if nuc_pair is None: nuc_pair = sscobj.nuc_pair
para = []
nocc = numpy.count_nonzero(mo_occ> 0)
nvir = numpy.count_nonzero(mo_occ==0)
atm1lst = sorted(set([i for i,j in nuc_pair]))
atm2lst = sorted(set([j for i,j in nuc_pair]))
atm1dic = dict([(ia,k) for k,ia in enumerate(atm1lst)])
atm2dic = dict([(ia,k) for k,ia in enumerate(atm2lst)])
mo1 = mo1.reshape(len(atm1lst),3,nvir,nocc)
h1 = make_h1_pso(mol, mo_coeff, mo_occ, atm1lst)
h1 = numpy.asarray(h1).reshape(len(atm1lst),3,nvir,nocc)
for i,j in nuc_pair:
# PSO = -Tr(Im[h1_ov], Im[mo1_vo]) + cc = 2 * Tr(Im[h1_vo], Im[mo1_vo])
e = numpy.einsum('xij,yij->xy', h1[atm1dic[i]], mo1[atm2dic[j]])
        para.append(e*4)  # *4 for +c.c. and double occupancy
return numpy.asarray(para) * nist.ALPHA**4
def make_h1_pso(mol, mo_coeff, mo_occ, atmlst):
# Imaginary part of H01 operator
# 1/2(A01 dot p + p dot A01) => (a01p + c.c.)/2 ~ <a01p>
# Im[A01 dot p] = Im[vec{r}/r^3 x vec{p}] = Im[-i p (1/r) x p] = -p (1/r) x p
orbo = mo_coeff[:,mo_occ> 0]
orbv = mo_coeff[:,mo_occ==0]
h1 = []
for ia in atmlst:
mol.set_rinv_origin(mol.atom_coord(ia))
h1ao = -mol.intor_asymmetric('int1e_prinvxp', 3)
h1 += [reduce(numpy.dot, (orbv.T.conj(), x, orbo)) for x in h1ao]
return h1
def make_fc(sscobj, nuc_pair=None):
'''Only Fermi-contact'''
if nuc_pair is None: nuc_pair = sscobj.nuc_pair
mol = sscobj.mol
mo_coeff = sscobj._scf.mo_coeff
mo_occ = sscobj._scf.mo_occ
atm1dic, atm2dic = _uniq_atoms(nuc_pair)
h1 = make_h1_fc(mol, mo_coeff, mo_occ, sorted(atm2dic.keys()))
mo1 = solve_mo1_fc(sscobj, h1)
h1 = make_h1_fc(mol, mo_coeff, mo_occ, sorted(atm1dic.keys()))
para = []
for i,j in nuc_pair:
at1 = atm1dic[i]
at2 = atm2dic[j]
e = numpy.einsum('ij,ij', h1[at1], mo1[at2])
para.append(e*4) # *4 for +c.c. and for double occupancy
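    # The Fermi-contact response is isotropic, so broadcast the scalar couplings
    # onto 3x3 identity matrices.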
return numpy.einsum(',k,xy->kxy', nist.ALPHA**4, para, numpy.eye(3))
def solve_mo1_fc(sscobj, h1):
cput1 = (time.clock(), time.time())
log = logger.Logger(sscobj.stdout, sscobj.verbose)
mol = sscobj.mol
mo_energy = sscobj._scf.mo_energy
mo_coeff = sscobj._scf.mo_coeff
mo_occ = sscobj._scf.mo_occ
nset = len(h1)
eai = 1. / lib.direct_sum('a-i->ai', mo_energy[mo_occ==0], mo_energy[mo_occ>0])
mo1 = numpy.asarray(h1) * -eai
if not sscobj.cphf:
return mo1
orbo = mo_coeff[:,mo_occ> 0]
orbv = mo_coeff[:,mo_occ==0]
nocc = orbo.shape[1]
nvir = orbv.shape[1]
nmo = nocc + nvir
vresp = sscobj._scf.gen_response(singlet=False, hermi=1)
mo_v_o = numpy.asarray(numpy.hstack((orbv,orbo)), order='F')
def vind(mo1):
dm1 = _dm1_mo2ao(mo1.reshape(nset,nvir,nocc), orbv, orbo*2) # *2 for double occupancy
dm1 = dm1 + dm1.transpose(0,2,1)
v1 = vresp(dm1)
v1 = _ao2mo.nr_e2(v1, mo_v_o, (0,nvir,nvir,nmo)).reshape(nset,nvir,nocc)
v1 *= eai
return v1.ravel()
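    # Solve the coupled-perturbed HF equations iteratively with a Krylov solver.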
mo1 = lib.krylov(vind, mo1.ravel(), tol=sscobj.conv_tol,
max_cycle=sscobj.max_cycle_cphf, verbose=log)
log.timer('solving FC CPHF eqn', *cput1)
return mo1.reshape(nset,nvir,nocc)
def make_fcsd(sscobj, nuc_pair=None):
'''FC + SD contributions to 2nd order energy'''
if nuc_pair is None: nuc_pair = sscobj.nuc_pair
mol = sscobj.mol
mo_coeff = sscobj._scf.mo_coeff
mo_occ = sscobj._scf.mo_occ
atm1dic, atm2dic = _uniq_atoms(nuc_pair)
h1 = make_h1_fcsd(mol, mo_coeff, mo_occ, sorted(atm2dic.keys()))
mo1 = solve_mo1_fc(sscobj, h1)
h1 = make_h1_fcsd(mol, mo_coeff, mo_occ, sorted(atm1dic.keys()))
nocc = numpy.count_nonzero(mo_occ> 0)
nvir = numpy.count_nonzero(mo_occ==0)
mo1 = numpy.asarray(mo1).reshape(-1,3,3,nvir,nocc)
h1 = numpy.asarray(h1).reshape(-1,3,3,nvir,nocc)
para = []
for i,j in nuc_pair:
at1 = atm1dic[i]
at2 = atm2dic[j]
e = numpy.einsum('xwij,ywij->xy', h1[at1], mo1[at2])
para.append(e*4) # *4 for +c.c. and double occupancy
return numpy.asarray(para) * nist.ALPHA**4
def make_h1_fc(mol, mo_coeff, mo_occ, atmlst):
coords = mol.atom_coords()
ao = numint.eval_ao(mol, coords)
mo = ao.dot(mo_coeff)
orbo = mo[:,mo_occ> 0]
orbv = mo[:,mo_occ==0]
fac = 8*numpy.pi/3 *.5 # *.5 due to s = 1/2 * pauli-matrix
h1 = []
for ia in atmlst:
h1.append(fac * numpy.einsum('p,i->pi', orbv[ia], orbo[ia]))
return h1
def make_h1_fcsd(mol, mo_coeff, mo_occ, atmlst):
'''MO integrals for FC + SD'''
orbo = mo_coeff[:,mo_occ> 0]
orbv = mo_coeff[:,mo_occ==0]
h1 = []
for ia in atmlst:
h1ao = _get_integrals_fcsd(mol, ia)
for i in range(3):
for j in range(3):
h1.append(orbv.T.conj().dot(h1ao[i,j]).dot(orbo) * .5)
return h1
def _get_integrals_fcsd(mol, atm_id):
'''AO integrals for FC + SD'''
nao = mol.nao
with mol.with_rinv_origin(mol.atom_coord(atm_id)):
# Note the fermi-contact part is different to the fermi-contact
# operator in HFC, as well as the FC operator in EFG.
        # FC here is associated with the integrals of
# (-\nabla \nabla 1/r + I_3x3 \nabla\dot\nabla 1/r), which includes the
# contribution of Poisson equation twice, i.e. 8\pi rho.
# Therefore, -1./3 * (8\pi rho) is used as the contact contribution in
# function _get_integrals_fc to remove the FC part.
# In HFC or EFG, the factor of FC part is 4\pi/3.
a01p = mol.intor('int1e_sa01sp', 12).reshape(3,4,nao,nao)
h1ao = -(a01p[:,:3] + a01p[:,:3].transpose(0,1,3,2))
return h1ao
def _get_integrals_fc(mol, atm_id):
'''AO integrals for Fermi contact term'''
    # The factor -8\pi/3 is used because the FC part is associated with the integrals
# of (-\nabla \nabla 1/r + I_3x3 \nabla\dot\nabla 1/r). See also the
# function _get_integrals_fcsd above.
coords = mol.atom_coord(atm_id).reshape(1, 3)
ao = mol.eval_gto('GTOval', coords)
return -8*numpy.pi/3 * numpy.einsum('ip,iq->pq', ao, ao)
def _uniq_atoms(nuc_pair):
atm1lst = sorted(set([i for i,j in nuc_pair]))
atm2lst = sorted(set([j for i,j in nuc_pair]))
atm1dic = dict([(ia,k) for k,ia in enumerate(atm1lst)])
atm2dic = dict([(ia,k) for k,ia in enumerate(atm2lst)])
return atm1dic, atm2dic
def _dm1_mo2ao(dm1, ket, bra):
nao, nket = ket.shape
nbra = bra.shape[1]
nset = len(dm1)
dm1 = lib.ddot(ket, dm1.transpose(1,0,2).reshape(nket,nset*nbra))
dm1 = dm1.reshape(nao,nset,nbra).transpose(1,0,2).reshape(nset*nao,nbra)
return lib.ddot(dm1, bra.T).reshape(nset,nao,nao)
def solve_mo1(sscobj, mo_energy=None, mo_coeff=None, mo_occ=None,
h1=None, s1=None, with_cphf=None):
cput1 = (time.clock(), time.time())
log = logger.Logger(sscobj.stdout, sscobj.verbose)
if mo_energy is None: mo_energy = sscobj._scf.mo_energy
if mo_coeff is None: mo_coeff = sscobj._scf.mo_coeff
if mo_occ is None: mo_occ = sscobj._scf.mo_occ
if with_cphf is None: with_cphf = sscobj.cphf
mol = sscobj.mol
if h1 is None:
atmlst = sorted(set([j for i,j in sscobj.nuc_pair]))
h1 = numpy.asarray(make_h1_pso(mol, mo_coeff, mo_occ, atmlst))
if with_cphf:
if callable(with_cphf):
vind = with_cphf
else:
vind = gen_vind(sscobj._scf, mo_coeff, mo_occ)
mo1, mo_e1 = cphf.solve(vind, mo_energy, mo_occ, h1, None,
sscobj.max_cycle_cphf, sscobj.conv_tol,
verbose=log)
else:
e_ai = lib.direct_sum('i-a->ai', mo_energy[mo_occ>0], mo_energy[mo_occ==0])
mo1 = h1 * (1 / e_ai)
mo_e1 = None
logger.timer(sscobj, 'solving mo1 eqn', *cput1)
return mo1, mo_e1
def gen_vind(mf, mo_coeff, mo_occ):
'''Induced potential associated with h1_PSO'''
vresp = mf.gen_response(singlet=True, hermi=2)
occidx = mo_occ > 0
orbo = mo_coeff[:, occidx]
orbv = mo_coeff[:,~occidx]
nocc = orbo.shape[1]
nao, nmo = mo_coeff.shape
nvir = nmo - nocc
def vind(mo1):
dm1 = [reduce(numpy.dot, (orbv, x*2, orbo.T.conj()))
for x in mo1.reshape(-1,nvir,nocc)]
dm1 = numpy.asarray([d1-d1.conj().T for d1 in dm1])
v1mo = numpy.asarray([reduce(numpy.dot, (orbv.T.conj(), x, orbo))
for x in vresp(dm1)])
return v1mo.ravel()
return vind
def _write(stdout, msc3x3, title):
stdout.write('%s\n' % title)
stdout.write('mu_x %s\n' % str(msc3x3[0]))
stdout.write('mu_y %s\n' % str(msc3x3[1]))
stdout.write('mu_z %s\n' % str(msc3x3[2]))
stdout.flush()
def _atom_gyro_list(mol):
gyro = []
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb in mol.nucprop:
prop = mol.nucprop[symb]
mass = prop.get('mass', None)
gyro.append(get_nuc_g_factor(symb, mass))
else:
# Get default isotope
gyro.append(get_nuc_g_factor(symb))
return numpy.array(gyro)
class SpinSpinCoupling(lib.StreamObject):
def __init__(self, scf_method):
self.mol = scf_method.mol
self.verbose = scf_method.mol.verbose
self.stdout = scf_method.mol.stdout
self.chkfile = scf_method.chkfile
self._scf = scf_method
mol = scf_method.mol
self.nuc_pair = [(i,j) for i in range(mol.natm) for j in range(i)]
self.with_fc = True
self.with_fcsd = False
self.cphf = True
self.max_cycle_cphf = 20
self.conv_tol = 1e-9
self.mo10 = None
self.mo_e10 = None
self._keys = set(self.__dict__.keys())
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
log.info('******** %s for %s ********',
self.__class__, self._scf.__class__)
log.info('nuc_pair %s', self.nuc_pair)
log.info('with Fermi-contact %s', self.with_fc)
log.info('with Fermi-contact + spin-dipole %s', self.with_fcsd)
if self.cphf:
log.info('Solving MO10 eq with CPHF.')
log.info('CPHF conv_tol = %g', self.conv_tol)
log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)
if not self._scf.converged:
log.warn('Ground state SCF is not converged')
return self
def kernel(self, mo1=None):
cput0 = (time.clock(), time.time())
self.check_sanity()
self.dump_flags()
mol = self.mol
dm0 = self._scf.make_rdm1()
mo_coeff = self._scf.mo_coeff
mo_occ = self._scf.mo_occ
ssc_dia = self.make_dso(mol, dm0)
if mo1 is None:
mo1 = self.mo10 = self.solve_mo1()[0]
ssc_pso = self.make_pso(mol, mo1, mo_coeff, mo_occ)
e11 = ssc_dia + ssc_pso
if self.with_fcsd:
ssc_fcsd = self.make_fcsd(self.nuc_pair)
e11 += ssc_fcsd
elif self.with_fc:
ssc_fc = self.make_fc(self.nuc_pair)
e11 += ssc_fc
logger.timer(self, 'spin-spin coupling', *cput0)
if self.verbose >= logger.NOTE:
nuc_magneton = .5 * (nist.E_MASS/nist.PROTON_MASS) # e*hbar/2m
au2Hz = nist.HARTREE2J / nist.PLANCK
unit = au2Hz * nuc_magneton ** 2
logger.debug(self, 'Unit AU -> Hz %s', unit)
iso_ssc = unit * numpy.einsum('kii->k', e11) / 3
natm = mol.natm
ktensor = numpy.zeros((natm,natm))
for k, (i, j) in enumerate(self.nuc_pair):
ktensor[i,j] = ktensor[j,i] = iso_ssc[k]
if self.verbose >= logger.DEBUG:
                    _write(self.stdout, e11[k],
'\nSSC E11 between %d %s and %d %s' \
% (i, self.mol.atom_symbol(i),
j, self.mol.atom_symbol(j)))
# _write(self.stdout, ssc_dia [k], 'dia-magnetism')
# _write(self.stdout, ssc_para[k], 'para-magnetism')
gyro = _atom_gyro_list(mol)
jtensor = numpy.einsum('ij,i,j->ij', ktensor, gyro, gyro)
label = ['%2d %-2s'%(ia, mol.atom_symbol(ia)) for ia in range(natm)]
logger.note(self, 'Reduced spin-spin coupling constant K (Hz)')
tools.dump_mat.dump_tri(self.stdout, ktensor, label)
logger.info(self, '\nNuclear g factor %s', gyro)
logger.note(self, 'Spin-spin coupling constant J (Hz)')
tools.dump_mat.dump_tri(self.stdout, jtensor, label)
return e11
dia = make_dso = make_dso
make_pso = make_pso
make_fc = make_fc
make_fcsd = make_fcsd
    def para(self, mol=None, mo10=None, mo_coeff=None, mo_occ=None,
             nuc_pair=None):
        ssc_para = self.make_pso(mol, mo10, mo_coeff, mo_occ, nuc_pair)
        if self.with_fcsd:
            ssc_para += self.make_fcsd(nuc_pair)
        elif self.with_fc:
            ssc_para += self.make_fc(nuc_pair)
        return ssc_para
solve_mo1 = solve_mo1
SSC = SpinSpinCoupling
from pyscf import scf
scf.hf.RHF.SSC = scf.hf.RHF.SpinSpinCoupling = lib.class_as_method(SSC)
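# With the registration above, an SSC calculation can be launched directly from a
# converged RHF object, e.g.:
#     mf = scf.RHF(mol).run()
#     ssc = mf.SSC()
#     jj = ssc.kernel()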
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom.extend([
[1 , (0. , 0. , .917)],
['F' , (0. , 0. , 0.)], ])
mol.nucmod = {'F': 2} # gaussian nuclear model
mol.basis = {'H': '6-31g',
'F': '6-31g',}
mol.build()
mf = scf.RHF(mol).run()
ssc = mf.SSC()
ssc.verbose = 4
ssc.cphf = True
ssc.with_fc = True
ssc.with_fcsd = True
jj = ssc.kernel()
print(jj)
print(lib.finger(jj)*1e8 - 0.12374812977885304)
mol = gto.M(atom='''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis='ccpvdz')
mf = scf.RHF(mol).run()
ssc = SSC(mf)
ssc.with_fc = True
ssc.with_fcsd = True
jj = ssc.kernel()
print(lib.finger(jj)*1e8 - -0.11191697931377538)
ssc.with_fc = True
ssc.with_fcsd = False
jj = ssc.kernel()
print(lib.finger(jj)*1e8 - 0.82442034395656116)
|
|
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
Unit tests for tsdb client.
"""
import json
import os
import random
import string
import sys
import unittest
file_path = os.path.normpath(os.path.dirname(__file__))
sys.path.append(file_path + '/../../')
if sys.version_info[0] == 2 :
reload(sys)
sys.setdefaultencoding('utf-8')
from baidubce.services.tsdb.tsdb_client import TsdbClient
import tsdb_test_config
class TestTsdbClient(unittest.TestCase):
"""
Test class for tsdb sdk client
"""
def setUp(self):
self.tsdb_client = TsdbClient(tsdb_test_config.config)
self.query_list = [{
"metric": "cpu_idle",
"field": "value",
"filters": {
"start": 1465376157006,
"tags": {
"host": ["server1", "server2"]
},
"value": ">= 10"
},
"groupBy": [{
"name": "Tag",
"tags": ["rack"]
}],
"limit": 1000,
"aggregators": [{
"name": "Sum",
"sampling": "10 minutes"
}]
}]
self.datapoints = [{
"metric": "cpu_idle",
"field": "field1",
"tags": {
"host": "server1",
"rack": "rack1"
},
"timestamp": 1465376157007,
"value": 51
},
{
"metric": "cpu_idle",
"field": "field2",
"tags": {
"host": "server2",
"rack": "rack2"
},
"values": [
[1465376269769, 67],
[1465376325057, 60]
]
},{
"metric": "cpu_idle",
"field": "value",
"tags": {
"host": "server1",
"rack": "rack1"
},
"timestamp": 1465376157007,
"value": 51
}]
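        # The datapoints above include both a single-sample form (timestamp/value)
        # and a multi-sample form (values: [[timestamp, value], ...]).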
def tearDown(self):
print("ok")
def test_write_datapoints(self):
"""
test_write_datapoints
"""
error = None
try:
response = self.tsdb_client.write_datapoints(self.datapoints)
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_write_datapoints_no_gzip(self):
"""
test_write_datapoints_no_gzip
"""
error = None
try:
response = self.tsdb_client.write_datapoints(self.datapoints, False)
print('test_write_datapoints_no_gzip', response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_get_metrics(self):
"""
test_get_metrics
"""
error = None
try:
response = self.tsdb_client.get_metrics()
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_get_fields(self):
"""
test_get_fields
"""
error = None
try:
response = self.tsdb_client.get_fields('cpu_idle')
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_get_tags(self):
"""
test_get_tags
"""
error = None
try:
response = self.tsdb_client.get_tags('cpu_idle')
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_get_datapoints(self):
"""
test_get_datapoints
"""
error = None
try:
response = self.tsdb_client.get_datapoints(self.query_list)
print("test_get_datapoints", response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_get_rows_with_sql(self):
"""
test get rows with sql
"""
error = None
try:
statements = [
"select timestamp from cpu_idle",
"select value from cpu_idle",
"select host from cpu_idle",
"select timestamp,field1 from cpu_idle",
"select * from cpu_idle",
"select timestamp, value from cpu_idle order by timestamp ",
"select timestamp, value from cpu_idle order by timestamp desc",
'''select timestamp, value from cpu_idle
where value > 30 and timestamp >150937263000''',
"select host, count(1) from cpu_idle group by host",
'''select time_bucket(timestamp, '2 days') as DAY, sum(value) as SUM
from cpu_idle group by time_bucket(timestamp, '2 days')
order by time_bucket(timestamp, '2 days')''',
"select timestamp, ((field2 - field1) * 10) as RESULT, host from cpu_idle",
"select timestamp from cpu_idle",
'''SELECT field1, CASE field1 WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END
FROM cpu_idle''',
"SELECT field1, IF(field1>100,1,0) as result FROM cpu_idle",
"SELECT field1, field2, COALESCE (field1, field2) as result FROM cpu_idle",
"SELECT field1, abs (field1) as result FROM cpu_idle",
"SELECT field1, sqrt (field1) as result FROM cpu_idle",
"SELECT field1, cbrt (field1) as result FROM cpu_idle",
"SELECT field1, ceil (field1) as result FROM cpu_idle",
"SELECT field1, floor (field1) as result FROM cpu_idle",
"SELECT 'str1' || 'str2' as result FROM cpu_idle",
'''SELECT time_bucket(timestamp, '2 days') as DAY, avg(field1) as result
FROM cpu_idle group by time_bucket(timestamp, '2 days')
order by time_bucket(timestamp, '2 days')''',
''' SELECT count(*) as result
FROM cpu_idle where timestamp < 1525611901''',
''' SELECT time_bucket(timestamp, '2 days') as DAY, count(field1) as count
FROM cpu_idle group by time_bucket(timestamp, '2 days')
order by time_bucket(timestamp, '2 days')''',
'''SELECT max_by(field1,field2) as result
FROM cpu_idle where timestamp < 1525611901000 ''',
'''SELECT min_by(field1,field2) as result
FROM cpu_idle where timestamp < 1525611901000 ''',
'''SELECT max(field1) as result
FROM cpu_idle where timestamp < 1525611901000''',
'''SELECT min(field1) as result
FROM cpu_idle where timestamp < 1525611901000''',
'''SELECT time_bucket(timestamp, '2 days') as DAY, sum(field1) as sum
FROM cpu_idle group by time_bucket(timestamp, '2 days')
order by time_bucket(timestamp, '2 days')'''
]
for statement in statements:
response = self.tsdb_client.get_rows_with_sql(statement)
print(statement, response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_generate_pre_signed_url(self):
"""
test_generate_pre_signed_url
"""
error = None
try:
response = self.tsdb_client.generate_pre_signed_url(self.query_list)
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
def test_generate_pre_signed_url_with_sql(self):
"""
test_generate_pre_signed_url_with_sql
"""
error = None
try:
statement = "select timestamp from cpu_idle"
response = self.tsdb_client.generate_pre_signed_url_with_sql(statement)
print(response)
except BaseException as e:
error = e
finally:
self.assertIsNone(error)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from datetime import datetime
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str, smart_unicode
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from desktop.models import Document, Document2
from liboozie.oozie_api import get_oozie
from liboozie.credentials import Credentials
from liboozie.submittion import Submission
from liboozie.types import Workflow as OozieWorkflow, Coordinator as CoordinatorWorkflow, Bundle as BundleWorkflow
from oozie.conf import OOZIE_JOBS_COUNT, ENABLE_CRON_SCHEDULING, ENABLE_V2
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm, RerunBundleForm, UpdateEndTimeForm
from oozie.models import Workflow as OldWorkflow, Job, utc_datetime_format, Bundle, Coordinator, get_link, History as OldHistory
from oozie.models2 import History, Workflow, WORKFLOW_NODE_PROPERTIES
from oozie.settings import DJANGO_APPS
def get_history():
if ENABLE_V2.get():
return History
else:
return OldHistory
def get_workflow():
if ENABLE_V2.get():
return Workflow
else:
return OldWorkflow
LOG = logging.getLogger(__name__)
MAX_COORD_ACTIONS = 250
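# Upper bound on the number of coordinator actions returned to the dashboard
# unless 'show_all_actions' is requested (see list_oozie_coordinator below).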
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def _get_workflows(user):
return [{
'name': workflow.name,
'owner': workflow.owner.username,
'value': workflow.uuid,
'id': workflow.id
} for workflow in [d.content_object for d in Document.objects.get_docs(user, Document2, extra='workflow2')]
]
def manage_oozie_jobs(request, job_id, action):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage an Oozie job.'))
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
response = {'status': -1, 'data': ''}
try:
oozie_api = get_oozie(request.user)
if action == 'change':
end_time = 'endtime=%s' % (request.POST.get('end_time'))
response['data'] = oozie_api.job_control(job_id, action, parameters={'value': end_time})
else:
response['data'] = oozie_api.job_control(job_id, action)
response['status'] = 0
if 'notification' in request.POST:
request.info(_(request.POST.get('notification')))
except RestException, ex:
response['data'] = _("Error performing %s on Oozie job %s: %s.") % (action, job_id, ex.message)
return JsonResponse(response)
def bulk_manage_oozie_jobs(request):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage the Oozie jobs.'))
response = {'status': -1, 'data': ''}
if 'job_ids' in request.POST and 'action' in request.POST:
jobs = request.POST.get('job_ids').split()
response = {'totalRequests': len(jobs), 'totalErrors': 0, 'messages': ''}
oozie_api = get_oozie(request.user)
for job_id in jobs:
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
oozie_api.job_control(job_id, request.POST.get('action'))
except RestException, ex:
response['totalErrors'] = response['totalErrors'] + 1
response['messages'] += str(ex)
return JsonResponse(response)
def show_oozie_error(view_func):
def decorate(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except RestException, ex:
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail):
detail = '%s: %s' % (_('The Oozie server is not running'), detail)
raise PopupException(_('An error occurred with Oozie.'), detail=detail)
return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
if not has_dashboard_jobs_access(request.user):
kwargs['user'] = request.user.username
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
just_sla = request.GET.get('justsla') == 'true'
if request.GET.get('type') in ('running', 'progress'):
kwargs['filters'] = [('status', status) for status in OozieWorkflow.RUNNING_STATUSES]
elif request.GET.get('type') == 'completed':
kwargs['filters'] = [('status', status) for status in OozieWorkflow.FINISHED_STATUSES]
json_jobs = oozie_api.get_workflows(**kwargs).jobs
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_job(job.id) for job in json_jobs]
return JsonResponse(massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla), encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_workflows.mako', request, {
'user': request.user,
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_coordinators(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
if not has_dashboard_jobs_access(request.user):
kwargs['user'] = request.user.username
oozie_api = get_oozie(request.user)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
if request.GET.get('format') == 'json':
if request.GET.get('type') in ('running', 'progress'):
kwargs['filters'] = [('status', status) for status in CoordinatorWorkflow.RUNNING_STATUSES]
elif request.GET.get('type') == 'completed':
kwargs['filters'] = [('status', status) for status in CoordinatorWorkflow.FINISHED_STATUSES]
json_jobs = oozie_api.get_coordinators(**kwargs).jobs
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
return HttpResponse(json.dumps(massaged_oozie_jobs_for_json(json_jobs, request.user)).replace('\\\\', '\\'), content_type="application/json")
return render('dashboard/list_oozie_coordinators.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
})
@show_oozie_error
def list_oozie_bundles(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(),}
if not has_dashboard_jobs_access(request.user):
kwargs['user'] = request.user.username
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
if request.GET.get('type') in ('running', 'progress'):
kwargs['filters'] = [('status', status) for status in BundleWorkflow.RUNNING_STATUSES]
elif request.GET.get('type') == 'completed':
kwargs['filters'] = [('status', status) for status in BundleWorkflow.FINISHED_STATUSES]
json_jobs = oozie_api.get_bundles(**kwargs).jobs
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_bundle(job.id) for job in json_jobs]
return HttpResponse(json.dumps(massaged_oozie_jobs_for_json(json_jobs, request.user)).replace('\\\\', '\\'), content_type="application/json")
return render('dashboard/list_oozie_bundles.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
if oozie_coordinator is not None:
setattr(oozie_workflow, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(oozie_workflow, 'oozie_bundle', oozie_bundle)
oozie_parent = oozie_workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
workflow_data = None
credentials = None
doc = None
hue_workflow = None
workflow_graph = 'MISSING' # default to prevent loading the graph tab for deleted workflows
full_node_list = None
if ENABLE_V2.get():
try:
# To update with the new History document model
hue_coord = get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: hue_coord.workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow: hue_workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow:
workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
full_node_list = hue_workflow.nodes
workflow_id = hue_workflow.id
wid = {
'id': workflow_id
}
doc = Document2.objects.get(type='oozie-workflow2', **wid)
new_workflow = get_workflow()(document=doc)
workflow_data = new_workflow.get_data()
credentials = Credentials()
else:
workflow_graph, full_node_list = OldWorkflow.gen_status_graph_from_xml(request.user, oozie_workflow)
except:
pass
else:
history = get_history().cross_reference_submission_history(request.user, job_id)
hue_coord = history and history.get_coordinator() or get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or (history and history.get_workflow()) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: Job.objects.can_read_or_exception(request, hue_coord.workflow.id)
if hue_workflow: Job.objects.can_read_or_exception(request, hue_workflow.id)
if hue_workflow:
workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
full_node_list = hue_workflow.node_list
else:
workflow_graph, full_node_list = get_workflow().gen_status_graph_from_xml(request.user, oozie_workflow)
parameters = oozie_workflow.conf_dict.copy()
for action in oozie_workflow.actions:
action.oozie_coordinator = oozie_coordinator
action.oozie_bundle = oozie_bundle
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(full_node_list),
'graph': workflow_graph,
'actions': massaged_workflow_actions_for_json(oozie_workflow.get_working_actions(), oozie_coordinator, oozie_bundle)
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_workflow.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_workflow.id,
'parent_id': oozie_workflow.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
return render('dashboard/list_oozie_workflow.mako', request, {
'oozie_workflow': oozie_workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
'oozie_slas': oozie_slas,
'hue_workflow': hue_workflow,
'hue_coord': hue_coord,
'parameters': parameters,
'has_job_edition_permission': has_job_edition_permission,
'workflow_graph': workflow_graph,
'layout_json': json.dumps(workflow_data['layout'], cls=JSONEncoderForHTML) if workflow_data else '',
'workflow_json': json.dumps(workflow_data['workflow'], cls=JSONEncoderForHTML) if workflow_data else '',
'credentials_json': json.dumps(credentials.credentials.keys(), cls=JSONEncoderForHTML) if credentials else '',
'workflow_properties_json': json.dumps(WORKFLOW_NODE_PROPERTIES, cls=JSONEncoderForHTML),
'doc1_id': doc.doc.get().id if doc else -1,
'subworkflows_json': json.dumps(_get_workflows(request.user), cls=JSONEncoderForHTML),
'can_edit_json': json.dumps(doc is None or doc.doc.get().is_editable(request.user))
})
@show_oozie_error
def list_oozie_coordinator(request, job_id):
oozie_coordinator = check_job_access_permission(request, job_id)
# Cross reference the submission history (if any)
coordinator = get_history().get_coordinator_from_config(oozie_coordinator.conf_dict)
try:
coordinator = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
pass
oozie_bundle = None
if request.GET.get('bundle_job_id'):
try:
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
except:
pass
show_all_actions = request.GET.get('show_all_actions') == 'true'
if request.GET.get('format') == 'json':
actions = massaged_coordinator_actions_for_json(oozie_coordinator, oozie_bundle)
if not show_all_actions:
actions = actions[:MAX_COORD_ACTIONS]
return_obj = {
'id': oozie_coordinator.id,
'status': oozie_coordinator.status,
'progress': oozie_coordinator.get_progress(),
'nextTime': format_time(oozie_coordinator.nextMaterializedTime),
'endTime': format_time(oozie_coordinator.endTime),
'actions': actions,
'show_all_actions': show_all_actions
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_coordinator.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_coordinator.id,
'parent_id': oozie_coordinator.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
update_endtime_form = UpdateEndTimeForm()
return render('dashboard/list_oozie_coordinator.mako', request, {
'oozie_coordinator': oozie_coordinator,
'oozie_slas': oozie_slas,
'coordinator': coordinator,
'oozie_bundle': oozie_bundle,
'has_job_edition_permission': has_job_edition_permission,
'show_all_actions': show_all_actions,
'MAX_COORD_ACTIONS': MAX_COORD_ACTIONS,
'enable_cron_scheduling': enable_cron_scheduling,
'update_endtime_form': update_endtime_form,
})
@show_oozie_error
def list_oozie_bundle(request, job_id):
oozie_bundle = check_job_access_permission(request, job_id)
# Cross reference the submission history (if any)
bundle = None
try:
if ENABLE_V2.get():
bundle = get_history().get_bundle_from_config(oozie_bundle.conf_dict)
else:
bundle = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
pass
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_bundle.id,
'status': oozie_bundle.status,
'progress': oozie_bundle.get_progress(),
'endTime': format_time(oozie_bundle.endTime),
'actions': massaged_bundle_actions_for_json(oozie_bundle)
}
return HttpResponse(json.dumps(return_obj).replace('\\\\', '\\'), content_type="application/json")
return render('dashboard/list_oozie_bundle.mako', request, {
'oozie_bundle': oozie_bundle,
'bundle': bundle,
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow_action(request, action):
try:
action = get_oozie(request.user).get_action(action)
workflow = check_job_access_permission(request, action.id.split('@')[0])
except RestException, ex:
raise PopupException(_("Error accessing Oozie action %s.") % (action,), detail=ex.message)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
workflow.oozie_coordinator = oozie_coordinator
workflow.oozie_bundle = oozie_bundle
oozie_parent = workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
return render('dashboard/list_oozie_workflow_action.mako', request, {
'action': action,
'workflow': workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
})
@show_oozie_error
def get_oozie_job_log(request, job_id):
oozie_job = check_job_access_permission(request, job_id)
return_obj = {
'id': oozie_job.id,
'status': oozie_job.status,
'log': oozie_job.log,
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
@show_oozie_error
def list_oozie_info(request):
api = get_oozie(request.user)
configuration = api.get_configuration()
oozie_status = api.get_oozie_status()
instrumentation = {}
metrics = {}
if 'org.apache.oozie.service.MetricsInstrumentationService' in [c.strip() for c in configuration.get('oozie.services.ext', '').split(',')]:
api2 = get_oozie(request.user, api_version="v2")
metrics = api2.get_metrics()
else:
instrumentation = api.get_instrumentation()
return render('dashboard/list_oozie_info.mako', request, {
'instrumentation': instrumentation,
'metrics': metrics,
'configuration': configuration,
'oozie_status': oozie_status,
})
@show_oozie_error
def list_oozie_sla(request):
oozie_api = get_oozie(request.user, api_version="v2")
if request.method == 'POST':
params = {}
job_name = request.POST.get('job_name')
if re.match('.*-oozie-oozi-[WCB]', job_name):
params['id'] = job_name
params['parent_id'] = job_name
else:
params['app_name'] = job_name
if 'useDates' in request.POST:
if request.POST.get('start'):
params['nominal_start'] = request.POST.get('start')
if request.POST.get('end'):
params['nominal_end'] = request.POST.get('end')
oozie_slas = oozie_api.get_oozie_slas(**params)
else:
oozie_slas = [] # or get latest?
if request.REQUEST.get('format') == 'json':
massaged_slas = []
for sla in oozie_slas:
massaged_slas.append(massaged_sla_for_json(sla, request))
return HttpResponse(json.dumps({'oozie_slas': massaged_slas}), content_type="text/json")
return render('dashboard/list_oozie_sla.mako', request, {
'oozie_slas': oozie_slas
})
def massaged_sla_for_json(sla, request):
massaged_sla = {
'slaStatus': sla['slaStatus'],
'id': sla['id'],
'appType': sla['appType'],
'appName': sla['appName'],
'appUrl': get_link(sla['id']),
'user': sla['user'],
'nominalTime': sla['nominalTime'],
'expectedStart': sla['expectedStart'],
'actualStart': sla['actualStart'],
'expectedEnd': sla['expectedEnd'],
'actualEnd': sla['actualEnd'],
'jobStatus': sla['jobStatus'],
'expectedDuration': sla['expectedDuration'],
'actualDuration': sla['actualDuration'],
'lastModified': sla['lastModified']
}
return massaged_sla
@show_oozie_error
def rerun_oozie_job(request, job_id, app_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
oozie_workflow = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_workflow, request.user)
if request.method == 'POST':
rerun_form = RerunForm(request.POST, oozie_workflow=oozie_workflow)
params_form = ParametersFormSet(request.POST)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
if request.POST['rerun_form_choice'] == 'fail_nodes':
args['fail_nodes'] = 'true'
else:
args['skip_nodes'] = ','.join(rerun_form.cleaned_data['skip_nodes'])
args['deployment_dir'] = app_path
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_workflow(request, job_id, args, mapping)
request.info(_('Workflow re-running.'))
return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s %s' % (rerun_form.errors, params_form.errors)))
else:
rerun_form = RerunForm(oozie_workflow=oozie_workflow)
initial_params = ParameterForm.get_initial_params(oozie_workflow.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_job_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_job', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_workflow(request, oozie_id, run_args, mapping):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, properties=mapping, oozie_id=oozie_id)
job_id = submission.rerun(**run_args)
return job_id
except RestException, ex:
raise PopupException(_("Error re-running workflow %s.") % (oozie_id,),
detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_coordinator(request, job_id, app_path):
oozie_coordinator = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_coordinator, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunCoordForm(request.POST, oozie_coordinator=oozie_coordinator)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'type': 'action',
'scope': ','.join(oozie_coordinator.aggreate(rerun_form.cleaned_data['actions'])),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_coordinator(request, job_id, args, params, properties)
request.info(_('Coordinator re-running.'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % smart_unicode(rerun_form.errors))
return list_oozie_coordinator(request, job_id)
else:
rerun_form = RerunCoordForm(oozie_coordinator=oozie_coordinator)
initial_params = ParameterForm.get_initial_params(oozie_coordinator.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_coord_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_coord', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_coordinator(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_coord(params=params, **args)
return job_id
except RestException, ex:
raise PopupException(_("Error re-running coordinator %s.") % (oozie_id,),
detail=ex._headers.get('oozie-error-message', ex))
@show_oozie_error
def rerun_oozie_bundle(request, job_id, app_path):
oozie_bundle = check_job_access_permission(request, job_id)
check_job_edition_permission(oozie_bundle, request.user)
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
rerun_form = RerunBundleForm(request.POST, oozie_bundle=oozie_bundle)
if sum([rerun_form.is_valid(), params_form.is_valid()]) == 2:
args = {}
args['deployment_dir'] = app_path
params = {
'coord-scope': ','.join(rerun_form.cleaned_data['coordinators']),
'refresh': rerun_form.cleaned_data['refresh'],
'nocleanup': rerun_form.cleaned_data['nocleanup'],
}
if rerun_form.cleaned_data['start'] and rerun_form.cleaned_data['end']:
date = {
'date-scope':
'%(start)s::%(end)s' % {
'start': utc_datetime_format(rerun_form.cleaned_data['start']),
'end': utc_datetime_format(rerun_form.cleaned_data['end'])
}
}
params.update(date)
properties = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
_rerun_bundle(request, job_id, args, params, properties)
request.info(_('Bundle re-running.'))
return redirect(reverse('oozie:list_oozie_bundle', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s' % (rerun_form.errors,)))
return list_oozie_bundle(request, job_id)
else:
rerun_form = RerunBundleForm(oozie_bundle=oozie_bundle)
initial_params = ParameterForm.get_initial_params(oozie_bundle.conf_dict)
params_form = ParametersFormSet(initial=initial_params)
popup = render('dashboard/rerun_bundle_popup.mako', request, {
'rerun_form': rerun_form,
'params_form': params_form,
'action': reverse('oozie:rerun_oozie_bundle', kwargs={'job_id': job_id, 'app_path': app_path}),
}, force_template=True).content
return JsonResponse(popup, safe=False)
def _rerun_bundle(request, oozie_id, args, params, properties):
try:
submission = Submission(user=request.user, fs=request.fs, jt=request.jt, oozie_id=oozie_id, properties=properties)
job_id = submission.rerun_bundle(params=params, **args)
return job_id
except RestException, ex:
raise PopupException(_("Error re-running bundle %s.") % (oozie_id,),
detail=ex._headers.get('oozie-error-message', ex))
def submit_external_job(request, application_path):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
application_name = os.path.basename(application_path)
application_class = Bundle if application_name == 'bundle.xml' else Coordinator if application_name == 'coordinator.xml' else get_workflow()
mapping[application_class.get_application_path_key()] = application_path
try:
submission = Submission(request.user, fs=request.fs, jt=request.jt, properties=mapping)
job_id = submission.run(application_path)
except RestException, ex:
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail):
detail = '%s: %s' % (_('The Oozie server is not running'), detail)
LOG.error(smart_str(detail))
raise PopupException(_("Error submitting job %s") % (application_path,), detail=detail)
request.info(_('Oozie job submitted'))
view = 'list_oozie_bundle' if application_name == 'bundle.xml' else 'list_oozie_coordinator' if application_name == 'coordinator.xml' else 'list_oozie_workflow'
return redirect(reverse('oozie:%s' % view, kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s' % params_form.errors))
else:
parameters = Submission(request.user, fs=request.fs, jt=request.jt).get_external_parameters(application_path)
initial_params = ParameterForm.get_initial_params(parameters)
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'name': _('Job'),
'action': reverse('oozie:submit_external_job', kwargs={'application_path': application_path})
}, force_template=True).content
return JsonResponse(popup, safe=False)
def massaged_workflow_actions_for_json(workflow_actions, oozie_coordinator, oozie_bundle):
actions = []
for action in workflow_actions:
if oozie_coordinator is not None:
setattr(action, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(action, 'oozie_bundle', oozie_bundle)
massaged_action = {
'id': action.id,
'log': action.get_absolute_log_url(),
'url': action.get_absolute_url(),
'name': action.name,
'type': action.type,
'status': action.status,
'externalIdUrl': action.get_external_id_url(),
'externalId': action.externalId,
'startTime': format_time(action.startTime),
'endTime': format_time(action.endTime),
'retries': action.retries,
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'transition': action.transition,
'data': action.data,
}
actions.append(massaged_action)
return actions
def massaged_coordinator_actions_for_json(coordinator, oozie_bundle):
coordinator_id = coordinator.id
coordinator_actions = coordinator.get_working_actions()
actions = []
related_job_ids = []
related_job_ids.append('coordinator_job_id=%s' % coordinator_id)
if oozie_bundle is not None:
related_job_ids.append('bundle_job_id=%s' %oozie_bundle.id)
for action in coordinator_actions:
massaged_action = {
'id': action.id,
'url': action.externalId and reverse('oozie:list_oozie_workflow', kwargs={'job_id': action.externalId}) + '?%s' % '&'.join(related_job_ids) or '',
'number': action.actionNumber,
'type': action.type,
'status': action.status,
'externalId': action.externalId or '-',
'externalIdUrl': action.externalId and reverse('oozie:list_oozie_workflow_action', kwargs={'action': action.externalId}) or '',
'nominalTime': format_time(action.nominalTime),
'title': action.title,
'createdTime': format_time(action.createdTime),
'lastModifiedTime': format_time(action.lastModifiedTime),
'errorCode': action.errorCode,
'errorMessage': action.errorMessage,
'missingDependencies': action.missingDependencies
}
actions.append(massaged_action)
# Sorting for Oozie < 4.1 backward compatibility
actions.sort(key=lambda k: k['number'], reverse=True)
return actions
def massaged_bundle_actions_for_json(bundle):
bundle_actions = bundle.get_working_actions()
actions = []
for action in bundle_actions:
massaged_action = {
'id': action.coordJobId,
'url': action.coordJobId and reverse('oozie:list_oozie_coordinator', kwargs={'job_id': action.coordJobId}) + '?bundle_job_id=%s' % bundle.id or '',
'name': action.coordJobName,
'type': action.type,
'status': action.status,
'externalId': action.coordExternalId or '-',
'frequency': action.frequency,
'timeUnit': action.timeUnit,
'nextMaterializedTime': action.nextMaterializedTime,
'concurrency': action.concurrency,
'pauseTime': action.pauseTime,
'user': action.user,
'acl': action.acl,
'timeOut': action.timeOut,
'coordJobPath': action.coordJobPath,
'executionPolicy': action.executionPolicy,
'startTime': action.startTime,
'endTime': action.endTime,
'lastAction': action.lastAction
}
actions.insert(0, massaged_action)
return actions
def format_time(st_time):
if st_time is None:
return '-'
elif type(st_time) == time.struct_time:
return time.strftime("%a, %d %b %Y %H:%M:%S", st_time)
else:
return st_time
def catch_unicode_time(u_time):
if type(u_time) == time.struct_time:
return u_time
else:
return datetime.timetuple(datetime.strptime(u_time, '%a, %d %b %Y %H:%M:%S %Z'))
def massaged_oozie_jobs_for_json(oozie_jobs, user, just_sla=False):
jobs = []
for job in oozie_jobs:
if not just_sla or (just_sla and job.has_sla) and job.appName != 'pig-app-hue-script':
last_modified_time_millis = hasattr(job, 'lastModTime') and job.lastModTime and (time.time() - time.mktime(job.lastModTime)) * 1000 or 0
duration_millis = job.endTime and job.startTime and ((time.mktime(job.endTime) - time.mktime(job.startTime)) * 1000) or 0
massaged_job = {
'id': job.id,
'lastModTime': hasattr(job, 'lastModTime') and job.lastModTime and format_time(job.lastModTime) or None,
'lastModTimeInMillis': last_modified_time_millis,
'lastModTimeFormatted': last_modified_time_millis and format_duration_in_millis(last_modified_time_millis) or None,
'kickoffTime': hasattr(job, 'kickoffTime') and job.kickoffTime and format_time(job.kickoffTime) or '',
'kickoffTimeInMillis': hasattr(job, 'kickoffTime') and job.kickoffTime and time.mktime(catch_unicode_time(job.kickoffTime)) or 0,
'nextMaterializedTime': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and format_time(job.nextMaterializedTime) or '',
'nextMaterializedTimeInMillis': hasattr(job, 'nextMaterializedTime') and job.nextMaterializedTime and time.mktime(job.nextMaterializedTime) or 0,
'timeOut': hasattr(job, 'timeOut') and job.timeOut or None,
'endTime': job.endTime and format_time(job.endTime) or None,
'endTimeInMillis': job.endTime and time.mktime(job.endTime) or 0,
'status': job.status,
'isRunning': job.is_running(),
'duration': duration_millis and format_duration_in_millis(duration_millis) or None,
'durationInMillis': duration_millis,
'appName': job.appName,
'progress': job.get_progress(),
'user': job.user,
'absoluteUrl': job.get_absolute_url(),
'canEdit': has_job_edition_permission(job, user),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'kill'}),
'suspendUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'suspend'}),
'resumeUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'resume'}),
'created': hasattr(job, 'createdTime') and job.createdTime and format_time(job.createdTime) or '',
'createdInMillis': hasattr(job, 'createdTime') and job.createdTime and time.mktime(catch_unicode_time(job.createdTime)) or 0,
'startTime': hasattr(job, 'startTime') and format_time(job.startTime) or None,
'startTimeInMillis': hasattr(job, 'startTime') and job.startTime and time.mktime(job.startTime) or 0,
'run': hasattr(job, 'run') and job.run or 0,
'frequency': hasattr(job, 'frequency') and Coordinator.CRON_MAPPING.get(job.frequency, job.frequency) or None,
'timeUnit': hasattr(job, 'timeUnit') and job.timeUnit or None,
'parentUrl': hasattr(job, 'parentId') and job.parentId and get_link(job.parentId) or ''
}
jobs.append(massaged_job)
return { 'jobs': jobs }
def check_job_access_permission(request, job_id):
"""
Decorator ensuring that the user has access to the job submitted to Oozie.
Arg: Oozie 'workflow', 'coordinator' or 'bundle' ID.
Return: the Oozie workflow, coordinator or bundle or raise an exception
  Notice: it takes a job id as input and returns the full Oozie job object (not an id).
"""
if job_id is not None:
oozie_api = get_oozie(request.user)
if job_id.endswith('W'):
get_job = oozie_api.get_job
elif job_id.endswith('C'):
get_job = oozie_api.get_coordinator
else:
get_job = oozie_api.get_bundle
try:
oozie_job = get_job(job_id)
except RestException, ex:
raise PopupException(_("Error accessing Oozie job %s.") % (job_id,),
                           detail=ex._headers.get('oozie-error-message', ''))
if request.user.is_superuser \
or oozie_job.user == request.user.username \
or has_dashboard_jobs_access(request.user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to access job %(id)s.") % \
{'username': request.user.username, 'id': oozie_job.id}
access_warn(request, message)
raise PopupException(message)
def check_job_edition_permission(oozie_job, user):
if has_job_edition_permission(oozie_job, user):
return oozie_job
else:
message = _("Permission denied. %(username)s does not have the permissions to modify job %(id)s.") % \
{'username': user.username, 'id': oozie_job.id}
raise PopupException(message)
def has_job_edition_permission(oozie_job, user):
return user.is_superuser or oozie_job.user == user.username
def has_dashboard_jobs_access(user):
return user.is_superuser or user.has_hue_permission(action="dashboard_jobs_access", app=DJANGO_APPS[0])
|
|
# -- coding: utf-8 --
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, dirname, abspath
from shutil import rmtree, copytree
from re import sub
import random
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
import logging
import tempfile
logging.disable(logging.CRITICAL)
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)
from box.util.rotunicode import RotUnicode
from httmock import response, HTTMock
from mock import MagicMock
from bs4 import Comment
from chime import create_app, repo_functions, google_api_functions, view_functions, constants
from unit.chime_test_client import ChimeTestClient
import codecs
codecs.register(RotUnicode.search_function)
# these patterns help us search the HTML of a response to determine if the expected page loaded
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_SAVED_CATEGORY = u'<li class="flash flash--notice">Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.</li>'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# review stuff
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Unreviewed Edits</a>'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
class TestProcess (TestCase):
def setUp(self):
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestProcess-')
self.work_path = mkdtemp(prefix='chime-repo-clones-')
repo_path = dirname(abspath(__file__)) + '/../test-app.git'
upstream_repo_dir = mkdtemp(prefix='repo-upstream-', dir=self.work_path)
upstream_repo_path = join(upstream_repo_dir, 'test-app.git')
copytree(repo_path, upstream_repo_path)
self.upstream = ChimeRepo(upstream_repo_path)
repo_functions.ignore_task_metadata_on_merge(self.upstream)
self.origin = self.upstream.clone(mkdtemp(prefix='repo-origin-', dir=self.work_path), bare=True)
repo_functions.ignore_task_metadata_on_merge(self.origin)
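        # Each test works against a private copy of the test-app.git fixture:
        # 'upstream' is a temporary copy and 'origin' is a bare clone of it that
        # the app is pointed at below via REPO_PATH.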
# environ['GIT_AUTHOR_NAME'] = ' '
# environ['GIT_COMMITTER_NAME'] = ' '
# environ['GIT_AUTHOR_EMAIL'] = u'erica@example.com'
# environ['GIT_COMMITTER_EMAIL'] = u'erica@example.com'
create_app_environ = {}
create_app_environ['GA_CLIENT_ID'] = 'client_id'
create_app_environ['GA_CLIENT_SECRET'] = 'meow_secret'
self.ga_config_dir = mkdtemp(prefix='chime-config-', dir=self.work_path)
create_app_environ['RUNNING_STATE_DIR'] = self.ga_config_dir
create_app_environ['WORK_PATH'] = self.work_path
create_app_environ['REPO_PATH'] = self.origin.working_dir
create_app_environ['AUTH_DATA_HREF'] = 'http://example.com/auth.csv'
create_app_environ['BROWSERID_URL'] = 'http://localhost'
create_app_environ['LIVE_SITE_URL'] = 'http://example.org/'
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = u'support@example.com'
create_app_environ['SUPPORT_PHONE_NUMBER'] = u'(123) 456-7890'
self.app = create_app(create_app_environ)
# write a tmp config file
config_values = {
"access_token": "meowser_token",
"refresh_token": "refresh_meows",
"profile_id": "12345678",
"project_domain": ""
}
with self.app.app_context():
google_api_functions.write_ga_config(config_values, self.app.config['RUNNING_STATE_DIR'])
random.choice = MagicMock(return_value="P")
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def auth_csv_example_allowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\nexample.com,Example Org''')
raise Exception('Asked for unknown URL ' + url.geturl())
def mock_persona_verify_erica(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "erica@example.com"}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_frances(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "frances@example.com"}''')
else:
return self.auth_csv_example_allowed(url, request)
# in TestProcess
def test_editing_process_with_two_users(self):
''' Check edit process with a user looking at feedback from another user.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
branch_name = erica.quick_activity_setup(*args)
# Edit the new article.
erica.edit_article('So, So Awesome', 'It was the best of times.')
# Ask for feedback
erica.follow_link('/tree/{}/'.format(branch_name))
erica.request_feedback('Is this okay?')
#
# Switch users and comment on the activity.
#
frances.open_link(erica.path)
frances.leave_feedback('It is super-great.')
#
# Switch back and look for that bit of feedback.
#
erica.reload()
words = erica.soup.find(text='It is super-great.')
comment = words.find_parent('div').find_parent('div')
author = comment.find(text='frances@example.com')
self.assertTrue(author is not None)
# in TestProcess
def test_editing_process_with_two_categories(self):
''' Check edit process with a user looking at activity from another user.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Erica starts a new task, "Diving for Dollars".
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task('Diving for Dollars')
erica_branchname = erica.get_branch_name()
# Erica creates a new category and asks for feedback.
erica.follow_link('/tree/{}/edit/other/'.format(erica_branchname))
erica.add_category('Dollars')
erica.follow_link('/tree/{}/'.format(erica_branchname))
erica.request_feedback('Is this okay?')
# Frances starts a new task, "Bobbing for Apples".
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task('Bobbing for Apples')
frances_branchname = frances.get_branch_name()
# Frances creates a new category.
frances.follow_link('/tree/{}/edit/other/'.format(frances_branchname))
frances.add_category('Apples')
# Frances approves Erica's new work and publishes it.
frances.open_link(erica.path)
frances.leave_feedback('It is super-great.')
frances.approve_activity()
frances.publish_activity()
# Erica should now expect to see her own new category.
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task('Canticle for Leibowitz')
erica_branchname2 = erica.get_branch_name()
erica.follow_link('/tree/{}/edit/other/'.format(erica_branchname2))
self.assertIsNotNone(erica.soup.find(text='Dollars'), 'Should see first published category')
# Frances should still not expect to see Erica's published category.
frances.open_link('/tree/{}/edit/'.format(frances_branchname))
frances.follow_link('/tree/{}/edit/other/'.format(frances_branchname))
self.assertIsNone(frances.soup.find(text='Dollars'), 'Should not see first published category')
# in TestProcess
def test_notified_when_saving_article_in_published_activity(self):
''' You're notified and redirected when trying to save an article in a published activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
branch_name = erica.quick_activity_setup(*args)
# Edit the new article.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
article_path = erica.path
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(branch_name))
erica.request_feedback(comment_text='Is this okay?')
# Re-load the article page
erica.open_link(article_path)
#
# Switch users and publish the activity.
#
frances.open_link(url='/tree/{}/'.format(branch_name))
frances.leave_feedback(comment_text='It is super-great.')
frances.approve_activity()
frances.publish_activity()
#
# Switch back and try to make another edit.
#
erica.edit_article(title_str='Just Awful', body_str='It was the worst of times.')
# we should've been redirected to the activity overview page
self.assertEqual(erica.path, '/tree/{}/'.format(branch_name))
# a warning is flashed about working in a published branch
# we can't get the date exactly right, so test for every other part of the message
message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
message_published_split = message_published.split(u'xxx')
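            # (str.split keeps the text on either side of the placeholder: for example,
            # u'a xxx b'.split(u'xxx') == [u'a ', u' b'], so each surviving fragment of the
            # real message is checked against the flashed <li> below. The sample string is
            # illustrative only.)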
for part in message_published_split:
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# in TestProcess
def test_published_branch_not_resurrected_on_save(self):
        ''' Saving a change on a branch that exists locally, but not at origin because it was published, doesn't re-create the remote branch.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task, topic, subtopic, article
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Squeeze A School Of Fish Into A Bait Ball for Dolphins'
article_name = u'Stunned Fish'
args = task_description, u'Plowing Through', u'Feeding On', article_name
branch_name = erica.quick_activity_setup(*args)
article_path = erica.path
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(branch_name))
erica.request_feedback(comment_text='Is this okay?')
# Re-load the article page
erica.open_link(url=article_path)
# verify that the branch exists locally and remotely
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
self.assertTrue(branch_name in repo.branches)
# there's a remote branch with the branch name, but no tag
self.assertFalse('refs/tags/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
self.assertTrue('refs/heads/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
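            # (`repo.git.ls_remote('origin', branch_name)` returns tab-separated
            # "<sha>\t<refname>" lines, so splitting the whole output on whitespace,
            # as above, yields both the SHAs and the full ref names.)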
#
# Switch to frances, approve and publish erica's changes
#
frances.open_link(url='/tree/{}/'.format(branch_name))
frances.leave_feedback(comment_text='It is perfect.')
frances.approve_activity()
frances.publish_activity()
#
# Switch to erica, try to submit an edit to the article
#
erica.edit_article(title_str=article_name, body_str=u'Chase fish into shallow water to catch them.')
# we should've been redirected to the activity overview page
self.assertEqual(erica.path, '/tree/{}/'.format(branch_name))
# a warning is flashed about working in a published branch
# we can't get the date exactly right, so test for every other part of the message
message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
message_published_split = message_published.split(u'xxx')
for part in message_published_split:
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# verify that the branch exists locally and not remotely
self.assertTrue(branch_name in repo.branches)
# there's a remote tag with the branch name, but no branch
self.assertTrue('refs/tags/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
self.assertFalse('refs/heads/{}'.format(branch_name) in repo.git.ls_remote('origin', branch_name).split())
# in TestProcess
def test_notified_when_browsing_in_published_activity(self):
''' You're notified and redirected when trying to browse a published activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description='Eating Carrion for Vultures')
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a new category
category_name = u'Forage'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users
#
# approve and publish erica's changes
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is perfect.')
frances.approve_activity()
frances.publish_activity()
#
# Switch users
#
# try to open an edit page (but anticipate a redirect)
erica.open_link(url='/tree/{}/edit/other/{}/'.format(erica_branch_name, category_slug), expected_status_code=303)
# we should've been redirected to the activity overview page
self.assertEqual(erica.path, '/tree/{}/'.format(erica_branch_name))
# a warning is flashed about working in a published branch
# we can't get the date exactly right, so test for every other part of the message
message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
message_published_split = message_published.split(u'xxx')
for part in message_published_split:
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# in TestProcess
def test_editing_process_with_conflicting_edit(self):
''' Check edit process with a user attempting to change an activity with a conflict.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
f_branch_name = frances.quick_activity_setup(*args)
f_article_path = frances.path
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
e_branch_name = erica.quick_activity_setup(*args)
# Edit the new article.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(e_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users and publish the activity.
#
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is super-great.')
frances.approve_activity()
frances.publish_activity()
#
# Now introduce a conflicting change on the original activity,
# and verify that the expected flash warning is displayed.
#
frances.open_link(f_article_path)
frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
self.assertIsNotNone(frances.soup.find(text=repo_functions.MERGE_CONFLICT_WARNING_FLASH_MESSAGE),
'Should see a warning about the conflict above the article.')
frances.follow_link(href='/tree/{}/'.format(f_branch_name))
self.assertIsNotNone(frances.soup.find(text=repo_functions.MERGE_CONFLICT_WARNING_FLASH_MESSAGE),
'Should see a warning about the conflict in the activity history.')
# in TestProcess
def test_editing_process_with_conflicting_edit_but_no_publish(self):
''' Check edit process with a user attempting to change an activity with a conflict.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Frances: Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
frances.quick_activity_setup(*args)
# Erica: Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Ninjas', 'Flipping Out', 'So Awesome'
erica.quick_activity_setup(*args)
# Erica edits the new article.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
# Frances edits the new article.
frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
def test_editing_process_with_nonconflicting_edit(self):
''' Check edit process with a user attempting to change an activity with no conflict.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
f_branch_name = frances.quick_activity_setup(*args)
f_article_path = frances.path
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Diving for Dollars', 'Samurai', 'Flipping Out', 'So Awesome'
e_branch_name = erica.quick_activity_setup(*args)
# Edit the new article.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(e_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users and publish the activity.
#
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is super-great.')
frances.approve_activity()
frances.publish_activity()
#
            # Now introduce a non-conflicting change on the original activity,
            # and verify that no conflict warning is displayed.
#
frances.open_link(f_article_path)
frances.edit_article(title_str='So, So Awful', body_str='It was the worst of times.')
self.assertIsNone(frances.soup.find(text=repo_functions.UPSTREAM_EDIT_INFO_FLASH_MESSAGE),
                              'Should not see a warning about the upstream edit above the article.')
frances.follow_link(href='/tree/{}/'.format(f_branch_name))
self.assertIsNone(frances.soup.find(text=repo_functions.UPSTREAM_EDIT_INFO_FLASH_MESSAGE),
'Should not see a warning about the conflict in the activity history.')
# in TestProcess
def test_editing_process_with_conflicting_edit_on_same_article(self):
''' Two people editing the same article in the same branch get a useful error.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Frances: Start a new task, topic, subtopic, article
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Triassic for Artemia', 'Biological', 'Toxicity', 'Assays'
frances.quick_activity_setup(*args)
branch_name = frances.get_branch_name()
# Frances and Erica load the same article
erica.open_link(frances.path)
# Erica edits the new article.
erica.edit_article(title_str='Assays', body_str='Broad leaf-like appendages')
# Frances edits the same article and gets an error
frances.edit_article(title_str='Assays', body_str='Typical primitive arthropod')
# we can't get the date exactly right, so test for every other part of the message
message_edited = view_functions.MESSAGE_PAGE_EDITED.format(published_date=u'xxx', published_by=erica_email)
message_edited_split = message_edited.split(u'xxx')
for part in message_edited_split:
self.assertIsNotNone(frances.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# Frances successfully browses elsewhere in the activity
frances.open_link(url='/tree/{}/'.format(branch_name))
# Frances successfully deletes the task
frances.open_link(url=constants.ROUTE_ACTIVITY)
frances.delete_task(branch_name=branch_name)
# Frances successfully creates a new task
frances.start_task(description='Narrow Braincase for Larger Carnassials')
# in TestProcess
def test_task_not_marked_published_after_merge_conflict(self):
''' When publishing an activity results in a merge conflict, it shouldn't be marked published.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description='Eating Carrion for Vultures')
erica_branch_name = erica.get_branch_name()
# Look for an "other" link that we know about - is it a category?
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a new category, subcategory, and article
erica.add_category(category_name=u'Forage')
erica.add_subcategory(subcategory_name='Dead Animals')
erica.add_article(article_name='Dingos')
# Edit the new article.
erica.edit_article(title_str='Dingos', body_str='Canis Lupus Dingo')
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users
#
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description='Flying in Circles for Vultures')
frances_branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link(href='/tree/{}/edit/other/'.format(frances_branch_name))
# Create a duplicate new category, subcategory, and article
frances.add_category(category_name=u'Forage')
frances.add_subcategory(subcategory_name='Dead Animals')
frances.add_article(article_name='Dingos')
# Edit the new article.
frances.edit_article(title_str='Dingos', body_str='Apex Predator')
# Ask for feedback
frances.follow_link(href='/tree/{}/'.format(frances_branch_name))
frances.request_feedback(comment_text='Is this okay?')
frances_overview_path = frances.path
# frances approves and publishes erica's changes
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is perfect.')
frances.approve_activity()
frances.publish_activity()
# erica approves and publishes frances's changes
erica.open_link(url=frances_overview_path)
erica.leave_feedback(comment_text='It is not bad.')
erica.approve_activity()
erica.publish_activity(expected_status_code=500)
# we got a 500 error page about a merge conflict
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'error-500') in comments)
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'a' and u'MergeConflict' in tag['href']))
# re-load the overview page
erica.open_link(url=frances_overview_path)
# verify that the publish button is still available
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'button' and tag['value'] == u'Publish'))
# in TestProcess
def test_redirect_to_overview_when_branch_published(self):
''' When you're working in a published branch and don't have a local copy, you're redirected to
that activity's overview page.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description='Eating Carrion for Vultures')
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a new category
category_name = u'Forage'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(erica_branch_name))
erica.request_feedback(comment_text='Is this okay?')
#
# Switch users
#
# approve and publish erica's changes
frances.open_link(url=erica.path)
frances.leave_feedback(comment_text='It is perfect.')
frances.approve_activity()
frances.publish_activity()
# delete all trace of the branch locally
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
repo.git.checkout('master')
repo.git.branch('-D', erica_branch_name)
repo.git.remote('prune', 'origin')
#
# Switch users
#
# load an edit page
erica.open_link(url='/tree/{}/edit/other/{}/'.format(erica_branch_name, category_slug), expected_status_code=303)
# a warning is flashed about working in a published branch
# we can't get the date exactly right, so test for every other part of the message
message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
message_published_split = message_published.split(u'xxx')
for part in message_published_split:
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# the overview page was loaded
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'activity-overview') in comments)
# in TestProcess
def test_notified_when_working_in_deleted_task(self):
''' When someone else deletes a task you're working in, you're notified.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Eating Carrion for Vultures'
erica.start_task(description=task_description)
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
#
# Switch users
#
# delete erica's task
frances.open_link(url=constants.ROUTE_ACTIVITY)
frances.delete_task(branch_name=erica_branch_name)
self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
#
# Switch users
#
# load an edit page
erica.open_link(url='/tree/{}/edit/other/'.format(erica_branch_name))
# a warning is flashed about working in a deleted branch
self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
# in TestProcess
def test_page_not_found_when_branch_deleted(self):
''' When you're working in a deleted branch and don't have a local copy, you get a 404 error
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Eating Carrion for Vultures'
erica.start_task(description=task_description)
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
#
# Switch users
#
# delete erica's task
frances.open_link(url=constants.ROUTE_ACTIVITY)
frances.delete_task(branch_name=erica_branch_name)
self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
# delete all trace of the branch locally
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
repo.git.checkout('master')
repo.git.branch('-D', erica_branch_name)
repo.git.remote('prune', 'origin')
#
# Switch users
#
# load an edit page
erica.open_link(url='/tree/{}/edit/other/'.format(erica_branch_name), expected_status_code=404)
# a warning is flashed about working in a deleted branch
self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
# the 404 page was loaded
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'error-404') in comments)
# in TestProcess
def test_deleted_branch_not_resurrected_on_save(self):
        ''' Saving a change on a branch that exists locally, but not at origin because it was deleted, doesn't re-create the remote branch.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Squeeze A School Of Fish Into A Bait Ball for Dolphins'
erica.start_task(description=task_description)
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a category, subcategory, and article
article_name = u'Stunned Fish'
erica.add_category(category_name=u'Plowing Through')
erica.add_subcategory(subcategory_name=u'Feeding On')
erica.add_article(article_name=article_name)
erica_article_path = erica.path
# verify that the branch exists locally and remotely
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
self.assertTrue(erica_branch_name in repo.branches)
self.assertIsNotNone(repo_functions.get_branch_if_exists_at_origin(clone=repo, default_branch_name='master', new_branch_name=erica_branch_name))
#
# Switch users
#
# delete erica's task
frances.open_link(url=constants.ROUTE_ACTIVITY)
frances.delete_task(branch_name=erica_branch_name)
self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
#
# Switch users
#
# load the article edit page
erica.open_link(url=erica_article_path)
# a warning is flashed about working in a deleted branch
self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
# try to save an edit to the article
erica.edit_article(title_str=article_name, body_str=u'Chase fish into shallow water to catch them.')
# we're in the article-edit template
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
comments = erica.soup.find_all(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'article-edit') in comments)
# a warning is flashed about working in a deleted branch
self.assertIsNotNone(erica.soup.find(text=view_functions.MESSAGE_ACTIVITY_DELETED))
# verify that the branch exists locally and not remotely
self.assertTrue(erica_branch_name in repo.branches)
self.assertIsNone(repo_functions.get_branch_if_exists_at_origin(clone=repo, default_branch_name='master', new_branch_name=erica_branch_name))
# in TestProcess
def test_forms_for_changes_in_active_task(self):
''' When working in an active (not published or deleted) task, forms or form buttons that allow
you to make changes are visible.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Eating Carrion for Vultures'
erica.start_task(description=task_description)
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a category, sub-category, article
category_name = u'Antennae Segments'
category_slug = slugify(category_name)
subcategory_name = u'Short Ovipositors'
article_name = u'Inject Eggs Directly Into a Host Body'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
erica.add_article(article_name=article_name)
article_path = erica.path
#
# All the edit forms and buttons are there as expected
#
# load an edit page
erica.open_link(url=subcategory_path)
# the drop-down comment form is there
review_modal = erica.soup.find(lambda tag: bool(tag.name == 'form' and 'review-modal' in tag.get('class')))
self.assertIsNotNone(review_modal)
            # the add new topic, subtopic, and article fields are there
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic')))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic')))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article')))
# there's an edit (pencil) button on the category or subcategory, and a delete (trashcan) button on the article
topic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == category_name)).find_parent('li')
self.assertIsNotNone(topic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
subtopic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == subcategory_name)).find_parent('li')
self.assertIsNotNone(subtopic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
article_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == article_name)).find_parent('li')
self.assertIsNotNone(article_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class'))))
# load a modify page
index_filename = u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)
erica.open_link(url='/tree/{}/edit/other/{}'.format(erica_branch_name, join(category_slug, index_filename)))
# there's a save and delete button on the modify category form
modify_form = erica.soup.find('textarea', attrs={'name': 'en-description'}).find_parent('form')
delete_button = modify_form.find('button', attrs={'value': 'delete_category'})
save_button = modify_form.find('button', attrs={'value': 'save_category'})
self.assertIsNotNone(delete_button)
self.assertIsNotNone(save_button)
# load an article edit page
erica.open_link(url=article_path)
# there's a save button on the edit form
edit_form = erica.soup.find(lambda tag: bool(tag.name == 'form' and u'/tree/{}/save/'.format(erica_branch_name) in tag.get('action')))
save_button = edit_form.find('button', value='Save')
self.assertIsNotNone(save_button)
# in TestProcess
def test_no_forms_for_changes_in_inactive_task(self):
''' When working in an inactive (published or deleted) task, forms or form buttons that would
allow you to make changes are hidden.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'Eating Carrion for Vultures'
erica.start_task(description=task_description)
erica_branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(erica_branch_name))
# Create a category, sub-category, article
category_name = u'Antennae Segments'
category_slug = slugify(category_name)
subcategory_name = u'Short Ovipositors'
article_name = u'Inject Eggs Directly Into a Host Body'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
erica.add_article(article_name=article_name)
article_path = erica.path
#
# Switch users
#
# delete erica's task
frances.open_link(url=constants.ROUTE_ACTIVITY)
frances.delete_task(branch_name=erica_branch_name)
self.assertEqual(PATTERN_FLASH_TASK_DELETED.format(description=task_description), frances.soup.find('li', class_='flash').text)
#
# Switch users
#
# load an edit page
erica.open_link(url=subcategory_path)
# the drop-down comment form isn't there
review_modal = erica.soup.find(lambda tag: bool(tag.name == 'form' and 'review-modal' in tag.get('class')))
self.assertIsNone(review_modal)
# the add new topic, subtopic, and article fields aren't there
self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add topic')))
self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add subtopic')))
self.assertIsNone(erica.soup.find(lambda tag: bool(tag.name == 'input' and tag.get('placeholder') == 'Add article')))
# there's no edit (pencil) button on the category or subcategory, and no delete (trashcan) button on the article
topic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == category_name)).find_parent('li')
self.assertIsNone(topic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
subtopic_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == subcategory_name)).find_parent('li')
self.assertIsNone(subtopic_li.find(lambda tag: bool(tag.name == 'span' and 'fa-pencil' in tag.get('class'))))
article_li = erica.soup.find(lambda tag: bool(tag.name == 'a' and tag.text == article_name)).find_parent('li')
self.assertIsNone(article_li.find(lambda tag: bool(tag.name == 'span' and 'fa-trash' in tag.get('class'))))
# load a modify page
index_filename = u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)
erica.open_link(url='/tree/{}/edit/other/{}'.format(erica_branch_name, join(category_slug, index_filename)))
# there's no save or delete button on the modify category form
modify_form = erica.soup.find('textarea', attrs={'name': 'en-description'}).find_parent('form')
delete_button = modify_form.find('button', attrs={'value': 'delete_category'})
save_button = modify_form.find('button', attrs={'value': 'save_category'})
self.assertIsNone(delete_button)
self.assertIsNone(save_button)
# load an article edit page
erica.open_link(url=article_path)
# there's no save button on the edit form
edit_form = erica.soup.find(lambda tag: bool(tag.name == 'form' and u'/tree/{}/save/'.format(erica_branch_name) in tag.get('action')))
save_button = edit_form.find('button', value='Save')
self.assertIsNone(save_button)
# in TestProcess
def test_editing_out_of_date_article(self):
''' Check edit process with a user attempting to edit an out-of-date article.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
args = 'Bobbing for Apples', 'Ninjas', 'Flipping Out', 'So Awesome'
frances.quick_activity_setup(*args)
frances.edit_article(title_str='So, So Awesome', body_str='It was the best of times.')
# Erica now opens the article that Frances started.
erica.open_link(frances.path)
# Frances starts a different article.
frances.open_link(dirname(dirname(frances.path)) + '/')
frances.add_article('So Terrible')
# Meanwhile, Erica completes her edits.
erica.edit_article(title_str='So, So Awesome', body_str='It was the best of times.\n\nBut also the worst of times.')
# in TestProcess
def test_published_activity_history_accuracy(self):
''' A published activity's history is constructed as expected.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(email=erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(email=frances_email)
# Erica starts a new task, topic, sub-topic, article
erica.open_link(constants.ROUTE_ACTIVITY)
activity_description = u'Reef-Associated Roving Coralgroupers'
topic_name = u'Plectropomus Pessuliferus'
subtopic_name = u'Recruit Giant Morays'
article_name = u'In Hunting For Food'
args = activity_description, topic_name, subtopic_name, article_name
branch_name = erica.quick_activity_setup(*args)
# edit the article
erica.edit_article(title_str=article_name, body_str=u'This is the only known instance of interspecies cooperative hunting among fish.')
# Load the activity overview page
erica.open_link(url='/tree/{}/'.format(branch_name))
# Leave a comment
comment_body = u'The invitation to hunt is initiated by head-shaking.'
erica.leave_feedback(comment_text=comment_body)
# Request feedback
erica.request_feedback()
#
# Switch users and publish the activity.
#
frances.open_link(url=erica.path)
frances.approve_activity()
frances.publish_activity()
#
# Switch users and load the activity page.
#
erica.open_link(url=constants.ROUTE_ACTIVITY)
# verify that the project is listed in the recently published column
pub_ul = erica.soup.select("#activity-list-published")[0]
# there should be an HTML comment with the branch name
            comment = pub_ul.find_all(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=activity_description))
            # load the published activity's overview page
erica.open_link(url='/tree/{}/'.format(branch_name))
# a warning is flashed about working in a published branch
# we can't get the date exactly right, so test for every other part of the message
message_published = view_functions.MESSAGE_ACTIVITY_PUBLISHED.format(published_date=u'xxx', published_by=frances_email)
message_published_split = message_published.split(u'xxx')
for part in message_published_split:
self.assertIsNotNone(erica.soup.find(lambda tag: tag.name == 'li' and part in tag.text))
# there is a summary
summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 article and 2 topics have been changed' in tag.text)))
# grab all the list items and make sure they match what we did above
check_rows = summary_div.find_all('li')
# the link to create a new change
change_row = check_rows.pop()
self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
# the topic creation
category_row = check_rows.pop()
self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, topic_name)
self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
# the subtopic creation
subcategory_row = check_rows.pop()
self.assertIsNotNone(subcategory_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(subcategory_row.find('h3', {"data-test-id": "change-title"}).text, subtopic_name)
self.assertEqual(subcategory_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(subcategory_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
# the article creation & edit
article_1_row = check_rows.pop()
self.assertIsNotNone(article_1_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(article_1_row.find('h3', {"data-test-id": "change-title"}).text, article_name)
self.assertEqual(article_1_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
self.assertEqual(article_1_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Edited')
# no rows left
self.assertEqual(len(check_rows), 0)
# also check the full history
history_div = erica.soup.find("div", class_="activity-log")
check_rows = history_div.find_all('div', class_='activity-log-item')
self.assertEqual(len(check_rows), 9)
# activity started
started_row = check_rows.pop()
# The "Reef-Associated Roving Coralgroupers" activity was started by erica@example.com.
self.assertEqual(started_row.find('p').text.strip(), u'The "{}" {} by {}.'.format(activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
topic_row = check_rows.pop()
# The "Plectropomus Pessuliferus" topic was created by erica@example.com.
self.assertEqual(topic_row.find('p').text.strip(), u'The "{}" topic was created by {}.'.format(topic_name, erica_email))
subtopic_row = check_rows.pop()
# The "Recruit Giant Morays" topic was created by erica@example.com.
self.assertEqual(subtopic_row.find('p').text.strip(), u'The "{}" topic was created by {}.'.format(subtopic_name, erica_email))
article_created_row = check_rows.pop()
# The "In Hunting For Food" article was created by erica@example.com.
self.assertEqual(article_created_row.find('p').text.strip(), u'The "{}" article was created by {}.'.format(article_name, erica_email))
article_edited_row = check_rows.pop()
# The "In Hunting For Food" article was edited by erica@example.com.
self.assertEqual(article_edited_row.find('p').text.strip(), u'The "{}" article was edited by {}.'.format(article_name, erica_email))
comment_row = check_rows.pop()
self.assertEqual(comment_row.find('div', class_='comment__author').text, erica_email)
self.assertEqual(comment_row.find('div', class_='comment__body').text, comment_body)
feedback_requested_row = check_rows.pop()
# erica@example.com requested feedback on this activity.
self.assertEqual(feedback_requested_row.find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
endorsed_row = check_rows.pop()
# frances@example.com endorsed this activity.
self.assertEqual(endorsed_row.find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
published_row = check_rows.pop()
# frances@example.com published this activity.
self.assertEqual(published_row.find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
# in TestProcess
def test_published_activities_dont_mix_histories(self):
''' The histories of two published activities that were worked on simultaneously don't leak into each other.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(email=erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(email=frances_email)
# Erica starts two new tasks
erica.open_link(constants.ROUTE_ACTIVITY)
first_activity_description = u'Use Gestures To Coordinate Hunts'
first_branch_name = erica.quick_activity_setup(first_activity_description)
first_edit_path = erica.path
erica.open_link(constants.ROUTE_ACTIVITY)
second_activity_description = u'Come To The Coral Trout\'s Aid When Signalled'
second_branch_name = erica.quick_activity_setup(second_activity_description)
second_edit_path = erica.path
# Erica creates a new topic in the two tasks
erica.open_link(first_edit_path)
first_topic_name = u'Plectropomus Leopardus'
erica.add_category(category_name=first_topic_name)
erica.open_link(second_edit_path)
second_topic_name = u'Cheilinus Undulatus'
erica.add_category(category_name=second_topic_name)
# Erica leaves comments on the two tasks and requests feedback
erica.open_link(url='/tree/{}/'.format(first_branch_name))
            first_comment_body = u'Testing their interactions with Napoleon wrasse decoys.'
erica.leave_feedback(comment_text=first_comment_body)
# Request feedback
erica.request_feedback()
erica.open_link(url='/tree/{}/'.format(second_branch_name))
second_comment_body = u'The "good" wrasse would come to the trout\'s aid when signalled, whereas the "bad" one would swim in the opposite direction.'
erica.leave_feedback(comment_text=second_comment_body)
# Request feedback
erica.request_feedback()
#
# Switch users and publish the activities.
#
frances.open_link(url='/tree/{}/'.format(first_branch_name))
frances.approve_activity()
frances.publish_activity()
frances.open_link(url='/tree/{}/'.format(second_branch_name))
frances.approve_activity()
frances.publish_activity()
#
# Switch users and check the first overview page.
#
erica.open_link(url='/tree/{}/'.format(first_branch_name))
# there is a summary
summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 topic has been changed' in tag.text)))
# grab all the list items and make sure they match what we did above
check_rows = summary_div.find_all('li')
# the link to create a new change
change_row = check_rows.pop()
self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
# the topic creation
category_row = check_rows.pop()
self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, first_topic_name)
self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
# no rows left
self.assertEqual(len(check_rows), 0)
# also check the full history
history_div = erica.soup.find("div", class_="activity-log")
check_rows = history_div.find_all('div', class_='activity-log-item')
self.assertEqual(len(check_rows), 6)
# The "Use Gestures To Coordinate Hunts" activity was started by erica@example.com.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" {} by {}.'.format(first_activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
# The "Plectropomus Leopardus" topic was created by erica@example.com.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" topic was created by {}.'.format(first_topic_name, erica_email))
            # Testing their interactions with Napoleon wrasse decoys.
self.assertEqual(check_rows.pop().find('div', class_='comment__body').text, first_comment_body)
# erica@example.com requested feedback on this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
# frances@example.com endorsed this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
# frances@example.com published this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
#
# Check the second overview page.
#
erica.open_link(url='/tree/{}/'.format(second_branch_name))
# there is a summary
summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '1 topic has been changed' in tag.text)))
# grab all the list items and make sure they match what we did above
check_rows = summary_div.find_all('li')
# the link to create a new change
change_row = check_rows.pop()
self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
# the topic creation
category_row = check_rows.pop()
self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, second_topic_name)
self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
# no rows left
self.assertEqual(len(check_rows), 0)
# also check the full history
history_div = erica.soup.find("div", class_="activity-log")
check_rows = history_div.find_all('div', class_='activity-log-item')
self.assertEqual(len(check_rows), 6)
# The "Use Gestures To Coordinate Hunts" activity was started by erica@example.com.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" {} by {}.'.format(second_activity_description, repo_functions.ACTIVITY_CREATED_MESSAGE, erica_email))
# The "Plectropomus Leopardus" topic was created by erica@example.com.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'The "{}" topic was created by {}.'.format(second_topic_name, erica_email))
            # The "good" wrasse would come to the trout's aid when signalled, whereas the "bad" one would swim in the opposite direction.
self.assertEqual(check_rows.pop().find('div', class_='comment__body').text, second_comment_body)
# erica@example.com requested feedback on this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE))
# frances@example.com endorsed this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE))
# frances@example.com published this activity.
self.assertEqual(check_rows.pop().find('p').text.strip(), u'{} {}'.format(frances_email, repo_functions.ACTIVITY_PUBLISHED_MESSAGE))
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.documentai_v1beta2.types import document
from google.cloud.documentai_v1beta2.types import document_understanding
from google.longrunning import operations_pb2 # type: ignore
from .base import DocumentUnderstandingServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import DocumentUnderstandingServiceGrpcTransport
class DocumentUnderstandingServiceGrpcAsyncIOTransport(
DocumentUnderstandingServiceTransport
):
"""gRPC AsyncIO backend transport for DocumentUnderstandingService.
Service to parse structured information from unstructured or
semi-structured documents using state-of-the-art Google AI such
as natural language, computer vision, and translation.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "documentai.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
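    # A hedged usage sketch (illustrative only; the project id below is hypothetical):
    #
    #     transport = DocumentUnderstandingServiceGrpcAsyncIOTransport(
    #         host="documentai.googleapis.com",
    #         quota_project_id="my-billing-project",
    #     )
    #
    # Alternatively, build a channel with ``create_channel`` above and pass it in via
    # the ``channel`` argument of ``__init__``; in that case any credentials arguments
    # are ignored (see below).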
def __init__(
self,
*,
host: str = "documentai.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def batch_process_documents(
self,
) -> Callable[
[document_understanding.BatchProcessDocumentsRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch process documents method over gRPC.
LRO endpoint to batch process many documents. The output is
written to Cloud Storage as JSON in the [Document] format.
Returns:
Callable[[~.BatchProcessDocumentsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_process_documents" not in self._stubs:
self._stubs["batch_process_documents"] = self.grpc_channel.unary_unary(
"/google.cloud.documentai.v1beta2.DocumentUnderstandingService/BatchProcessDocuments",
request_serializer=document_understanding.BatchProcessDocumentsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_process_documents"]
@property
def process_document(
self,
) -> Callable[
[document_understanding.ProcessDocumentRequest], Awaitable[document.Document]
]:
r"""Return a callable for the process document method over gRPC.
Processes a single document.
Returns:
Callable[[~.ProcessDocumentRequest],
Awaitable[~.Document]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "process_document" not in self._stubs:
self._stubs["process_document"] = self.grpc_channel.unary_unary(
"/google.cloud.documentai.v1beta2.DocumentUnderstandingService/ProcessDocument",
request_serializer=document_understanding.ProcessDocumentRequest.serialize,
response_deserializer=document.Document.deserialize,
)
return self._stubs["process_document"]
def close(self):
return self.grpc_channel.close()
__all__ = ("DocumentUnderstandingServiceGrpcAsyncIOTransport",)
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
import itertools
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.i18n import _, _LE
from nova import image
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova.openstack.common import log as logging
from nova import quota
from nova.scheduler import client as scheduler_client
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='2.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.additional_endpoints.append(self.compute_task_mgr)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@messaging.expected_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_LE("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, six.string_types):
updates[key] = timeutils.parse_strtime(value)
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
@messaging.expected_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
def instance_get_all_by_host(self, context, host, node,
columns_to_join):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
@messaging.expected_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@messaging.expected_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed, update_cells):
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
# NOTE(danms): This can be removed in version 3.0 of the RPC API
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values, create):
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
bdm_obj = objects.BlockDeviceMapping._from_db_object(
context, objects.BlockDeviceMapping(), bdm)
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join,
use_slave):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join, use_slave=use_slave)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end,
project_id, host):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end,
project_id, host):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
result = self.db.instance_destroy(context, instance['uuid'])
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v3.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed, update_totals):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@messaging.expected_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic, host, binary):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v3.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.InstanceActionNotFound)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
@messaging.expected_exceptions(exception.InstanceActionNotFound,
exception.InstanceActionEventNotFound)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values):
result = self.db.compute_node_update(context, node['id'], values)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@messaging.expected_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items, message):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
image_id = instance.get('%s_id' % image_type)
if image_id is not None:
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
result = self._object_dispatch(objclass, objmethod, context,
args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
return (result.obj_to_primitive(target_version=objver)
if isinstance(result, nova_object.NovaObject) else result)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
oldobj[name] != objinst[name]):
updates[name] = field.to_primitive(objinst, name,
objinst[name])
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport(self, context, objinst, target_version):
return objinst.obj_to_primitive(target_version=target_version)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.9')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.scheduler_client = scheduler_client.SchedulerClient()
@messaging.expected_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
scheduler_utils.populate_retry(filter_properties, instance['uuid'])
hosts = self.scheduler_client.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
quotas.rollback()
# if the flavor IDs match, it's migrate; otherwise resize
if flavor['id'] == instance['instance_type_id']:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep tests backward compatibility.
request_spec['instance_type'].pop('extra_specs')
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance['vm_state'],
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
quotas.rollback()
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceNotRunning,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
LOG.error(_LE('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance['uuid'], 'dest': destination},
exc_info=True)
raise exception.MigrationError(reason=ex)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
scheduler_utils.populate_retry(filter_properties,
instances[0].uuid)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
except Exception as exc:
for instance in instances:
scheduler_driver.handle_schedule_error(context, exc,
instance.uuid, request_spec)
return
for (instance, host) in itertools.izip(instances, hosts):
try:
instance.refresh()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _delete_image(self, context, image_id):
return self.image_api.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
hosts = self.scheduler_client.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id)
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image_id = sys_meta.get('shelved_image_id')
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
filter_properties = {}
hosts = self._schedule_instances(context, image,
filter_properties,
instance)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except exception.NoValidHost:
instance.task_state = None
instance.save()
LOG.warning(_("No valid host found for unshelve instance"),
instance=instance)
return
else:
LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
if not host:
# NOTE(lcostantino): Retrieve scheduler filters for the
# instance when the feature is available
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(context,
image_ref,
[instance])
try:
hosts = self.scheduler_client.select_destinations(context,
request_spec,
filter_properties)
host = hosts.pop(0)['host']
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_("No valid host found for rebuild"),
instance=instance)
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host)
|
|
'''
Hypergraph representation.
@author: Daniel Bauer (dbauer)
@author: Nathan Schneider (nschneid)
@since: 2012-06-18
'''
from collections import defaultdict
import unittest
import re
import sys
import copy
from operator import itemgetter
from common.cfg import NonterminalLabel
from common.exceptions import DerivationException
_graphics = False
def require_graphics():
global _graphics
if _graphics: return
# Try to import modules to render DAGs
global xdot
import xdot
global pgv
import pygraphviz as pgv
_graphics = True
def print_amr_error(amr_str, warn=sys.stderr):
warn.write("Could not parse AMR.\n")
warn.write(amr_str)
warn.write("\n")
def conv(s):
if not s:
return "NONE"
if isinstance(s, StrLiteral):
return s
elif s.startswith('"') and s.endswith('"'):
return s[1:-1]
else:
return s
class ListMap(defaultdict):
'''
A map that can contain several values for the same key.
@author: Nathan Schneider (nschneid)
@since: 2012-06-18
>>> x = ListMap()
>>> x.append('mykey', 3)
>>> x.append('key2', 'val')
>>> x.append('mykey', 8)
>>> x
defaultdict(<type 'list'>, {'key2': ['val'], 'mykey': [3, 8]})
>>> x['mykey']
3
>>> x.getall('mykey')
[3, 8]
>>> x.items()
[('key2', 'val'), ('mykey', 3), ('mykey', 8)]
>>> x.itemsfor('mykey')
[('mykey', 3), ('mykey', 8)]
>>> x.replace('mykey', 0)
>>> x
defaultdict(<type 'list'>, {'key2': ['val'], 'mykey': [0]})
'''
def __init__(self, *args, **kwargs):
defaultdict.__init__(self, list, *args, **kwargs)
def __setitem__(self, k, v):
if k in self:
raise KeyError('Cannot assign to ListMap entry; use replace() or append()')
return defaultdict.__setitem__(self, k, v)
def __getitem__(self, k):
'''Returns the *first* list entry for the key.'''
return dict.__getitem__(self, k)[0]
def getall(self, k):
return dict.__getitem__(self, k)
def items(self):
return [(k,v) for k,vv in defaultdict.items(self) for v in vv]
def values(self):
return [v for k,v in self.items()]
def itemsfor(self, k):
return [(k,v) for v in self.getall(k)]
def replace(self, k, v):
defaultdict.__setitem__(self, k, [v])
def append(self, k, v):
defaultdict.__getitem__(self, k).append(v)
def remove(self, k, v):
defaultdict.__getitem__(self, k).remove(v)
if not dict.__getitem__(self,k):
del self[k]
def __reduce__(self):
t = defaultdict.__reduce__(self)
return (t[0], ()) + t[2:]
# Actual AMR class
class Hgraph(defaultdict):
"""
An abstract meaning representation.
The structure consists of nested mappings from role names to fillers.
Because a concept may have multiple roles with the same name,
a ListMap data structure holds a list of fillers for each role.
A set of (concept, role, filler) triples can be extracted as well.
"""
_parser_singleton = None
def __init__(self, *args, **kwargs):
defaultdict.__init__(self, ListMap, *args, **kwargs)
self.roots = []
self.external_nodes = {}
self.rev_external_nodes = {}
        self.replace_count = 0 # Count how many replacements have occurred in this DAG
# to prefix unique new node IDs for glued fragments.
self.__cached_triples = None
self.__cached_depth = None
self.__nodelabels = False
self.node_alignments = {}
self.edge_alignments = {}
self.__cached_triples = None
self.node_to_concepts = {}
def __reduce__(self):
t = defaultdict.__reduce__(self)
return (t[0], ()) + (self.__dict__,) +t[3:]
####Hashing methods###
def _get_node_hashes(self):
tabu = set()
queue = []
node_to_id = defaultdict(int)
for x in sorted(self.roots):
if type(x) is tuple:
for y in x:
queue.append((y,0))
node_to_id[y] = 0
else:
queue.append((x,0))
node_to_id[x] = 0
while queue:
node, depth = queue.pop(0)
if not node in tabu:
tabu.add(node)
rels = tuple(sorted(self[node].keys()))
node_to_id[node] += 13 * depth + hash(rels)
for rel in rels:
children = self[node].getall(rel)
for child in children:
if not child in node_to_id:
if type(child) is tuple:
for c in child:
                                    node_to_id[c] = 41 * depth  # seed child hash with a depth offset
queue.append((c, depth+1))
else:
                                node_to_id[child] = 41 * depth  # seed child hash with a depth offset
queue.append((child, depth+1))
return node_to_id
def __hash__(self):
# We compute a hash for each node in the AMR and then sum up the hashes.
        # Collisions are minimized because each node hash is offset according to its distance from
# the root.
node_to_id = self._get_node_hashes()
return sum(node_to_id[node] for node in node_to_id)
def __eq__(self, other):
return hash(self) == hash(other)
@classmethod
def from_concept_edge_labels(cls, amr):
"""
Create a new AMR from an AMR or a DAG in which concepts are pushed into incoming edges.
"""
new_amr = amr.clone()
new_amr.roots = copy.copy(amr.roots)
for par, rel, child in amr.triples():
if type(rel) is str:
parts = rel.rsplit(":",1)
part2 = None
if len(parts)==2:
part1, part2 = parts
if not (part1.lower().startswith("root")):
new_amr._replace_triple(par, rel, child, par, part1, child)
for c in child:
new_amr.node_to_concepts[c] = part2
if (par,rel,child) in amr.edge_alignments:
if not c in new_amr.node_alignments:
new_amr.node_alignments[c] = []
new_amr.node_alignments[c].extend(amr.edge_alignments[(par,rel,child)])
if rel.lower().startswith("root"):
new_amr.roots.remove(par)
new_amr._remove_triple(par, rel, child)
new_amr.roots = []
for c in child:
new_amr.roots.append(c)
elif par in amr.roots and par not in new_amr.node_to_concepts:
new_amr.node_to_concepts[par] = None
new_amr.edge_alignments = {}
return new_amr
def to_concept_edge_labels(self, warn=False):
""""
Return an new DAG with equivalent structure as this AMR (plus additional root-edge), in
which concepts are pushed into incoming edges.
"""
new_amr = self.clone(warn=warn)
for par, rel, child in self.triples(instances = False):
#new_rel = "%s:%s" % (rel, ":".join(self.node_to_concepts[c] for c in child if c in self.node_to_concepts))
children = [conv(self.node_to_concepts[c]) if c in self.node_to_concepts and self.node_to_concepts[c] else conv(c) for c in child]
new_rel = '%s:%s' % (rel, ':'.join(children))
new_amr._replace_triple(par,rel,child, par, new_rel, child, warn=warn)
            # Copy edge alignments
if (par, rel, child) in self.edge_alignments:
new_amr.edge_alignments[(par, new_rel, child)] = self.edge_alignments[(par,rel,child)]
# Copy node alignments of children
for c in child:
if c in self.node_alignments:
if not (par, new_rel, child) in new_amr.edge_alignments:
new_amr.edge_alignments[(par, new_rel, child)] = []
new_amr.edge_alignments[(par, new_rel, child)].extend(self.node_alignments[c])
for e in new_amr.edge_alignments:
new_amr.edge_alignments[e] = list(set(new_amr.edge_alignments[e]))
for r in self.roots:
if r in self.node_to_concepts:
new_rel = "ROOT:%s" % conv(self.node_to_concepts[r])
else:
new_rel = "ROOT"
newtriple = ('root0', new_rel, (r,))
new_amr._add_triple(*newtriple, warn=warn)
new_amr.roots.remove(r)
if not "root0" in new_amr.roots:
new_amr.roots.append('root0' )
if r in self.node_alignments:
new_amr.edge_alignments[newtriple] = self.node_alignments[r]
return new_amr
# def make_rooted_amr(self, root, swap_callback=None and (lambda oldtrip,newtrip: True), warn=sys.stderr):
# """
# Flip edges in the AMR so that all nodes are reachable from the unique root.
# If 'swap_callback' is provided, it is called whenever an edge is inverted with
# two arguments: the old triple and the new triple.
# >>> x =Hgraph.from_triples( [(u'j', u'ARG0', (u'p',)), (u'j', u'ARG1', (u'b',)), (u'j', u'ARGM-PRD', ('t',)), (u'j', 'time', ('d',)), (u'p', 'age', ('t1',)), (u'p', 'name', ('n',)), ('t', u'ARG0-of', ('d1',)), ('d', 'day', (29,)), ('d', 'month', (11,)), ('t1', 'quant', (61,)), ('t1', 'unit', ('y',)), ('n', 'op1', (u'"Pierre"',)), ('n', 'op2', (u'"Vinken"',)), ('d1', u'ARG0', ('t',)), ('d1', u'ARG3', (u'n1',))] , {u'b': u'board', 'd': 'date-entity', u'j': u'join-01-ROOT', 't1': 'temporal-quantity', u'p': u'person', 't': 'thing', 'y': 'year', u'n1': u'nonexecutive', 'n': 'name', 'd1': 'direct-01'} )
# >>> x
# DAG{ (j / join-01-ROOT :ARG0 (p / person :age (t1 / temporal-quantity :quant 61 :unit (y / year) ) :name (n / name :op1 "Pierre" :op2 "Vinken")) :ARG1 (b / board) :ARGM-PRD (t / thing :ARG0-of (d1 / direct-01 :ARG0 t :ARG3 (n1 / nonexecutive) )) :time (d / date-entity :day 29 :month 11)) }
# >>> x.make_rooted_amr("n")
# DAG{ (n / name :name-of (p / person :ARG0-of (j / join-01-ROOT :ARG1 (b / board) :ARGM-PRD (t / thing :ARG0-of (d1 / direct-01 :ARG0 t :ARG3 (n1 / nonexecutive) )) :time (d / date-entity :day 29 :month 11)) :age (t1 / temporal-quantity :quant 61 :unit (y / year) )) :op1 "Pierre" :op2 "Vinken") }
# """
# if not root in self:
# raise ValueError, "%s is not a node in this AMR." % root
# amr = self.clone(warn=warn)
#
# all_nodes = set(amr.get_nodes())
#
# unreached = True
# while unreached:
# reach_triples = amr.triples(start_node = root, instances = False)
# reached = set()
# reached.add(root)
# for p,r,c in reach_triples:
# reached.add(p)
# reached.update(c)
#
# unreached = all_nodes - reached
#
# out_triples = [(p,r,c) for p,r,c in amr.triples(refresh = True, instances = False) if c[0] in reached and p in unreached]
# for p,r,c in out_triples:
# newtrip = (c[0],"%s-of" %r, (p,))
# amr._replace_triple(p,r,c,*newtrip, warn=warn)
# if swap_callback: swap_callback((p,r,c),newtrip)
# amr.triples(refresh = True)
# amr.roots = [root]
# amr.node_alignments = self.node_alignments
# return amr
def stringify(self, warn=False):
"""
Convert all special symbols in the AMR to strings.
"""
new_amr = Hgraph()
def conv(node): # Closure over new_amr
if isinstance(node, StrLiteral):
var = str(node)[1:-1]
new_amr._set_concept(var, str(node))
return var
else:
return str(node)
for p,r,c in self.triples(instances = False):
c_new = tuple([conv(child) for child in c]) if type(c) is tuple else conv(c)
p_new = conv(p)
new_amr._add_triple(p_new, r, c_new, warn=warn)
new_amr.roots = [conv(r) for r in self.roots]
new_amr.external_nodes = dict((conv(r),val) for r,val in self.external_nodes.items())
new_amr.rev_external_nodes = dict((val, conv(r)) for val,r in self.rev_external_nodes.items())
new_amr.edge_alignments = self.edge_alignments
new_amr.node_alignments = self.node_alignments
for node in self.node_to_concepts:
new_amr._set_concept(conv(node), self.node_to_concepts[node])
return new_amr
# Class methods to create new AMRs
@classmethod
def from_string(cls, amr_string):
"""
Initialize a new abstract meaning representation from a Pennman style string.
"""
if not cls._parser_singleton: # Initialize the AMR parser only once
from graph_description_parser import GraphDescriptionParser, LexerError, ParserError
            cls._parser_singleton = GraphDescriptionParser()
        amr = cls._parser_singleton.parse_string(amr_string)
return amr
@classmethod
def from_triples(cls, triples, concepts, roots=None, warn=sys.stderr):
"""
Initialize a new hypergraph from a collection of triples and a node to concept map.
"""
graph = Hgraph() # Make new DAG
for parent, relation, child in triples:
if isinstance(parent, basestring):
new_par = parent.replace("@","")
if parent.startswith("@"):
graph.external_nodes.append(new_par)
else:
new_par = parent
if type(child) is tuple:
new_child = []
for c in child:
if isinstance(c, basestring):
new_c = c.replace("@","")
new_child.append(new_c)
nothing = graph[new_c]
if c.startswith("@"):
graph.external_nodes.append(new_c)
else:
nothing = graph[c]
new_child.append(c)
new_child = tuple(new_child)
else: # Allow triples to have single string children for convenience.
                  # and backward compatibility.
if isinstance(child, basestring):
tmpchild = child.replace("@","")
if child.startswith("@"):
graph.external_nodes.append(tmpchild)
new_child = (tmpchild,)
nothing = graph[tmpchild]
else:
new_child = (child,)
nothing = graph[child]
graph._add_triple(new_par, relation, new_child, warn=warn)
# Allow the passed root to be either an iterable of roots or a single root
if roots:
try: # Try to interpret roots as iterable
graph.roots.extend(roots)
except TypeError: # If this fails just use the whole object as root
graph.roots = list([roots])
else:
graph.roots = graph.find_roots(warn=warn)
graph.node_to_concepts = concepts
graph.__cached_triples = None
return graph
    # Accessor and query methods
def get_concept(self, node):
"""
Retrieve the concept name for a node.
"""
return self.node_to_concepts[node]
def _set_concept(self, node, concept):
"""
Set concept name for a node.
"""
self.node_to_concepts[node] = concept
def get_nodes(self):
"""
Return the set of node identifiers in the DAG.
"""
# The default dictionary creates keys for hyperedges... not sure why.
# We just ignore them.
ordered = self.get_ordered_nodes()
res = ordered.keys()
res.sort(lambda x,y: cmp(ordered[x], ordered[y]))
return res
def has_edge(self, par, rel, child):
return self.has_triple(par, rel, child)
def has_triple(self, parent, relation, child):
"""
Return True if the DAG contains the given triple.
"""
try:
result = child in self[parent].get(relation)
except (TypeError, ValueError):
return False
return result
def get_all_depths(self):
if not self.__cached_depth:
self.triples()
return self.__cached_depth
def get_depth(self, triple):
if not self.__cached_depth:
self.triples()
return self.__cached_depth[triple]
def out_edges(self, node, nodelabels = False):
"""
Return outgoing edges from this node.
"""
assert node in self
if nodelabels:
result = []
nlabel = self.node_to_concepts[node]
for rel, child in self[node].items():
if type(child) is tuple:
nchild = tuple([(c, self.node_to_concepts[c]) for c in child])
else:
nchild = (child, self.node_to_concepts[child])
result.append(((node, nlabel), rel, nchild))
return result
return [(node, rel, child) for rel, child in self[node].items()]
#def root_edges(self):
# """
# Return a list of out_going edges from the root nodes.
# """
# return flatten([self.out_edges(r) for r in self.roots])
def get_all_in_edges(self, nodelabels = False):
"""
        Return a dictionary mapping nodes to their incoming edges.
"""
res = defaultdict(list)
for node, rel, child in self.triples(nodelabels = nodelabels):
if type(child) is tuple:
for c in child:
if nodelabels:
res[c[0]].append((node,rel,child))
else:
res[c].append((node,rel,child))
else:
if nodelabels:
res[child].append((node,rel,child))
else:
res[child[0]].append((node,rel,child))
return res
def in_edges(self, node, nodelabels = False):
"""
Return incoming edges for a single node.
"""
return self.get_all_in_edges(nodelabels)[node]
def nonterminal_edges(self):
"""
        Retrieve all edges with nonterminal labels from the DAG.
"""
return [t for t in self.triples() if isinstance(t[1], NonterminalLabel)]
def get_terminals_and_nonterminals(self, nodelabels = False):
"""
Return a tuple in which the first element is a set of all terminal labels
and the second element is a set of all nonterminal labels.
"""
# This is used to compute reachability of grammar rules
terminals = set()
nonterminals = set()
for p,r,children in self.triples():
if isinstance(r, NonterminalLabel):
nonterminals.add(r.label)
else:
if nodelabels:
terminals.add((self.node_to_concepts[p],r,tuple([self.node_to_concepts[c] for c in children])))
else:
terminals.add(r)
return terminals, nonterminals
def get_external_nodes(self):
"""
Retrieve the list of external nodes of this dag fragment.
"""
return self.external_nodes
def reach(self, node):
"""
Return the set of nodes reachable from a node
"""
res = set()
for p,r,c in self.triples(start_node = node, instances = False):
res.add(p)
if type(c) is tuple:
res.update(c)
else:
res.add(c)
return res
def find_roots(self, warn=sys.stderr):
"""
        Find and return a list of the roots of the DAG. This does NOT set the 'roots' attribute.
"""
        # there cannot be an ordering of root nodes, so it is okay to return a set
parents = set()
for k in self.keys():
if type(k) is tuple:
parents.update(k)
else:
parents.add(k)
children = set()
for node in parents:
for v in self[node].values():
if type(v) is tuple:
children.update(v)
else:
children.add(v)
roots = list(parents - children)
not_found = parents.union(children)
for r in roots:
x = self.triples(start_node = r, instances = False)
for p,r,c in x:
if p in not_found:
not_found.remove(p)
if type(c) is tuple:
for ch in c:
if ch in not_found:
not_found.remove(ch)
if c in not_found:
not_found.remove(c)
while not_found:
parents = sorted([x for x in not_found if self[x]], key=lambda a:len(self.triples(start_node = a)))
if not parents:
if warn: warn.write("WARNING: orphaned leafs %s.\n" % str(not_found))
roots.extend(list(not_found))
return roots
new_root = parents.pop()
for p,r,c in self.triples(start_node = new_root):
if p in not_found:
not_found.remove(p)
if type(c) is tuple:
for ch in c:
if ch in not_found:
not_found.remove(ch)
if c in not_found:
not_found.remove(c)
roots.append(new_root)
return roots
def get_ordered_nodes(self):
"""
        Get a mapping of nodes in this DAG to integers specifying a total order of
        nodes (ties in the partial order are broken according to edge label).
"""
order = {}
count = 0
for par, rel, child in self.triples(instances = False):
if not par in order:
order[par] = count
count += 1
if type(child) is tuple:
for c in child:
if not c in order:
order[c] = count
count += 1
else:
if not child in order:
order[child] = count
count += 1
return order
def find_leaves(self):
"""
Get all leaves in a DAG.
"""
out_count = defaultdict(int)
for par, rel, child in self.triples():
out_count[par] += 1
if type(child) is tuple:
for c in child:
if not c in out_count:
out_count[c] = 0
else:
if not child in out_count:
out_count[child] = 0
result = [n for n in out_count if out_count[n]==0]
order = self.get_ordered_nodes()
result.sort(lambda x,y: cmp(order[x], order[y]))
return result
def get_reentrant_nodes(self):
"""
Get a list of nodes that have an in-degree > 1.
"""
in_count = defaultdict(int)
for par, rel, child in self.triples():
if type(child) is tuple:
for c in child:
in_count[c] += 1
else:
in_count[child] += 1
result = [n for n in in_count if in_count[n]>1]
order = self.get_ordered_nodes()
result.sort(lambda x,y: cmp(order[x], order[y]))
return result
def get_weakly_connected_roots(self, warn=sys.stderr):
"""
Return a set of root nodes for each weakly connected component.
        >>> x = Hgraph.from_triples([("a","B","c"), ("d","E","f")], {})
>>> x.get_weakly_connected_roots()
set(['a', 'd'])
        >>> y = Hgraph.from_triples([("a","B","c"), ("d","E","f"),("c","H","f")], {})
>>> y.get_weakly_connected_roots()
set(['a'])
>>> y.is_connected()
True
"""
roots = list(self.find_roots(warn=warn))
if len(roots) == 1:
return roots
merged = defaultdict(list)
node_to_comp = defaultdict(list)
equiv = {}
for r in roots:
for n in self.reach(r):
node_to_comp[n].append(r)
if len(node_to_comp[n]) == 2:
if not r in equiv:
equiv[r] = node_to_comp[n][0]
final = set()
for r in roots:
unique_repr = r
while unique_repr in equiv:
unique_repr = equiv[unique_repr]
final.add(unique_repr)
return final
#new_roots = set()
#for r in nodes:
def is_connected(self, warn=sys.stderr):
return len(self.get_weakly_connected_roots(warn=warn)) == 1
# Methods that traverse the hypergraph and represent it in different ways
def dfs(self, extractor = lambda node, firsthit, leaf: node.__repr__(), combiner = lambda par,\
childmap, depth: {par: childmap.items()}, hedge_combiner = lambda x: tuple(x)):
"""
Recursively traverse the dag depth first starting at node. When traveling up through the
recursion a value is extracted from each child node using the provided extractor method,
then the values are combined using the provided combiner method. At the root node the
result of the combiner is returned. Extractor takes a "firsthit" argument that is true
the first time a node is touched.
"""
tabu = set()
tabu_edge = set()
def rec_step(node, depth):
if type(node) is tuple: # Hyperedge case
pass
else:
node = (node,)
allnodes = []
for n in node:
firsthit = not n in tabu
tabu.add(n)
leaf = False if self[n] else True
#firsthit = not node in tabu
extracted = extractor(n, firsthit, leaf)
child_map = ListMap()
for rel, child in self[n].items():
if not (n, rel, child) in tabu_edge:
if child in tabu:
child_map.append(rel, extractor(child, False, leaf))
#pass
else:
tabu_edge.add((n, rel, child))
child_map.append(rel, rec_step(child, depth + 1))
if child_map:
combined = combiner(extracted, child_map, depth)
allnodes.append(combined)
else:
allnodes.append(extracted)
return hedge_combiner(allnodes)
return [rec_step(node, 0) for node in self.roots]
def triples(self, instances = False, start_node = None, refresh = False, nodelabels = False):
"""
Retrieve a list of (parent, edge-label, tails) triples.
"""
if (not (refresh or start_node or nodelabels!=self.__nodelabels)) and self.__cached_triples:
return self.__cached_triples
triple_to_depth = {}
triples = []
tabu = set()
if start_node:
queue = [(start_node,0)]
else:
queue = [(x,0) for x in self.roots]
while queue:
node, depth = queue.pop(0)
if not node in tabu:
tabu.add(node)
for rel, child in sorted(self[node].items(), key=itemgetter(0)):
if nodelabels:
newchild = tuple([(n,self.node_to_concepts[n]) for n in child])
newnode = (node, self.node_to_concepts[node])
t = (newnode, rel, newchild)
else:
t = (node, rel, child)
triples.append(t)
triple_to_depth[t] = depth
if type(child) is tuple:
for c in child:
if not c in tabu:
queue.append((c, depth+1))
else:
if not child in tabu:
queue.append((child, depth+1))
#if instances:
# if instances:
# for node, concept in self.node_to_concepts.items():
# triples.append((node, 'instance', concept))
# self.__cached_triples = res
if not start_node:
self.__cached_triples = triples
self.__cached_depth = triple_to_depth
self.__nodelabels = nodelabels
return triples
def __str__(self):
reentrant_nodes = self.get_reentrant_nodes()
def extractor(node, firsthit, leaf):
if node is None:
return "root"
if type(node) is tuple or type(node) is list:
return " ".join("%s*%i" % (n, self.external_nodes[n]) if n in self.external_nodes else n for n in node)
else:
if type(node) is int or type(node) is float or isinstance(node, (Literal, StrLiteral)):
return str(node)
else:
if firsthit:
if node in self.node_to_concepts and self.node_to_concepts[node]:
concept = self.node_to_concepts[node]
if node in self.external_nodes:
return "%s%s*%i " % ("%s." % node if node in reentrant_nodes else "", concept, self.external_nodes[node])
else:
return "%s%s " % ("%s." % node if node in reentrant_nodes else "", concept)
else:
if node in self.external_nodes:
return "%s.*%i " % (node if node in reentrant_nodes else "", self.external_nodes[node])
else:
return "%s." % (node if node in reentrant_nodes else "")
else:
return "%s." % (node if node in reentrant_nodes else "")
def combiner(nodestr, childmap, depth):
childstr = " ".join(["\n%s %s %s" % (depth * "\t", ":%s" % rel if rel else "", child) for rel, child in sorted(childmap.items())])
return "(%s %s)" % (nodestr, childstr)
def hedgecombiner(nodes):
return " ".join(nodes)
return " ".join(self.dfs(extractor, combiner, hedgecombiner))
def to_amr_string(self):
def extractor(node, firsthit, leaf):
if node is None:
return "root"
if type(node) is tuple or type(node) is list:
return ",".join("@%s" % (n) if n in self.external_nodes else n for n in node)
else:
if type(node) is int or type(node) is float or isinstance(node, (Literal, StrLiteral)):
alignmentstr = "~e.%s" % ",".join(str(x) for x in self.node_alignments[node]) if node in self.node_alignments else ""
return "%s%s" % (str(node), alignmentstr)
else:
if firsthit and node in self.node_to_concepts:
concept = self.node_to_concepts[node]
alignmentstr = "~e.%s" % ",".join(str(x) for x in self.node_alignments[node]) if node in self.node_alignments else ""
if not self[node]:
if node in self.external_nodes:
return "(@%s / %s%s) " % (node, concept, alignmentstr)
else:
return "(%s / %s%s) " % (node, concept, alignmentstr)
else:
if node in self.external_nodes:
return "@%s / %s%s " % (node, concept, alignmentstr)
else:
return "%s / %s%s " % (node, concept, alignmentstr)
else:
if node in self.external_nodes:
return "@%s" % node
else:
return "%s" % node
def combiner(nodestr, childmap, depth):
childstr = " ".join(["\n%s :%s %s" % (depth * "\t", rel, child) for rel, child in sorted(childmap.items())])
return "(%s %s)" % (nodestr, childstr)
def hedgecombiner(nodes):
return " ,".join(nodes)
return "\n".join(self.dfs(extractor, combiner, hedgecombiner))
def to_string(self, newline = False):
if newline:
return str(self)
else:
return re.sub(r"\s+", " ", str(self))
def graph_yield(self):
"""
Return the yield of this graph (a list of node concepts and edge labels in traversal order).
Hyperedge tentacles are ordered. If hyperedges are used to represent
trees this returns the intuitive yield of this tree.
If a node has multiple children, their order is arbitrary.
"""
tabu = set()
def rec_step(node, depth):
if type(node) is not tuple:
node = (node,)
allnodes = []
for n in node:
firsthit = not n in tabu
tabu.add(n)
leaf = False if self[n] else True
#firsthit = not node in tabu
extracted = self.node_to_concepts[n]
#child_map = ListMap()
if extracted:
allnodes.append(extracted)
for rel, child in self[n].items():
if child in tabu:
allnodes.append(rel)
else:
if rel:
allnodes.append(rel)
if child:
allnodes.extend(rec_step(child, depth +1))
return allnodes
return sum([rec_step(node, 0) for node in self.roots],[])
def get_dot(self, instances = True):
"""
Return a graphviz dot representation.
"""
return self._get_gv_graph(instances).to_string()
def _get_gv_graph(self, instances = True):
"""
Return a pygraphviz AGraph.
"""
require_graphics()
graph = pgv.AGraph(strict=False,directed=True)
graph.node_attr.update(height=0.1, width=0.1, shape='none')
graph.edge_attr.update(fontsize='9')
for node, rel, child in self.triples(instances):
nodestr, childstr = node, child
if not instances:
if node in self.node_to_concepts:
nodestr = "%s / %s" % (node, self.node_to_concepts[node])
if child in self.node_to_concepts:
childstr = "%s / %s" % (child, self.node_to_concepts[child])
graph.add_edge(nodestr, childstr, label=":%s"%rel)
return graph
def render(self, instances = True):
"""
Interactively view the graph using xdot.
"""
require_graphics()
dot = self.get_dot(instances)
window = xdot.DotWindow()
window.set_dotcode(dot)
def render_to_file(self, file_or_name, instances = True, *args, **kwargs):
"""
Save graph to file
"""
graph = self._get_gv_graph(instances)
graph.draw(file_or_name, prog="dot", *args, **kwargs)
def clone(self, warn=sys.stderr):
"""
Return a deep copy of the AMR.
"""
new = Hgraph()
new.roots = copy.copy(self.roots)
new.external_nodes = copy.copy(self.external_nodes)
new.rev_external_nodes = copy.copy(self.rev_external_nodes)
new.node_to_concepts = copy.copy(self.node_to_concepts)
new.node_alignments, new.edge_alignments = self.node_alignments, self.edge_alignments
for triple in self.triples(instances = False):
new._add_triple(*copy.copy(triple), warn=warn)
return new
def _get_canonical_nodes(self, prefix = ""):
"""
Get a mapping from node identifiers to IDs of the form x[prefix]number.
This uses the hash code for each node, which depends only on the DAG topology (not on node IDs).
Therefore two DAGs with the same structure will receive the same canonical node labels.
"""
# Get node hashes, then sort according to hash_code and use the index into this
# sorted list as new ID.
node_hashes = self._get_node_hashes()
nodes = sorted(node_hashes.keys(), key=lambda x: int(node_hashes[x]))
return dict([(node.replace("@",""), "x%s%s" % (prefix, str(node_id)) ) for node_id, node in enumerate(nodes)])
def clone_canonical(self, external_dict = {}, prefix = "", warn=False):
"""
Return a version of the DAG where all nodes have been replaced with canonical IDs.
"""
new = Hgraph()
node_map = self._get_canonical_nodes(prefix)
for k,v in external_dict.items():
node_map[k] = v
#return self.apply_node_map(node_map)
new.roots = [node_map[x] for x in self.roots]
for node in self.node_alignments:
new.node_alignments[node_map[node]] = self.node_alignments[node]
for par, rel, child in self.edge_alignments:
if type(child) is tuple:
new.edge_alignments[(node_map[par] if par in node_map else par, rel, tuple([(node_map[c] if c in node_map else c) for c in child]))] = self.edge_alignments[(par, rel, child)]
else:
new.edge_alignments[(node_map[par] if par in node_map else par, rel, node_map[child] if child in node_map else child)] = self.edge_alignments[(par, rel, child)]
new.external_nodes = dict((node_map[x], self.external_nodes[x]) for x in self.external_nodes)
new.rev_external_nodes = dict((self.external_nodes[x], node_map[x]) for x in self.external_nodes)
for par, rel, child in self.triples(instances = False):
if type(child) is tuple:
new._add_triple(node_map[par], rel, tuple([node_map[c] for c in child]), warn=warn)
else:
new._add_triple(node_map[par], rel, node_map[child], warn=warn)
new.node_to_concepts = {}
for node in self.node_to_concepts:
if node in node_map:
new.node_to_concepts[node_map[node]] = self.node_to_concepts[node]
else:
new.node_to_concepts[node] = self.node_to_concepts[node]
return new
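# Illustrative sketch (g1 and g2 are hypothetical graphs): canonicalisation
# renames nodes to ids of the form x<prefix><n> based purely on structure, so
# two structurally identical graphs end up with identical node names and can
# be compared directly.
#
#   g1c = g1.clone_canonical(prefix="a")
#   g2c = g2.clone_canonical(prefix="a")
#   same_structure = (g1c == g2c)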
def apply_node_map(self, node_map, warn=False):
new = Hgraph()
new.roots = [node_map[x] if x in node_map else x for x in self.roots ]
new.external_nodes = dict([(node_map[x], self.external_nodes[x]) if x in node_map else (x, self.external_nodes[x]) for x in self.external_nodes])
for node in self.node_alignments:
new.node_alignments[node_map[node]] = self.node_alignments[node]
for par, rel, child in self.edge_alignments:
if type(child) is tuple:
new.edge_alignments[(node_map[par] if par in node_map else par, rel, tuple([(node_map[c] if c in node_map else c) for c in child]))] = self.edge_alignments[(par, rel, child)]
else:
new.edge_alignments[(node_map[par] if par in node_map else par, rel, node_map[child] if child in node_map else child)] = self.edge_alignments[(par, rel, child)]
for par, rel, child in Dag.triples(self):
if type(child) is tuple:
new._add_triple(node_map[par] if par in node_map else par, rel, tuple([(node_map[c] if c in node_map else c) for c in child]), warn=warn)
else:
new._add_triple(node_map[par] if par in node_map else par, rel, node_map[child] if child in node_map else child, warn=warn)
new.__cached_triples = None
for n in self.node_to_concepts:
if n in node_map:
new.node_to_concepts[node_map[n]] = self.node_to_concepts[n]
else:
new.node_to_concepts[n] = self.node_to_concepts[n]
return new
def find_nt_edge(self, label, index):
for p,r,c in self.triples():
if type(r) is NonterminalLabel:
if r.label == label and r.index == index:
return p,r,c
def remove_fragment(self, dag):
"""
Remove a collection of hyperedges from the DAG.
"""
res_dag = Hgraph.from_triples([edge for edge in self.triples() if not dag.has_edge(*edge)], dag.node_to_concepts)
res_dag.roots = [r for r in self.roots if r in res_dag]
res_dag.external_nodes = dict([(n, self.external_nodes[n]) for n in self.external_nodes if n in res_dag])
return res_dag
def replace_fragment(self, dag, new_dag, partial_boundary_map = {}, warn=False):
"""
Replace a collection of hyperedges in the DAG with another collection of edges.
"""
# First get a mapping of boundary nodes in the new fragment to
# boundary nodes in the fragment to be replaced
leaves = dag.find_leaves()
external = new_dag.get_external_nodes()
assert len(external) == len(leaves)
boundary_map = dict([(x, leaves[external[x]]) for x in external])
dagroots = dag.find_roots() if not dag.roots else dag.roots
assert len(dagroots) == len(new_dag.roots)
for i in range(len(dagroots)):
boundary_map[new_dag.roots[i]] = dagroots[i]
boundary_map.update(partial_boundary_map)
# Make sure node labels agree
for x in boundary_map:
if new_dag.node_to_concepts[x] != dag.node_to_concepts[boundary_map[x]]:
raise DerivationException("Derivation produces contradictory node labels.")
# now remove the old fragment
res_dag = self.remove_fragment(dag)
res_dag.roots = [boundary_map[x] if x in boundary_map else x for x in self.roots]
res_dag.external_nodes = dict([(boundary_map[x], self.external_nodes[x]) if x in boundary_map else (x, self.external_nodes[x]) for x in self.external_nodes])
# and add the remaining edges, fusing boundary nodes
for par, rel, child in new_dag.triples():
new_par = boundary_map[par] if par in boundary_map else par
if type(child) is tuple: #Hyperedge case
new_child = tuple([boundary_map[c] if c in boundary_map else c for c in child])
else:
new_child = boundary_map[child] if child in boundary_map else child
res_dag._add_triple(new_par, rel, new_child, warn=warn)
res_dag.node_to_concepts.update(new_dag.node_to_concepts)
return res_dag
def find_external_nodes(self, dag):
"""
Find the external nodes of the fragment dag in this Dag.
"""
# All nodes in the fragment that have an edge which is not itself in the fragment
dagroots = dag.roots if dag.roots else dag.find_roots()
return [l for l in dag if self[l] and not l in dagroots and \
(False in [dag.has_edge(*e) for e in self.in_edges(l)] or \
False in [dag.has_edge(*e) for e in self.out_edges(l)])]
def collapse_fragment(self, dag, label = None, unary = False, warn=False):
"""
Remove all edges in a collection and connect their boundary node with a single hyperedge.
>>> d1 = Dag.from_string("(A :foo (B :blubb (D :fee E) :back C) :bar C)")
>>> d2 = Dag.from_string("(A :foo (B :blubb D))")
>>> d1.find_external_nodes(d2)
['B', 'D']
>>> d_gold = Dag.from_string("(A :new (B :back C), (D :fee E) :bar C)")
>>> d1.collapse_fragment(d2, "new") == d_gold
True
"""
dagroots = dag.find_roots() if not dag.roots else dag.roots
if dag.external_nodes: # Can use specified external nodes
external = tuple(set(self.find_external_nodes(dag) +
dag.external_nodes))
else:
external = tuple(self.find_external_nodes(dag))
if not unary and not external:
# unless the unary flag is set, add a leaf as external node to avoid a unary hyperedge
external = (dag.find_leaves()[0],)
res_dag = self.remove_fragment(dag)
for r in dagroots:
res_dag._add_triple(r, label, external, warn=warn)
res_dag.roots = self.roots
return res_dag
# Methods that change the hypergraph
def _add_triple(self, parent, relation, child, warn=sys.stderr):
"""
Add a (parent, relation, child) triple to the DAG.
"""
if type(child) is not tuple:
child = (child,)
if parent in child:
#raise Exception('self edge!')
#sys.stderr.write("WARNING: Self-edge (%s, %s, %s).\n" % (parent, relation, child))
if warn: warn.write("WARNING: Self-edge (%s, %s, %s).\n" % (parent, relation, child))
#raise ValueError, "Cannot add self-edge (%s, %s, %s)." % (parent, relation, child)
for c in child:
x = self[c]
for rel, test in self[c].items():
if parent in test:
if warn: warn.write("WARNING: (%s, %s, %s) produces a cycle with (%s, %s, %s)\n" % (parent, relation, child, c, rel, test))
#raise ValueError,"(%s, %s, %s) would produce a cycle with (%s, %s, %s)" % (parent, relation, child, c, rel, test)
self[parent].append(relation, child)
def _replace_triple(self, parent1, relation1, child1, parent2, relation2, child2, warn=sys.stderr):
"""
Replace the (parent1, relation1, child1) triple in the DAG with (parent2, relation2, child2).
"""
self._remove_triple(parent1, relation1, child1)
self._add_triple(parent2, relation2, child2, warn=warn)
def _remove_triple(self, parent, relation, child):
"""
Delete a (parent, relation, child) triple from the DAG.
"""
try:
self[parent].remove(relation, child)
except ValueError:
raise ValueError("(%s, %s, %s) is not an AMR edge." % (parent, relation, child))
# HRG related methods
def compute_fw_table(self):
table = dict()
nodes = self.get_nodes()
for node in nodes:
table[(node,node)] = (0,None)
for oedge in self.out_edges(node):
for tnode in oedge[2]:
table[(node,tnode)] = (1,oedge)
table[(tnode,node)] = (1,oedge)
for n_k in nodes:
for n_i in nodes:
for n_j in nodes:
if not ((n_i, n_k) in table and (n_k, n_j) in table):
continue
k_dist = table[(n_i,n_k)][0] + table[(n_k,n_j)][0]
k_edge_forward = table[(n_k,n_j)][1]
k_edge_back = table[(n_k,n_i)][1]
if (n_i, n_j) not in table or k_dist < table[(n_i,n_j)][0]:
table[(n_i,n_j)] = (k_dist, k_edge_forward)
table[(n_j,n_i)] = (k_dist, k_edge_back)
self.fw_table = table
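# The resulting table maps a node pair (a, b) to (hop_distance, edge), where
# hyperedge tentacles are treated as undirected unit-length links and `edge`
# is a hyperedge lying on a shortest connecting path; pairs of mutually
# unreachable nodes simply do not appear in the table.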
def star(self, node):
return frozenset(self.in_edges(node) + self.out_edges(node))
class StrLiteral(str):
def __str__(self):
return '"%s"' % "".join(self)
def __repr__(self):
return "".join(self)
class SpecialValue(str):
pass
class Quantity(str):
pass
class Literal(str):
def __str__(self):
return "'%s" % "".join(self)
def __repr__(self):
return "".join(self)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
import datetime
import logging
import os
import re
import socket
import subprocess
import threading
import weakref
import six
import pkg_resources
import requests
from chef.auth import sign_request
from chef.exceptions import ChefServerError
from chef.rsa import Key
from chef.utils import json
from chef.utils.file import walk_backwards
api_stack = threading.local()
log = logging.getLogger('chef.api')
config_ruby_script = """
require 'chef'
Chef::Config.from_file('%s')
puts Chef::Config.configuration.to_json
""".strip()
def api_stack_value():
if not hasattr(api_stack, 'value'):
api_stack.value = []
return api_stack.value
class UnknownRubyExpression(Exception):
"""Token exception for unprocessed Ruby expressions."""
class ChefAPI(object):
"""The ChefAPI object is a wrapper for a single Chef server.
.. admonition:: The API stack
PyChef maintains a stack of :class:`ChefAPI` objects to be used with
other methods if an API object isn't given explicitly. The first
ChefAPI created will become the default, though you can set a specific
default using :meth:`ChefAPI.set_default`. You can also use a ChefAPI
as a context manager to create a scoped default::
with ChefAPI('http://localhost:4000', 'client.pem', 'admin'):
n = Node('web1')
"""
ruby_value_re = re.compile(r'#\{([^}]+)\}')
env_value_re = re.compile(r'ENV\[(.+)\]')
ruby_string_re = re.compile(r'^\s*(["\'])(.*?)\1\s*$')
def __init__(self, url, key, client, version='0.10.8', headers={}, ssl_verify=True):
self.url = url.rstrip('/')
self.parsed_url = six.moves.urllib.parse.urlparse(self.url)
if not isinstance(key, Key):
key = Key(key)
if not key.key:
raise ValueError("ChefAPI attribute 'key' was invalid.")
self.key = key
self.client = client
self.version = version
self.headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
self.version_parsed = pkg_resources.parse_version(self.version)
self.platform = self.parsed_url.hostname == 'api.opscode.com'
self.ssl_verify = ssl_verify
if not api_stack_value():
self.set_default()
@classmethod
def from_config_file(cls, path):
"""Load Chef API paraters from a config file. Returns None if the
config can't be used.
"""
log.debug('Trying to load from "%s"', path)
if not os.path.isfile(path) or not os.access(path, os.R_OK):
# Can't even read the config file
log.debug('Unable to read config file "%s"', path)
return
url = key_path = client_name = None
ssl_verify = True
for line in open(path):
if not line.strip() or line.startswith('#'):
continue # Skip blanks and comments
parts = line.split(None, 1)
if len(parts) != 2:
continue # Not a simple key/value, we can't parse it anyway
key, value = parts
md = cls.ruby_string_re.search(value)
if md:
value = md.group(2)
elif key == 'ssl_verify_mode':
log.debug('Found ssl_verify_mode: %r', value)
ssl_verify = (value.strip() != ':verify_none')
log.debug('ssl_verify = %s', ssl_verify)
else:
# Not a string, don't even try
log.debug('Value for {0} does not look like a string: {1}'.format(key, value))
continue
def _ruby_value(match):
expr = match.group(1).strip()
if expr == 'current_dir':
return os.path.dirname(path)
envmatch = cls.env_value_re.match(expr)
if envmatch:
envmatch = envmatch.group(1).strip('"').strip("'")
return os.environ.get(envmatch) or ''
log.debug('Unknown ruby expression in line "%s"', line)
raise UnknownRubyExpression
try:
value = cls.ruby_value_re.sub(_ruby_value, value)
except UnknownRubyExpression:
continue
if key == 'chef_server_url':
log.debug('Found URL: %r', value)
url = value
elif key == 'node_name':
log.debug('Found client name: %r', value)
client_name = value
elif key == 'client_key':
log.debug('Found key path: %r', value)
key_path = value
if not os.path.isabs(key_path):
# Relative paths are relative to the config file
key_path = os.path.abspath(os.path.join(os.path.dirname(path), key_path))
if not (url and client_name and key_path):
# No URL, no chance this was valid, try running Ruby
log.debug('No Chef server config found, trying Ruby parse')
url = key_path = client_name = None
proc = subprocess.Popen('ruby', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
script = config_ruby_script % path.replace('\\', '\\\\').replace("'", "\\'")
out, err = proc.communicate(script)
if proc.returncode == 0 and out.strip():
data = json.loads(out)
log.debug('Ruby parse succeeded with %r', data)
url = data.get('chef_server_url')
client_name = data.get('node_name')
key_path = data.get('client_key')
else:
log.debug('Ruby parse failed with exit code %s: %s', proc.returncode, out.strip())
if not url:
# Still no URL, can't use this config
log.debug('Still no Chef server URL found')
return
if not key_path:
# Try and use ./client.pem
key_path = os.path.join(os.path.dirname(path), 'client.pem')
if not os.path.isfile(key_path) or not os.access(key_path, os.R_OK):
# Can't read the client key
log.debug('Unable to read key file "%s"', key_path)
return
if not client_name:
client_name = socket.getfqdn()
return cls(url, key_path, client_name, ssl_verify=ssl_verify)
@staticmethod
def get_global():
"""Return the API on the top of the stack."""
while api_stack_value():
api = api_stack_value()[-1]()
if api is not None:
return api
del api_stack_value()[-1]
def set_default(self):
"""Make this the default API in the stack. Returns the old default if any."""
old = None
if api_stack_value():
old = api_stack_value().pop(0)
api_stack_value().insert(0, weakref.ref(self))
return old
def __enter__(self):
api_stack_value().append(weakref.ref(self))
return self
def __exit__(self, type, value, traceback):
del api_stack_value()[-1]
def _request(self, method, url, data, headers):
request = requests.api.request(method, url, headers=headers, data=data, verify=self.ssl_verify)
return request
def request(self, method, path, headers={}, data=None):
auth_headers = sign_request(key=self.key, http_method=method,
path=self.parsed_url.path+path.split('?', 1)[0], body=data,
host=self.parsed_url.netloc, timestamp=datetime.datetime.utcnow(),
user_id=self.client)
request_headers = {}
request_headers.update(self.headers)
request_headers.update(dict((k.lower(), v) for k, v in six.iteritems(headers)))
request_headers['x-chef-version'] = self.version
request_headers.update(auth_headers)
try:
response = self._request(method, self.url + path, data, dict(
(k.capitalize(), v) for k, v in six.iteritems(request_headers)))
except requests.ConnectionError as e:
raise ChefServerError(str(e))
if response.ok:
return response
raise ChefServerError.from_error(response.reason, code=response.status_code)
def api_request(self, method, path, headers={}, data=None):
headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
headers['accept'] = 'application/json'
if data is not None:
headers['content-type'] = 'application/json'
data = json.dumps(data)
response = self.request(method, path, headers, data)
return response.json()
def __getitem__(self, path):
return self.api_request('GET', path)
def autoconfigure(base_path=None):
"""Try to find a knife or chef-client config file to load parameters from,
starting from either the given base path or the current working directory.
The lookup order mirrors the one from Chef, first all folders from the base
path are walked back looking for .chef/knife.rb, then ~/.chef/knife.rb,
and finally /etc/chef/client.rb.
The first file that is found and can be loaded successfully will be loaded
into a :class:`ChefAPI` object.
"""
base_path = base_path or os.getcwd()
# Scan up the tree for a knife.rb or client.rb. If that fails try looking
# in /etc/chef. The /etc/chef check will never work in Win32, but it doesn't
# hurt either.
for path in walk_backwards(base_path):
config_path = os.path.join(path, '.chef', 'knife.rb')
api = ChefAPI.from_config_file(config_path)
if api is not None:
return api
# The walk didn't work, try ~/.chef/knife.rb
config_path = os.path.expanduser(os.path.join('~', '.chef', 'knife.rb'))
api = ChefAPI.from_config_file(config_path)
if api is not None:
return api
# Nothing in the home dir, try /etc/chef/client.rb
config_path = os.path.join(os.path.sep, 'etc', 'chef', 'client.rb')
api = ChefAPI.from_config_file(config_path)
if api is not None:
return api
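# Illustrative usage sketch (kept as a comment; it assumes a reachable Chef
# server and a knife.rb/client.rb that autoconfigure can locate -- '/nodes' is
# the standard Chef index endpoint, used here only as an example):
#
#   api = autoconfigure()
#   if api is not None:
#       with api:                    # make it the scoped default API
#           print(api['/nodes'])     # GET /nodes, signed as the configured client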
|
|
import os
import pytest
import requests
import subprocess
import signal
from os.path import (
abspath,
basename,
dirname,
exists,
join,
relpath,
split,
splitext,
)
from tests.plugins.upload_to_s3 import S3_URL
from tests.plugins.utils import (
info,
ok,
red,
warn,
write,
yellow,
)
from tests.plugins.image_diff import process_image_diff
from tests.plugins.phantomjs_screenshot import get_phantomjs_screenshot
from .collect_examples import example_dir
from .utils import (
deal_with_output_cells,
get_example_pngs,
no_ext,
)
@pytest.mark.examples
def test_server_examples(server_example, bokeh_server, diff, log_file):
# Note this is currently broken - server uses random sessions but we're
# calling for "default" here - this has been broken for a while.
# https://github.com/bokeh/bokeh/issues/3897
url = '%s/?bokeh-session-id=%s' % (bokeh_server, basename(no_ext(server_example)))
assert _run_example(server_example, log_file) == 0, 'Example did not run'
_assert_snapshot(server_example, url, 'server', diff)
if diff:
_get_pdiff(server_example, diff)
@pytest.mark.examples
def test_notebook_examples(notebook_example, jupyter_notebook, diff):
notebook_port = pytest.config.option.notebook_port
url_path = join(*_get_path_parts(abspath(notebook_example)))
url = 'http://localhost:%d/notebooks/%s' % (notebook_port, url_path)
assert deal_with_output_cells(notebook_example), 'Notebook failed'
_assert_snapshot(notebook_example, url, 'notebook', diff)
if diff:
_get_pdiff(notebook_example, diff)
@pytest.mark.examples
def test_file_examples(file_example, diff, log_file):
html_file = "%s.html" % no_ext(file_example)
url = 'file://' + html_file
assert _run_example(file_example, log_file) == 0, 'Example did not run'
_assert_snapshot(file_example, url, 'file', diff)
if diff:
_get_pdiff(file_example, diff)
def _get_pdiff(example, diff):
test_png, ref_png, diff_png = get_example_pngs(example, diff)
info("generated image: " + test_png)
retrieved_reference_image = _get_reference_image_from_s3(example, diff)
if retrieved_reference_image:
ref_png_path = dirname(ref_png)
if not exists(ref_png_path):
os.makedirs(ref_png_path)
with open(ref_png, "wb") as f:
f.write(retrieved_reference_image)
info("saved reference: " + ref_png)
code = process_image_diff(diff_png, test_png, ref_png)
if code != 0:
warn("generated and reference images differ")
warn("diff: " + diff_png)
else:
ok("generated and reference images match")
def _get_path_parts(path):
parts = []
while True:
newpath, tail = split(path)
parts.append(tail)
path = newpath
if tail == 'examples':
break
parts.reverse()
return parts
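# For example, _get_path_parts("/home/user/bokeh/examples/plotting/file/line.py")
# returns ['examples', 'plotting', 'file', 'line.py'] -- everything from the
# 'examples' directory downwards, in order (the path shown is hypothetical).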
def _print_phantomjs_output(result):
errors = result['errors']
messages = result['messages']
resources = result['resources']
for message in messages:
msg = message['msg']
line = message.get('line')
source = message.get('source')
if source is None:
write(msg)
elif line is None:
write("%s: %s" % (source, msg))
else:
write("%s:%s: %s" % (source, line, msg))
# Process resources
for resource in resources:
url = resource['url']
if url.endswith(".png"):
ok("%s: %s (%s)" % (url, yellow(resource['status']), resource['statusText']))
else:
warn("Resource error:: %s: %s (%s)" % (url, red(resource['status']), resource['statusText']))
# You can have a successful test, and still have errors reported, so not failing here.
for error in errors:
warn("%s: %s" % (red("PhatomJS Error: "), error['msg']))
for item in error['trace']:
write(" %s: %d" % (item['file'], item['line']))
def _assert_snapshot(example, url, example_type, diff):
# Get setup datapoints
screenshot_path, _, _ = get_example_pngs(example, diff)
if example_type == 'notebook':
wait = pytest.config.option.notebook_phantom_wait * 1000
height = 2000
else:
wait = 1000
height = 1000
result = get_phantomjs_screenshot(url, screenshot_path, wait, height=height)
status = result['status']
errors = result['errors']
messages = result['messages']
resources = result['resources']
if status != 'success':
assert False, "PhantomJS did not succeed: %s | %s | %s" % (errors, messages, resources)
else:
if pytest.config.option.verbose:
_print_phantomjs_output(result)
assert True
def _get_reference_image_from_s3(example, diff):
example_path = relpath(splitext(example)[0], example_dir)
ref_loc = join(diff, example_path + ".png")
ref_url = join(S3_URL, ref_loc)
response = requests.get(ref_url)
if not response.ok:
info("reference image %s doesn't exist" % ref_url)
return None
return response.content
def _run_example(example, log_file):
example_path = join(example_dir, example)
code = """\
__file__ = filename = '%s'
import random
random.seed(1)
import numpy as np
np.random.seed(1)
with open(filename, 'rb') as example:
exec(compile(example.read(), filename, 'exec'))
""" % example_path
cmd = ["python", "-c", code]
cwd = dirname(example_path)
env = os.environ.copy()
env['BOKEH_RESOURCES'] = 'relative'
env['BOKEH_BROWSER'] = 'none'
class Timeout(Exception):
pass
def alarm_handler(sig, frame):
raise Timeout
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(10)
try:
proc = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=log_file, stderr=log_file)
return proc.wait()
except Timeout:
warn("Timeout - Example timed out when attempting to run")
proc.kill()
return 0
finally:
signal.alarm(0)
|
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import array
import contextlib
import inspect
import traceback
import warnings
from xcffib.xproto import EventMask, StackMode, SetMode
import xcffib.xproto
from . import command
from . import utils
from . import hook
from .log_utils import logger
# ICCM Constants
NoValue = 0x0000
XValue = 0x0001
YValue = 0x0002
WidthValue = 0x0004
HeightValue = 0x0008
AllValues = 0x000F
XNegative = 0x0010
YNegative = 0x0020
USPosition = (1 << 0)
USSize = (1 << 1)
PPosition = (1 << 2)
PSize = (1 << 3)
PMinSize = (1 << 4)
PMaxSize = (1 << 5)
PResizeInc = (1 << 6)
PAspect = (1 << 7)
PBaseSize = (1 << 8)
PWinGravity = (1 << 9)
PAllHints = (PPosition | PSize | PMinSize | PMaxSize | PResizeInc | PAspect)
InputHint = (1 << 0)
StateHint = (1 << 1)
IconPixmapHint = (1 << 2)
IconWindowHint = (1 << 3)
IconPositionHint = (1 << 4)
IconMaskHint = (1 << 5)
WindowGroupHint = (1 << 6)
MessageHint = (1 << 7)
UrgencyHint = (1 << 8)
AllHints = (InputHint | StateHint | IconPixmapHint | IconWindowHint |
IconPositionHint | IconMaskHint | WindowGroupHint | MessageHint |
UrgencyHint)
WithdrawnState = 0
DontCareState = 0
NormalState = 1
ZoomState = 2
IconicState = 3
InactiveState = 4
RectangleOut = 0
RectangleIn = 1
RectanglePart = 2
VisualNoMask = 0x0
VisualIDMask = 0x1
VisualScreenMask = 0x2
VisualDepthMask = 0x4
VisualClassMask = 0x8
VisualRedMaskMask = 0x10
VisualGreenMaskMask = 0x20
VisualBlueMaskMask = 0x40
VisualColormapSizeMask = 0x80
VisualBitsPerRGBMask = 0x100
VisualAllMask = 0x1FF
ReleaseByFreeingColormap = 1
BitmapSuccess = 0
BitmapOpenFailed = 1
BitmapFileInvalid = 2
BitmapNoMemory = 3
XCSUCCESS = 0
XCNOMEM = 1
XCNOENT = 2
# float states
NOT_FLOATING = 1 # not floating
FLOATING = 2
MAXIMIZED = 3
FULLSCREEN = 4
TOP = 5
MINIMIZED = 6
_NET_WM_STATE_REMOVE = 0
_NET_WM_STATE_ADD = 1
_NET_WM_STATE_TOGGLE = 2
def _geometry_getter(attr):
def get_attr(self):
if getattr(self, "_" + attr) is None:
g = self.window.get_geometry()
# trigger the geometry setter on all these
self.x = g.x
self.y = g.y
self.width = g.width
self.height = g.height
return getattr(self, "_" + attr)
return get_attr
def _geometry_setter(attr):
def f(self, value):
if not isinstance(value, int):
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
logger.error("!!!! setting %s to a non-int %s; please report this!", attr, value)
logger.error(''.join(stack_trace[:-1]))
value = int(value)
setattr(self, "_" + attr, value)
return f
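# These two factories are combined into lazy properties on _Window below, e.g.
#   x = property(fset=_geometry_setter("x"), fget=_geometry_getter("x"))
# The first read of x/y/width/height triggers a single get_geometry() round
# trip that fills in all four cached values; after that, reads are local and
# the setters simply overwrite the cache.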
def _float_getter(attr):
def getter(self):
if self._float_info[attr] is not None:
return self._float_info[attr]
# width and height matter less here; if they are not set, fall back to the window's current width/height
if attr in ('width', 'height'):
return getattr(self, attr)
raise AttributeError("Floating not yet configured yet")
return getter
def _float_setter(attr):
def setter(self, value):
self._float_info[attr] = value
return setter
class _Window(command.CommandObject):
_windowMask = 0 # override in child class
def __init__(self, window, qtile):
self.window, self.qtile = window, qtile
self.hidden = True
self.group = None
self.icons = {}
window.set_attribute(eventmask=self._windowMask)
self._float_info = {
'x': None,
'y': None,
'width': None,
'height': None,
}
try:
g = self.window.get_geometry()
self._x = g.x
self._y = g.y
self._width = g.width
self._height = g.height
self._float_info['width'] = g.width
self._float_info['height'] = g.height
except xcffib.xproto.DrawableError:
# Whoops, we were too early, so let's ignore it for now and get the
# values on demand.
self._x = None
self._y = None
self._width = None
self._height = None
self.borderwidth = 0
self.bordercolor = None
self.name = "<no name>"
self.strut = None
self.state = NormalState
self.window_type = "normal"
self._float_state = NOT_FLOATING
self._demands_attention = False
self.hints = {
'input': True,
'icon_pixmap': None,
'icon_window': None,
'icon_x': 0,
'icon_y': 0,
'icon_mask': 0,
'window_group': None,
'urgent': False,
# normal or size hints
'width_inc': None,
'height_inc': None,
'base_width': 0,
'base_height': 0,
}
self.updateHints()
x = property(fset=_geometry_setter("x"), fget=_geometry_getter("x"))
y = property(fset=_geometry_setter("y"), fget=_geometry_getter("y"))
width = property(fset=_geometry_setter("width"), fget=_geometry_getter("width"))
height = property(fset=_geometry_setter("height"), fget=_geometry_getter("height"))
float_x = property(
fset=_float_setter("x"),
fget=_float_getter("x")
)
float_y = property(
fset=_float_setter("y"),
fget=_float_getter("y")
)
float_width = property(
fset=_float_setter("width"),
fget=_float_getter("width")
)
float_height = property(
fset=_float_setter("height"),
fget=_float_getter("height")
)
@property
def has_focus(self):
return self == self.qtile.currentWindow
def updateName(self):
try:
self.name = self.window.get_name()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
hook.fire('client_name_updated', self)
def updateHints(self):
"""Update the local copy of the window's WM_HINTS
See http://tronche.com/gui/x/icccm/sec-4.html#WM_HINTS
"""
try:
h = self.window.get_wm_hints()
normh = self.window.get_wm_normal_hints()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return
# FIXME
# h values
# {
# 'icon_pixmap': 4194337,
# 'icon_window': 0,
# 'icon_mask': 4194340,
# 'icon_y': 0,
# 'input': 1,
# 'icon_x': 0,
# 'window_group': 4194305
# 'initial_state': 1,
# 'flags': set(['StateHint',
# 'IconMaskHint',
# 'WindowGroupHint',
# 'InputHint',
# 'UrgencyHint',
# 'IconPixmapHint']),
# }
if normh:
normh.pop('flags')
normh['min_width'] = max(0, normh.get('min_width', 0))
normh['min_height'] = max(0, normh.get('min_height', 0))
if not normh['base_width'] and \
normh['min_width'] and \
normh['width_inc']:
# xcffib seems to ignore the base width :(
normh['base_width'] = (
normh['min_width'] % normh['width_inc']
)
if not normh['base_height'] and \
normh['min_height'] and \
normh['height_inc']:
# xcffib seems to ignore the base height :(
normh['base_height'] = (
normh['min_height'] % normh['height_inc']
)
self.hints.update(normh)
if h and 'UrgencyHint' in h['flags']:
if self.qtile.currentWindow != self:
self.hints['urgent'] = True
hook.fire('client_urgent_hint_changed', self)
elif self.urgent:
self.hints['urgent'] = False
hook.fire('client_urgent_hint_changed', self)
if getattr(self, 'group', None):
self.group.layoutAll()
return
def updateState(self):
triggered = ['urgent']
if self.qtile.config.auto_fullscreen:
triggered.append('fullscreen')
state = self.window.get_net_wm_state()
logger.debug('_NET_WM_STATE: %s', state)
for s in triggered:
setattr(self, s, (s in state))
@property
def urgent(self):
return self.hints['urgent'] or self._demands_attention
@urgent.setter
def urgent(self, val):
self._demands_attention = val
# TODO unset window hint as well?
if not val:
self.hints['urgent'] = False
def info(self):
if self.group:
group = self.group.name
else:
group = None
return dict(
name=self.name,
x=self.x,
y=self.y,
width=self.width,
height=self.height,
group=group,
id=self.window.wid,
floating=self._float_state != NOT_FLOATING,
float_info=self._float_info,
maximized=self._float_state == MAXIMIZED,
minimized=self._float_state == MINIMIZED,
fullscreen=self._float_state == FULLSCREEN
)
@property
def state(self):
return self.window.get_wm_state()[0]
@state.setter
def state(self, val):
if val in (WithdrawnState, NormalState, IconicState):
self.window.set_property('WM_STATE', [val, 0])
def setOpacity(self, opacity):
if 0.0 <= opacity <= 1.0:
real_opacity = int(opacity * 0xffffffff)
self.window.set_property('_NET_WM_WINDOW_OPACITY', real_opacity)
else:
return
def getOpacity(self):
opacity = self.window.get_property(
"_NET_WM_WINDOW_OPACITY", unpack=int
)
if not opacity:
return 1.0
else:
value = opacity[0]
# 2 decimal places
as_float = round(value / 0xffffffff, 2)
return as_float
opacity = property(getOpacity, setOpacity)
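# _NET_WM_WINDOW_OPACITY stores opacity as a 32-bit cardinal where 0xffffffff
# means fully opaque. For example, setOpacity(0.5) stores
# int(0.5 * 0xffffffff) == 0x7fffffff, and getOpacity() maps that back to 0.5
# (rounded to two decimal places).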
def kill(self):
if "WM_DELETE_WINDOW" in self.window.get_wm_protocols():
data = [
self.qtile.conn.atoms["WM_DELETE_WINDOW"],
xcffib.xproto.Time.CurrentTime,
0,
0,
0
]
u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
e = xcffib.xproto.ClientMessageEvent.synthetic(
format=32,
window=self.window.wid,
type=self.qtile.conn.atoms["WM_PROTOCOLS"],
data=u
)
self.window.send_event(e)
else:
self.window.kill_client()
self.qtile.conn.flush()
def hide(self):
# We don't want to get the UnmapNotify for this unmap
with self.disableMask(xcffib.xproto.EventMask.StructureNotify):
self.window.unmap()
self.hidden = True
def unhide(self):
self.window.map()
self.state = NormalState
self.hidden = False
@contextlib.contextmanager
def disableMask(self, mask):
self._disableMask(mask)
yield
self._resetMask()
def _disableMask(self, mask):
self.window.set_attribute(
eventmask=self._windowMask & (~mask)
)
def _resetMask(self):
self.window.set_attribute(
eventmask=self._windowMask
)
def place(self, x, y, width, height, borderwidth, bordercolor,
above=False, force=False, margin=None):
"""Places the window at the specified location with the given size.
If force is false, then it tries to obey hints
"""
# TODO: self.x/y/height/width are updated BEFORE
# place is called, so there's no way to know if only
# the position is changed, so we are sending
# the ConfigureNotify every time place is called
#
# # if position change and size don't
# # send a configure notify. See ICCCM 4.2.3
# send_notify = False
# if (self.x != x or self.y != y) and \
# (self.width == width and self.height == height):
# send_notify = True
# #for now, we just:
send_notify = True
# Adjust the placement to account for layout margins, if there are any.
if margin is not None:
x += margin
y += margin
width -= margin * 2
height -= margin * 2
# save x and y float offset
if self.group is not None and self.group.screen is not None:
self.float_x = x - self.group.screen.x
self.float_y = y - self.group.screen.y
self.x = x
self.y = y
self.width = width
self.height = height
self.borderwidth = borderwidth
self.bordercolor = bordercolor
kwarg = dict(
x=x,
y=y,
width=width,
height=height,
borderwidth=borderwidth,
)
if above:
kwarg['stackmode'] = StackMode.Above
self.window.configure(**kwarg)
if send_notify:
self.send_configure_notify(x, y, width, height)
if bordercolor is not None:
self.window.set_attribute(borderpixel=bordercolor)
def send_configure_notify(self, x, y, width, height):
"""Send a synthetic ConfigureNotify"""
window = self.window.wid
above_sibling = False
override_redirect = False
event = xcffib.xproto.ConfigureNotifyEvent.synthetic(
event=window,
window=window,
above_sibling=above_sibling,
x=x,
y=y,
width=width,
height=height,
border_width=self.borderwidth,
override_redirect=override_redirect
)
self.window.send_event(event, mask=EventMask.StructureNotify)
def can_steal_focus(self):
return self.window.get_wm_type() != 'notification'
def focus(self, warp):
# Workaround for misbehaving java applications (actually it might be
# qtile who misbehaves by not implementing some X11 protocol correctly)
#
# See this xmonad issue for more information on the problem:
# http://code.google.com/p/xmonad/issues/detail?id=177
#
# 'sun-awt-X11-XFramePeer' is a main window of a java application.
# Only send WM_TAKE_FOCUS not FocusIn
# 'sun-awt-X11-XDialogPeer' is a dialog of a java application. Do not
# send any event.
cls = self.window.get_wm_class() or ''
is_java_main = 'sun-awt-X11-XFramePeer' in cls
is_java_dialog = 'sun-awt-X11-XDialogPeer' in cls
is_java = is_java_main or is_java_dialog
if not self.hidden:
# Never send TAKE_FOCUS on java *dialogs*
if not is_java_dialog and \
"WM_TAKE_FOCUS" in self.window.get_wm_protocols():
data = [
self.qtile.conn.atoms["WM_TAKE_FOCUS"],
xcffib.xproto.Time.CurrentTime,
0,
0,
0
]
u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
e = xcffib.xproto.ClientMessageEvent.synthetic(
format=32,
window=self.window.wid,
type=self.qtile.conn.atoms["WM_PROTOCOLS"],
data=u
)
self.window.send_event(e)
# Never send FocusIn to java windows
if not is_java and self.hints['input']:
self.window.set_input_focus()
try:
if warp and self.qtile.config.cursor_warp:
self.window.warp_pointer(self.width // 2, self.height // 2)
except AttributeError:
pass
if self.urgent:
self.urgent = False
atom = self.qtile.conn.atoms["_NET_WM_STATE_DEMANDS_ATTENTION"]
state = list(self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int))
if atom in state:
state.remove(atom)
self.window.set_property('_NET_WM_STATE', state)
self.qtile.root.set_property("_NET_ACTIVE_WINDOW", self.window.wid)
hook.fire("client_focus", self)
def _items(self, name):
return None
def _select(self, name, sel):
return None
def cmd_focus(self, warp=None):
"""Focuses the window."""
if warp is None:
warp = self.qtile.config.cursor_warp
self.focus(warp=warp)
def cmd_info(self):
"""Returns a dictionary of info for this object"""
return self.info()
def cmd_inspect(self):
"""Tells you more than you ever wanted to know about a window"""
a = self.window.get_attributes()
attrs = {
"backing_store": a.backing_store,
"visual": a.visual,
"class": a._class,
"bit_gravity": a.bit_gravity,
"win_gravity": a.win_gravity,
"backing_planes": a.backing_planes,
"backing_pixel": a.backing_pixel,
"save_under": a.save_under,
"map_is_installed": a.map_is_installed,
"map_state": a.map_state,
"override_redirect": a.override_redirect,
# "colormap": a.colormap,
"all_event_masks": a.all_event_masks,
"your_event_mask": a.your_event_mask,
"do_not_propagate_mask": a.do_not_propagate_mask
}
props = self.window.list_properties()
normalhints = self.window.get_wm_normal_hints()
hints = self.window.get_wm_hints()
protocols = []
for i in self.window.get_wm_protocols():
protocols.append(i)
state = self.window.get_wm_state()
return dict(
attributes=attrs,
properties=props,
name=self.window.get_name(),
wm_class=self.window.get_wm_class(),
wm_window_role=self.window.get_wm_window_role(),
wm_type=self.window.get_wm_type(),
wm_transient_for=self.window.get_wm_transient_for(),
protocols=protocols,
wm_icon_name=self.window.get_wm_icon_name(),
wm_client_machine=self.window.get_wm_client_machine(),
normalhints=normalhints,
hints=hints,
state=state,
float_info=self._float_info
)
class Internal(_Window):
"""An internal window, that should not be managed by qtile"""
_windowMask = EventMask.StructureNotify | \
EventMask.PropertyChange | \
EventMask.EnterWindow | \
EventMask.FocusChange | \
EventMask.Exposure | \
EventMask.ButtonPress | \
EventMask.ButtonRelease | \
EventMask.KeyPress
@classmethod
def create(cls, qtile, x, y, width, height, opacity=1.0):
win = qtile.conn.create_window(x, y, width, height)
win.set_property("QTILE_INTERNAL", 1)
i = Internal(win, qtile)
i.place(x, y, width, height, 0, None)
i.opacity = opacity
return i
def __repr__(self):
return "Internal(%r, %s)" % (self.name, self.window.wid)
def kill(self):
self.qtile.conn.conn.core.DestroyWindow(self.window.wid)
def cmd_kill(self):
self.kill()
class Static(_Window):
"""An internal window, that should not be managed by qtile"""
_windowMask = EventMask.StructureNotify | \
EventMask.PropertyChange | \
EventMask.EnterWindow | \
EventMask.FocusChange | \
EventMask.Exposure
def __init__(self, win, qtile, screen,
x=None, y=None, width=None, height=None):
_Window.__init__(self, win, qtile)
self.updateName()
self.conf_x = x
self.conf_y = y
self.conf_width = width
self.conf_height = height
self.x = x or 0
self.y = y or 0
self.width = width or 0
self.height = height or 0
self.screen = screen
if None not in (x, y, width, height):
self.place(x, y, width, height, 0, 0)
self.update_strut()
def handle_ConfigureRequest(self, e):
cw = xcffib.xproto.ConfigWindow
if self.conf_x is None and e.value_mask & cw.X:
self.x = e.x
if self.conf_y is None and e.value_mask & cw.Y:
self.y = e.y
if self.conf_width is None and e.value_mask & cw.Width:
self.width = e.width
if self.conf_height is None and e.value_mask & cw.Height:
self.height = e.height
self.place(
self.screen.x + self.x,
self.screen.y + self.y,
self.width,
self.height,
self.borderwidth,
self.bordercolor
)
return False
def update_strut(self):
strut = self.window.get_property(
"_NET_WM_STRUT_PARTIAL",
unpack=int
)
strut = strut or self.window.get_property(
"_NET_WM_STRUT",
unpack=int
)
strut = strut or (0, 0, 0, 0)
self.qtile.update_gaps(strut, self.strut)
self.strut = strut
def handle_PropertyNotify(self, e):
name = self.qtile.conn.atoms.get_name(e.atom)
if name in ("_NET_WM_STRUT_PARTIAL", "_NET_WM_STRUT"):
self.update_strut()
def __repr__(self):
return "Static(%r)" % self.name
class Window(_Window):
_windowMask = EventMask.StructureNotify | \
EventMask.PropertyChange | \
EventMask.EnterWindow | \
EventMask.FocusChange
# Set when this object is being retired.
defunct = False
def __init__(self, window, qtile):
_Window.__init__(self, window, qtile)
self._group = None
self.updateName()
# add to group by position according to _NET_WM_DESKTOP property
group = None
index = window.get_wm_desktop()
if index is not None and index < len(qtile.groups):
group = qtile.groups[index]
elif index is None:
transient_for = window.get_wm_transient_for()
win = qtile.windowMap.get(transient_for)
if win is not None:
group = win._group
if group is not None:
group.add(self)
self._group = group
if group != qtile.currentScreen.group:
self.hide()
# add window to the save-set, so it gets mapped when qtile dies
qtile.conn.conn.core.ChangeSaveSet(SetMode.Insert, self.window.wid)
self.update_wm_net_icon()
@property
def group(self):
return self._group
@group.setter
def group(self, group):
if group:
try:
self.window.set_property(
"_NET_WM_DESKTOP",
self.qtile.groups.index(group)
)
except xcffib.xproto.WindowError:
logger.exception("whoops, got error setting _NET_WM_DESKTOP, too early?")
self._group = group
@property
def edges(self):
return (self.x, self.y, self.x + self.width, self.y + self.height)
@property
def floating(self):
return self._float_state != NOT_FLOATING
@floating.setter
def floating(self, do_float):
if do_float and self._float_state == NOT_FLOATING:
if self.group and self.group.screen:
screen = self.group.screen
self._enablefloating(
screen.x + self.float_x, screen.y + self.float_y, self.float_width, self.float_height
)
else:
# if we are setting floating early, e.g. from a hook, we don't have a screen yet
self._float_state = FLOATING
elif (not do_float) and self._float_state != NOT_FLOATING:
if self._float_state == FLOATING:
# store last size
self.float_width = self.width
self.float_height = self.height
self._float_state = NOT_FLOATING
self.group.mark_floating(self, False)
hook.fire('float_change')
def toggle_floating(self):
self.floating = not self.floating
def togglefloating(self):
warnings.warn("togglefloating is deprecated, use toggle_floating", DeprecationWarning)
self.toggle_floating()
def enablefloating(self):
warnings.warn("enablefloating is deprecated, use floating=True", DeprecationWarning)
self.floating = True
def disablefloating(self):
warnings.warn("disablefloating is deprecated, use floating=False", DeprecationWarning)
self.floating = False
@property
def fullscreen(self):
return self._float_state == FULLSCREEN
@fullscreen.setter
def fullscreen(self, do_full):
atom = set([self.qtile.conn.atoms["_NET_WM_STATE_FULLSCREEN"]])
prev_state = set(self.window.get_property('_NET_WM_STATE', 'ATOM', unpack=int))
def set_state(old_state, new_state):
if new_state != old_state:
self.window.set_property('_NET_WM_STATE', list(new_state))
if do_full:
screen = self.group.screen or \
self.qtile.find_closest_screen(self.x, self.y)
self._enablefloating(
screen.x,
screen.y,
screen.width,
screen.height,
new_float_state=FULLSCREEN
)
set_state(prev_state, prev_state | atom)
return
if self._float_state == FULLSCREEN:
# The order of calling set_state() and then
# setting self.floating = False is important
set_state(prev_state, prev_state - atom)
self.floating = False
return
def toggle_fullscreen(self):
self.fullscreen = not self.fullscreen
def togglefullscreen(self):
warnings.warn("togglefullscreen is deprecated, use toggle_fullscreen", DeprecationWarning)
self.toggle_fullscreen()
@property
def maximized(self):
return self._float_state == MAXIMIZED
@maximized.setter
def maximized(self, do_maximize):
if do_maximize:
screen = self.group.screen or \
self.qtile.find_closest_screen(self.x, self.y)
self._enablefloating(
screen.dx,
screen.dy,
screen.dwidth,
screen.dheight,
new_float_state=MAXIMIZED
)
else:
if self._float_state == MAXIMIZED:
self.floating = False
def enablemaximize(self, state=MAXIMIZED):
warnings.warn("enablemaximize is deprecated, use maximized=True", DeprecationWarning)
self.maximized = True
def toggle_maximize(self, state=MAXIMIZED):
self.maximized = not self.maximized
def togglemaximize(self):
warnings.warn("togglemaximize is deprecated, use toggle_maximize", DeprecationWarning)
self.toggle_maximize()
@property
def minimized(self):
return self._float_state == MINIMIZED
@minimized.setter
def minimized(self, do_minimize):
if do_minimize:
if self._float_state != MINIMIZED:
self._enablefloating(new_float_state=MINIMIZED)
else:
if self._float_state == MINIMIZED:
self.floating = False
def enableminimize(self):
warnings.warn("enableminimized is deprecated, use minimized=True", DeprecationWarning)
self.minimized = True
def toggle_minimize(self):
self.minimized = not self.minimized
def toggleminimize(self):
warnings.warn("toggleminimize is deprecated, use toggle_minimize", DeprecationWarning)
self.toggle_minimize()
def static(self, screen, x=None, y=None, width=None, height=None):
"""Makes this window a static window, attached to a Screen
If any of the arguments are left unspecified, the values given by the
window itself are used instead. So, for a window that's aware of its
appropriate size and location (like dzen), you don't have to specify
anything.
"""
self.defunct = True
screen = self.qtile.screens[screen]
if self.group:
self.group.remove(self)
s = Static(self.window, self.qtile, screen, x, y, width, height)
self.qtile.windowMap[self.window.wid] = s
hook.fire("client_managed", s)
return s
def tweak_float(self, x=None, y=None, dx=0, dy=0,
w=None, h=None, dw=0, dh=0):
logger.debug("tweak_float: x=%s y=%s", x, y)
if x is not None:
self.x = x
self.x += dx
if y is not None:
self.y = y
self.y += dy
if w is not None:
self.width = w
self.width += dw
if h is not None:
self.height = h
self.height += dh
if self.height < 0:
self.height = 0
if self.width < 0:
self.width = 0
screen = self.qtile.find_closest_screen(self.x, self.y)
if self.group and screen is not None and screen != self.group.screen:
self.group.remove(self, force=True)
screen.group.add(self, force=True)
self.qtile.toScreen(screen.index)
self._reconfigure_floating()
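# Illustrative note: the cmd_*_floating helpers defined further below all
# funnel into tweak_float; e.g. cmd_move_floating(10, 0, curx, cury) becomes
# tweak_float(dx=10, dy=0) and nudges the floating window 10 pixels to the
# right, re-placing it (and possibly moving it to another screen's group).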
def getsize(self):
return (self.width, self.height)
def getposition(self):
return (self.x, self.y)
def _reconfigure_floating(self, new_float_state=FLOATING):
if new_float_state == MINIMIZED:
self.state = IconicState
self.hide()
else:
width = max(self.width, self.hints.get('min_width', 0))
height = max(self.height, self.hints.get('min_height', 0))
if self.hints['base_width'] and self.hints['width_inc']:
width -= (width - self.hints['base_width']) % self.hints['width_inc']
if self.hints['base_height'] and self.hints['height_inc']:
height -= (height - self.hints['base_height']) % self.hints['height_inc']
print("placing", self.x, self.y, width, height)
self.place(
self.x, self.y,
width, height,
self.borderwidth,
self.bordercolor,
above=True,
)
if self._float_state != new_float_state:
self._float_state = new_float_state
if self.group: # may be not, if it's called from hook
self.group.mark_floating(self, True)
hook.fire('float_change')
def _enablefloating(self, x=None, y=None, w=None, h=None,
new_float_state=FLOATING):
if new_float_state != MINIMIZED:
self.x = x
self.y = y
self.width = w
self.height = h
self._reconfigure_floating(new_float_state=new_float_state)
def togroup(self, groupName=None):
"""Move window to a specified group"""
if groupName is None:
group = self.qtile.currentGroup
else:
group = self.qtile.groupMap.get(groupName)
if group is None:
raise command.CommandError("No such group: %s" % groupName)
if self.group is not group:
self.hide()
if self.group:
if self.group.screen:
# for floats remove window offset
self.x -= self.group.screen.x
self.group.remove(self)
if group.screen and self.x < group.screen.x:
self.x += group.screen.x
group.add(self)
def toscreen(self, index=None):
""" Move window to a specified screen, or the current screen. """
if index is None:
screen = self.qtile.currentScreen
else:
try:
screen = self.qtile.screens[index]
except IndexError:
raise command.CommandError('No such screen: %d' % index)
self.togroup(screen.group.name)
def match(self, wname=None, wmclass=None, role=None):
"""Match window against given attributes.
Parameters
==========
wname :
matches against the window name or title, that is, either
``_NET_WM_VISIBLE_NAME``, ``_NET_WM_NAME``, ``WM_NAME``.
wmclass :
matches against any of the two values in the ``WM_CLASS`` property
role :
matches against the ``WM_WINDOW_ROLE`` property
"""
if not (wname or wmclass or role):
raise TypeError(
"Either a name, a wmclass or a role must be specified"
)
if wname and wname == self.name:
return True
try:
cliclass = self.window.get_wm_class()
if wmclass and cliclass and wmclass in cliclass:
return True
clirole = self.window.get_wm_window_role()
if role and clirole and role == clirole:
return True
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
return False
return False
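# Illustrative sketch (the window object and class name are hypothetical):
#
#   if win.match(wmclass="Firefox"):
#       ...
#
# WM_CLASS is typically a pair such as ('Navigator', 'Firefox'), so matching
# against either value succeeds; wname compares against the window title and
# role against WM_WINDOW_ROLE.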
def handle_EnterNotify(self, e):
hook.fire("client_mouse_enter", self)
if self.qtile.config.follow_mouse_focus and \
self.group.currentWindow != self:
self.group.focus(self, False)
if self.group.screen and \
self.qtile.currentScreen != self.group.screen and \
self.qtile.config.follow_mouse_focus:
self.qtile.toScreen(self.group.screen.index, False)
return True
def handle_ConfigureRequest(self, e):
if self.qtile._drag and self.qtile.currentWindow == self:
# ignore requests while user is dragging window
return
if getattr(self, 'floating', False):
# only obey resize for floating windows
cw = xcffib.xproto.ConfigWindow
width = e.width if e.value_mask & cw.Width else self.width
height = e.height if e.value_mask & cw.Height else self.height
x = e.x if e.value_mask & cw.X else self.x
y = e.y if e.value_mask & cw.Y else self.y
else:
width, height, x, y = self.width, self.height, self.x, self.y
if self.group and self.group.screen:
self.place(
x, y,
width, height,
self.borderwidth, self.bordercolor,
)
self.updateState()
return False
def update_wm_net_icon(self):
"""Set a dict with the icons of the window"""
icon = self.window.get_property('_NET_WM_ICON', 'CARDINAL')
if not icon:
return
icon = list(map(ord, icon.value))
icons = {}
while True:
if not icon:
break
size = icon[:8]
if len(size) != 8 or not size[0] or not size[4]:
break
icon = icon[8:]
width = size[0]
height = size[4]
next_pix = width * height * 4
data = icon[:next_pix]
arr = array.array("B", data)
for i in range(0, len(arr), 4):
mult = arr[i + 3] / 255.
arr[i + 0] = int(arr[i + 0] * mult)
arr[i + 1] = int(arr[i + 1] * mult)
arr[i + 2] = int(arr[i + 2] * mult)
icon = icon[next_pix:]
icons["%sx%s" % (width, height)] = arr
self.icons = icons
hook.fire("net_wm_icon_change", self)
def handle_ClientMessage(self, event):
atoms = self.qtile.conn.atoms
opcode = event.type
data = event.data
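# EWMH _NET_WM_STATE client messages carry: data32[0] = the action
# (_NET_WM_STATE_REMOVE/ADD/TOGGLE), data32[1] and data32[2] = the property
# atoms to change (the second may be 0 when only one property is affected).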
if atoms["_NET_WM_STATE"] == opcode:
prev_state = self.window.get_property(
'_NET_WM_STATE',
'ATOM',
unpack=int
)
current_state = set(prev_state)
action = data.data32[0]
for prop in (data.data32[1], data.data32[2]):
if not prop:
# skip 0
continue
if action == _NET_WM_STATE_REMOVE:
current_state.discard(prop)
elif action == _NET_WM_STATE_ADD:
current_state.add(prop)
elif action == _NET_WM_STATE_TOGGLE:
current_state ^= set([prop]) # toggle :D
self.window.set_property('_NET_WM_STATE', list(current_state))
elif atoms["_NET_ACTIVE_WINDOW"] == opcode:
source = data.data32[0]
if source == 2: # XCB_EWMH_CLIENT_SOURCE_TYPE_OTHER (pager or similar)
logger.info("Focusing window by pager")
self.qtile.currentScreen.setGroup(self.group)
self.group.focus(self)
else: # XCB_EWMH_CLIENT_SOURCE_TYPE_NONE or _NORMAL (application request)
focus_behavior = self.qtile.config.focus_on_window_activation
if focus_behavior == "focus":
logger.info("Focusing window")
self.qtile.currentScreen.setGroup(self.group)
self.group.focus(self)
elif focus_behavior == "smart" and self.group.screen and self.group.screen == self.qtile.currentScreen:
logger.info("Focusing window")
self.qtile.currentScreen.setGroup(self.group)
self.group.focus(self)
elif focus_behavior == "urgent" or (focus_behavior == "smart" and not self.group.screen):
logger.info("Setting urgent flag for window")
self.urgent = True
else:
logger.info("Ignoring focus request")
def handle_PropertyNotify(self, e):
name = self.qtile.conn.atoms.get_name(e.atom)
logger.debug("PropertyNotifyEvent: %s", name)
if name == "WM_TRANSIENT_FOR":
pass
elif name == "WM_HINTS":
self.updateHints()
elif name == "WM_NORMAL_HINTS":
self.updateHints()
elif name == "WM_NAME":
self.updateName()
elif name == "_NET_WM_NAME":
self.updateName()
elif name == "_NET_WM_VISIBLE_NAME":
self.updateName()
elif name == "WM_ICON_NAME":
pass
elif name == "_NET_WM_ICON_NAME":
pass
elif name == "_NET_WM_ICON":
self.update_wm_net_icon()
elif name == "ZOOM":
pass
elif name == "_NET_WM_WINDOW_OPACITY":
pass
elif name == "WM_STATE":
pass
elif name == "_NET_WM_STATE":
self.updateState()
elif name == "WM_PROTOCOLS":
pass
elif name == "_NET_WM_DESKTOP":
            # Some windows set their state (e.g. fullscreen) as they start;
            # updateState() is called here because the group and the screen
            # are already set by the time this property is emitted.
self.updateState()
elif name == "_NET_WM_USER_TIME":
if not self.qtile.config.follow_mouse_focus and \
self.group.currentWindow != self:
self.group.focus(self, False)
else:
logger.info("Unknown window property: %s", name)
return False
def _items(self, name):
if name == "group":
return (True, None)
elif name == "layout":
return (True, list(range(len(self.group.layouts))))
elif name == "screen":
return (True, None)
def _select(self, name, sel):
if name == "group":
return self.group
elif name == "layout":
if sel is None:
return self.group.layout
else:
return utils.lget(self.group.layouts, sel)
elif name == "screen":
return self.group.screen
def __repr__(self):
return "Window(%r)" % self.name
def cmd_static(self, screen, x, y, width, height):
self.static(screen, x, y, width, height)
def cmd_kill(self):
"""Kill this window
        Try to do this politely if the client supports
        this, otherwise be brutal.
"""
self.kill()
def cmd_togroup(self, groupName=None):
"""Move window to a specified group.
If groupName is not specified, we assume the current group
Examples
========
Move window to current group::
togroup()
Move window to group "a"::
togroup("a")
"""
self.togroup(groupName)
def cmd_toscreen(self, index=None):
"""Move window to a specified screen.
If index is not specified, we assume the current screen
Examples
========
Move window to current screen::
toscreen()
Move window to screen 0::
toscreen(0)
"""
self.toscreen(index)
def cmd_move_floating(self, dx, dy, curx, cury):
"""Move window by dx and dy"""
self.tweak_float(dx=dx, dy=dy)
def cmd_resize_floating(self, dw, dh, curx, cury):
"""Add dw and dh to size of window"""
self.tweak_float(dw=dw, dh=dh)
def cmd_set_position_floating(self, x, y, curx, cury):
"""Move window to x and y"""
self.tweak_float(x=x, y=y)
def cmd_set_size_floating(self, w, h, curx, cury):
"""Set window dimensions to w and h"""
self.tweak_float(w=w, h=h)
def cmd_get_position(self):
return self.getposition()
def cmd_get_size(self):
return self.getsize()
def cmd_toggle_floating(self):
self.toggle_floating()
def cmd_enable_floating(self):
self.floating = True
def cmd_disable_floating(self):
self.floating = False
def cmd_toggle_maximize(self):
self.toggle_maximize()
def cmd_enable_maximize(self):
self.maximize = True
def cmd_disable_maximize(self):
self.maximize = False
def cmd_toggle_fullscreen(self):
self.toggle_fullscreen()
def cmd_enable_fullscreen(self):
self.fullscreen = True
def cmd_disable_fullscreen(self):
self.fullscreen = False
def cmd_toggle_minimize(self):
self.toggle_minimize()
def cmd_enable_minimize(self):
self.minimize = True
def cmd_disable_minimize(self):
self.minimize = False
def cmd_bring_to_front(self):
if self.floating:
self.window.configure(stackmode=StackMode.Above)
else:
            self._reconfigure_floating()  # automatically above
def cmd_match(self, *args, **kwargs):
return self.match(*args, **kwargs)
def cmd_opacity(self, opacity):
if opacity < .1:
self.opacity = .1
elif opacity > 1:
self.opacity = 1
else:
self.opacity = opacity
def cmd_down_opacity(self):
if self.opacity > .2:
# don't go completely clear
self.opacity -= .1
else:
self.opacity = .1
def cmd_up_opacity(self):
if self.opacity < .9:
self.opacity += .1
else:
self.opacity = 1
def _is_in_window(self, x, y, window):
return (window.edges[0] <= x <= window.edges[2] and
window.edges[1] <= y <= window.edges[3])
def cmd_set_position(self, dx, dy, curx, cury):
if self.floating:
self.tweak_float(dx, dy)
return
for window in self.group.windows:
if window == self or window.floating:
continue
if self._is_in_window(curx, cury, window):
clients = self.group.layout.clients
index1 = clients.index(self)
index2 = clients.index(window)
clients[index1], clients[index2] = clients[index2], clients[index1]
self.group.layout.focused = index2
self.group.layoutAll()
break
|
|
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2008. |
# +--------------------------------------------------------------------------+
# | This module complies with SQLAlchemy 0.8 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: Alex Pitigoi, Abhigyan Agrawal |
# | Contributors: Jaimy Azle, Mike Bayer |
# | Version: 0.3.x |
# +--------------------------------------------------------------------------+
"""Support for IBM DB2 database
"""
import datetime
from sqlalchemy import types as sa_types
from sqlalchemy import schema as sa_schema
from sqlalchemy.sql import compiler
from sqlalchemy.engine import default
from . import reflection as ibm_reflection
from sqlalchemy.types import BLOB, CHAR, CLOB, DATE, DATETIME, INTEGER,\
SMALLINT, BIGINT, DECIMAL, NUMERIC, REAL, TIME, TIMESTAMP,\
VARCHAR
# as documented from:
# http://publib.boulder.ibm.com/infocenter/db2luw/v9/index.jsp?topic=/com.ibm.db2.udb.doc/admin/r0001095.htm
RESERVED_WORDS = set(
['activate', 'disallow', 'locale', 'result', 'add', 'disconnect', 'localtime',
'result_set_locator', 'after', 'distinct', 'localtimestamp', 'return', 'alias',
'do', 'locator', 'returns', 'all', 'double', 'locators', 'revoke', 'allocate', 'drop',
'lock', 'right', 'allow', 'dssize', 'lockmax', 'rollback', 'alter', 'dynamic',
'locksize', 'routine', 'and', 'each', 'long', 'row', 'any', 'editproc', 'loop',
'row_number', 'as', 'else', 'maintained', 'rownumber', 'asensitive', 'elseif',
'materialized', 'rows', 'associate', 'enable', 'maxvalue', 'rowset', 'asutime',
'encoding', 'microsecond', 'rrn', 'at', 'encryption', 'microseconds', 'run',
'attributes', 'end', 'minute', 'savepoint', 'audit', 'end-exec', 'minutes', 'schema',
'authorization', 'ending', 'minvalue', 'scratchpad', 'aux', 'erase', 'mode', 'scroll',
'auxiliary', 'escape', 'modifies', 'search', 'before', 'every', 'month', 'second',
'begin', 'except', 'months', 'seconds', 'between', 'exception', 'new', 'secqty',
'binary', 'excluding', 'new_table', 'security', 'bufferpool', 'exclusive',
'nextval', 'select', 'by', 'execute', 'no', 'sensitive', 'cache', 'exists', 'nocache',
'sequence', 'call', 'exit', 'nocycle', 'session', 'called', 'explain', 'nodename',
'session_user', 'capture', 'external', 'nodenumber', 'set', 'cardinality',
'extract', 'nomaxvalue', 'signal', 'cascaded', 'fenced', 'nominvalue', 'simple',
'case', 'fetch', 'none', 'some', 'cast', 'fieldproc', 'noorder', 'source', 'ccsid',
'file', 'normalized', 'specific', 'char', 'final', 'not', 'sql', 'character', 'for',
'null', 'sqlid', 'check', 'foreign', 'nulls', 'stacked', 'close', 'free', 'numparts',
'standard', 'cluster', 'from', 'obid', 'start', 'collection', 'full', 'of', 'starting',
'collid', 'function', 'old', 'statement', 'column', 'general', 'old_table', 'static',
'comment', 'generated', 'on', 'stay', 'commit', 'get', 'open', 'stogroup', 'concat',
'global', 'optimization', 'stores', 'condition', 'go', 'optimize', 'style', 'connect',
'goto', 'option', 'substring', 'connection', 'grant', 'or', 'summary', 'constraint',
'graphic', 'order', 'synonym', 'contains', 'group', 'out', 'sysfun', 'continue',
'handler', 'outer', 'sysibm', 'count', 'hash', 'over', 'sysproc', 'count_big',
'hashed_value', 'overriding', 'system', 'create', 'having', 'package',
'system_user', 'cross', 'hint', 'padded', 'table', 'current', 'hold', 'pagesize',
'tablespace', 'current_date', 'hour', 'parameter', 'then', 'current_lc_ctype',
'hours', 'part', 'time', 'current_path', 'identity', 'partition', 'timestamp',
'current_schema', 'if', 'partitioned', 'to', 'current_server', 'immediate',
'partitioning', 'transaction', 'current_time', 'in', 'partitions', 'trigger',
'current_timestamp', 'including', 'password', 'trim', 'current_timezone',
'inclusive', 'path', 'type', 'current_user', 'increment', 'piecesize', 'undo',
'cursor', 'index', 'plan', 'union', 'cycle', 'indicator', 'position', 'unique', 'data',
'inherit', 'precision', 'until', 'database', 'inner', 'prepare', 'update',
'datapartitionname', 'inout', 'prevval', 'usage', 'datapartitionnum',
'insensitive', 'primary', 'user', 'date', 'insert', 'priqty', 'using', 'day',
'integrity', 'privileges', 'validproc', 'days', 'intersect', 'procedure', 'value',
'db2general', 'into', 'program', 'values', 'db2genrl', 'is', 'psid', 'variable',
'db2sql', 'isobid', 'query', 'variant', 'dbinfo', 'isolation', 'queryno', 'vcat',
'dbpartitionname', 'iterate', 'range', 'version', 'dbpartitionnum', 'jar', 'rank',
'view', 'deallocate', 'java', 'read', 'volatile', 'declare', 'join', 'reads', 'volumes',
'default', 'key', 'recovery', 'when', 'defaults', 'label', 'references', 'whenever',
'definition', 'language', 'referencing', 'where', 'delete', 'lateral', 'refresh',
'while', 'dense_rank', 'lc_ctype', 'release', 'with', 'denserank', 'leave', 'rename',
'without', 'describe', 'left', 'repeat', 'wlm', 'descriptor', 'like', 'reset', 'write',
'deterministic', 'linktype', 'resignal', 'xmlelement', 'diagnostics', 'local',
    'restart', 'year', 'disable', 'localdate', 'restrict', 'years', 'abs', 'grouping',
'regr_intercept', 'are', 'int', 'regr_r2', 'array', 'integer', 'regr_slope',
'asymmetric', 'intersection', 'regr_sxx', 'atomic', 'interval', 'regr_sxy', 'avg',
'large', 'regr_syy', 'bigint', 'leading', 'rollup', 'blob', 'ln', 'scope', 'boolean',
'lower', 'similar', 'both', 'match', 'smallint', 'ceil', 'max', 'specifictype',
'ceiling', 'member', 'sqlexception', 'char_length', 'merge', 'sqlstate',
'character_length', 'method', 'sqlwarning', 'clob', 'min', 'sqrt', 'coalesce', 'mod',
'stddev_pop', 'collate', 'module', 'stddev_samp', 'collect', 'multiset',
'submultiset', 'convert', 'national', 'sum', 'corr', 'natural', 'symmetric',
'corresponding', 'nchar', 'tablesample', 'covar_pop', 'nclob', 'timezone_hour',
'covar_samp', 'normalize', 'timezone_minute', 'cube', 'nullif', 'trailing',
'cume_dist', 'numeric', 'translate', 'current_default_transform_group',
'octet_length', 'translation', 'current_role', 'only', 'treat',
'current_transform_group_for_type', 'overlaps', 'true', 'dec', 'overlay',
'uescape', 'decimal', 'percent_rank', 'unknown', 'deref', 'percentile_cont',
'unnest', 'element', 'percentile_disc', 'upper', 'exec', 'power', 'var_pop', 'exp',
'real', 'var_samp', 'false', 'recursive', 'varchar', 'filter', 'ref', 'varying',
'float', 'regr_avgx', 'width_bucket', 'floor', 'regr_avgy', 'window', 'fusion',
'regr_count', 'within'])
class _IBM_Boolean(sa_types.Boolean):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
if value == False:
return 0
elif value == True:
return 1
return process
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
if value == False:
return '0'
elif value == True:
return '1'
return process
class _IBM_Date(sa_types.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = datetime.date(value.year, value.month, value.day)
return value
return process
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = datetime.date(value.year, value.month, value.day)
return str(value)
return process
class DOUBLE(sa_types.Numeric):
__visit_name__ = 'DOUBLE'
class LONGVARCHAR(sa_types.VARCHAR):
    __visit_name__ = 'LONGVARCHAR'
class DBCLOB(sa_types.CLOB):
__visit_name__ = "DBCLOB"
class GRAPHIC(sa_types.CHAR):
__visit_name__ = "GRAPHIC"
class VARGRAPHIC(sa_types.Unicode):
__visit_name__ = "VARGRAPHIC"
class LONGVARGRAPHIC(sa_types.UnicodeText):
__visit_name__ = "LONGVARGRAPHIC"
class XML(sa_types.Text):
__visit_name__ = "XML"
colspecs = {
sa_types.Boolean: _IBM_Boolean,
sa_types.Date: _IBM_Date,
# really ?
# sa_types.Unicode: DB2VARGRAPHIC
}
ischema_names = {
'BLOB': BLOB,
'CHAR': CHAR,
'CHARACTER': CHAR,
'CLOB': CLOB,
'DATE': DATE,
'DATETIME': DATETIME,
'INTEGER': INTEGER,
'SMALLINT': SMALLINT,
'BIGINT': BIGINT,
'DECIMAL': DECIMAL,
'NUMERIC': NUMERIC,
'REAL': REAL,
'DOUBLE': DOUBLE,
'TIME': TIME,
'TIMESTAMP': TIMESTAMP,
'VARCHAR': VARCHAR,
'LONGVARCHAR': LONGVARCHAR,
'XML': XML,
'GRAPHIC': GRAPHIC,
'VARGRAPHIC': VARGRAPHIC,
'LONGVARGRAPHIC': LONGVARGRAPHIC,
'DBCLOB': DBCLOB
}
class DB2TypeCompiler(compiler.GenericTypeCompiler):
def visit_TIMESTAMP(self, type_):
return "TIMESTAMP"
def visit_DATE(self, type_):
return "DATE"
def visit_TIME(self, type_):
return "TIME"
def visit_DATETIME(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_SMALLINT(self, type_):
return "SMALLINT"
def visit_INT(self, type_):
return "INT"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_REAL(self, type_):
return "REAL"
def visit_XML(self, type_):
return "XML"
def visit_CLOB(self, type_):
return "CLOB"
def visit_BLOB(self, type_):
return "BLOB(1M)" if type_.length in (None, 0) else \
"BLOB(%(length)s)" % {'length': type_.length}
def visit_DBCLOB(self, type_):
return "DBCLOB(1M)" if type_.length in (None, 0) else \
"DBCLOB(%(length)s)" % {'length': type_.length}
def visit_VARCHAR(self, type_):
return "VARCHAR(%(length)s)" % {'length': type_.length}
def visit_LONGVARCHAR(self, type_):
return "LONG VARCHAR"
def visit_VARGRAPHIC(self, type_):
return "VARGRAPHIC(%(length)s)" % {'length': type_.length}
def visit_LONGVARGRAPHIC(self, type_):
return "LONG VARGRAPHIC"
def visit_CHAR(self, type_):
return "CHAR" if type_.length in (None, 0) else \
"CHAR(%(length)s)" % {'length': type_.length}
def visit_GRAPHIC(self, type_):
return "GRAPHIC" if type_.length in (None, 0) else \
"GRAPHIC(%(length)s)" % {'length': type_.length}
def visit_DECIMAL(self, type_):
if not type_.precision:
return "DECIMAL(31, 0)"
elif not type_.scale:
return "DECIMAL(%(precision)s, 0)" % {'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
'precision': type_.precision, 'scale': type_.scale}
def visit_numeric(self, type_):
return self.visit_DECIMAL(type_)
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_date(self, type_):
return self.visit_DATE(type_)
def visit_time(self, type_):
return self.visit_TIME(type_)
def visit_integer(self, type_):
return self.visit_INT(type_)
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_float(self, type_):
return self.visit_REAL(type_)
def visit_unicode(self, type_):
return self.visit_VARGRAPHIC(type_)
def visit_unicode_text(self, type_):
return self.visit_LONGVARGRAPHIC(type_)
def visit_string(self, type_):
return self.visit_VARCHAR(type_)
def visit_TEXT(self, type_):
return self.visit_VARCHAR(type_)
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
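# Illustrative sketch (not part of the original dialect): the DDL fragments that
# DB2TypeCompiler renders for a few common SQLAlchemy types. Passing None as the
# dialect is an assumption that only holds because these visit methods never
# touch self.dialect; the helper name below is ours, not ibm_db_sa's.
def _example_type_ddl():
    tc = DB2TypeCompiler(None)
    return [
        tc.process(sa_types.DECIMAL(10, 2)),  # 'DECIMAL(10, 2)'
        tc.process(sa_types.VARCHAR(255)),    # 'VARCHAR(255)'
        tc.process(sa_types.CHAR()),          # 'CHAR'
    ]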
class DB2Compiler(compiler.SQLCompiler):
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def limit_clause(self, select):
if select._limit is not None:
return " FETCH FIRST %s ROWS ONLY" % select._limit
else:
return ""
def default_from(self):
        # DB2 uses the SYSIBM.SYSDUMMY1 dummy table for SELECTs with no FROM clause
return " FROM SYSIBM.SYSDUMMY1"
#def visit_function(self, func, result_map=None, **kwargs):
# TODO: this is wrong but need to know what DB2 is expecting here
# if func.name.upper() == "LENGTH":
# return "LENGTH('%s')" % func.compile().params[func.name + '_1']
# else:
# return compiler.SQLCompiler.visit_function(self, func, **kwargs)
def visit_cast(self, cast, **kw):
type_ = cast.typeclause.type
# TODO: verify that CAST shouldn't be called with
# other types, I was able to CAST against VARCHAR
# for example
if isinstance(type_, (
sa_types.DateTime, sa_types.Date, sa_types.Time,
sa_types.DECIMAL)):
return super(DB2Compiler, self).visit_cast(cast, **kw)
else:
return self.process(cast.clause)
def get_select_precolumns(self, select):
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
# NOTE: this is the same method as that used in mysql/base.py
# to render INNER JOIN
return ''.join(
(self.process(join.left, asfrom=True, **kwargs),
(join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
self.process(join.right, asfrom=True, **kwargs),
" ON ",
self.process(join.onclause, **kwargs)))
class DB2DDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
col_spec = [self.preparer.format_column(column)]
col_spec.append(self.dialect.type_compiler.process(column.type))
# column-options: "NOT NULL"
if not column.nullable or column.primary_key:
col_spec.append('NOT NULL')
# default-clause:
default = self.get_column_default_string(column)
if default is not None:
col_spec.append('WITH DEFAULT')
col_spec.append(default)
if column is column.table._autoincrement_column:
col_spec.append('GENERATED BY DEFAULT')
col_spec.append('AS IDENTITY')
col_spec.append('(START WITH 1)')
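        # e.g. an autoincrement Integer primary key renders as
        #   "id INT NOT NULL GENERATED BY DEFAULT AS IDENTITY (START WITH 1)"
        # ("id" is just an illustrative column name)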
column_spec = ' '.join(col_spec)
return column_spec
def visit_drop_index(self, drop, **kw):
return "\nDROP INDEX %s" % (
self.preparer.quote(
self._index_identifier(drop.element.name),
drop.element.quote)
)
def visit_drop_constraint(self, drop, **kw):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % \
(self.preparer.format_table(constraint.table),
qual, const)
class DB2IdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
illegal_initial_characters = set(xrange(0, 10)).union(["_", "$"])
class DB2ExecutionContext(default.DefaultExecutionContext):
pass
class _SelectLastRowIDMixin(object):
_select_lastrowid = False
_lastrowid = None
def fire_sequence(self, seq, type_):
return self._execute_scalar("SELECT NEXTVAL FOR " +
self.dialect.identifier_preparer.format_sequence(seq) +
" FROM SYSIBM.SYSDUMMY1", type_)
def getlastrowid(self):
return self._lastrowid
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
self._select_lastrowid = insert_has_sequence and \
not self.compiled.returning and \
not self.compiled.inline
def post_exec(self):
conn = self.root_connection
if self._select_lastrowid:
conn._cursor_execute(self.cursor,
"SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM.SYSDUMMY1",
(), self)
row = self.cursor.fetchall()[0]
if row[0] is not None:
self._lastrowid = int(row[0])
class DB2Dialect(default.DefaultDialect):
name = 'db2'
max_identifier_length = 128
encoding = 'utf-8'
default_paramstyle = 'named'
colspecs = colspecs
ischema_names = ischema_names
supports_char_length = False
supports_unicode_statements = False
supports_unicode_binds = False
returns_unicode_strings = False
postfetch_lastrowid = True
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_decimal = True
preexecute_sequences = False
supports_alter = True
supports_sequences = True
sequences_optional = True
statement_compiler = DB2Compiler
ddl_compiler = DB2DDLCompiler
type_compiler = DB2TypeCompiler
preparer = DB2IdentifierPreparer
execution_ctx_cls = DB2ExecutionContext
_reflector_cls = ibm_reflection.DB2Reflector
def __init__(self, **kw):
super(DB2Dialect, self).__init__(**kw)
self._reflector = self._reflector_cls(self)
    # reflection: these all defer to a BaseDB2Reflector
# object which selects between DB2 and AS/400 schemas
def normalize_name(self, name):
return self._reflector.normalize_name(name)
def denormalize_name(self, name):
return self._reflector.denormalize_name(name)
def _get_default_schema_name(self, connection):
return self._reflector._get_default_schema_name(connection)
def has_table(self, connection, table_name, schema=None):
return self._reflector.has_table(connection, table_name, schema=schema)
def has_sequence(self, connection, sequence_name, schema=None):
return self._reflector.has_sequence(connection, sequence_name,
schema=schema)
def get_schema_names(self, connection, **kw):
return self._reflector.get_schema_names(connection, **kw)
def get_table_names(self, connection, schema=None, **kw):
return self._reflector.get_table_names(connection, schema=schema, **kw)
def get_view_names(self, connection, schema=None, **kw):
return self._reflector.get_view_names(connection, schema=schema, **kw)
def get_view_definition(self, connection, viewname, schema=None, **kw):
return self._reflector.get_view_definition(
connection, viewname, schema=schema, **kw)
def get_columns(self, connection, table_name, schema=None, **kw):
return self._reflector.get_columns(
connection, table_name, schema=schema, **kw)
def get_primary_keys(self, connection, table_name, schema=None, **kw):
return self._reflector.get_primary_keys(
connection, table_name, schema=schema, **kw)
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
return self._reflector.get_foreign_keys(
connection, table_name, schema=schema, **kw)
def get_indexes(self, connection, table_name, schema=None, **kw):
return self._reflector.get_indexes(
connection, table_name, schema=schema, **kw)
|
|
#! /usr/bin/env python
#
# See README for usage instructions.
from distutils import util
import fnmatch
import glob
import os
import pkg_resources
import re
import subprocess
import sys
import sysconfig
import platform
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from setuptools import setup, Extension, find_packages
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.build_py import build_py as _build_py
from distutils.command.clean import clean as _clean
from distutils.spawn import find_executable
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def GetVersion():
"""Gets the version from google/protobuf/__init__.py
Do not import google.protobuf.__init__ directly, because an installed
protobuf library may be loaded instead."""
with open(os.path.join('google', 'protobuf', '__init__.py')) as version_file:
exec(version_file.read(), globals())
global __version__
return __version__
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/any_test.proto", False)
generate_proto("../src/google/protobuf/map_proto2_unittest.proto", False)
generate_proto("../src/google/protobuf/map_unittest.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto3.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto2.proto", False)
generate_proto("../src/google/protobuf/unittest_arena.proto", False)
generate_proto("../src/google/protobuf/unittest.proto", False)
generate_proto("../src/google/protobuf/unittest_custom_options.proto", False)
generate_proto("../src/google/protobuf/unittest_import.proto", False)
generate_proto("../src/google/protobuf/unittest_import_public.proto", False)
generate_proto("../src/google/protobuf/unittest_mset.proto", False)
generate_proto("../src/google/protobuf/unittest_mset_wire_format.proto", False)
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto", False)
generate_proto("../src/google/protobuf/unittest_proto3_arena.proto", False)
generate_proto("../src/google/protobuf/util/json_format.proto", False)
generate_proto("../src/google/protobuf/util/json_format_proto3.proto", False)
generate_proto("google/protobuf/internal/any_test.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test1.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test2.proto", False)
generate_proto("google/protobuf/internal/factory_test1.proto", False)
generate_proto("google/protobuf/internal/factory_test2.proto", False)
generate_proto("google/protobuf/internal/file_options_test.proto", False)
generate_proto("google/protobuf/internal/import_test_package/inner.proto", False)
generate_proto("google/protobuf/internal/import_test_package/outer.proto", False)
generate_proto("google/protobuf/internal/missing_enum_values.proto", False)
generate_proto("google/protobuf/internal/message_set_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto", False)
generate_proto("google/protobuf/internal/more_messages.proto", False)
generate_proto("google/protobuf/internal/no_package.proto", False)
generate_proto("google/protobuf/internal/packed_field_test.proto", False)
generate_proto("google/protobuf/internal/test_bad_identifiers.proto", False)
generate_proto("google/protobuf/internal/test_proto3_optional.proto", False)
generate_proto("google/protobuf/pyext/python.proto", False)
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o"):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
generate_proto("../src/google/protobuf/any.proto")
generate_proto("../src/google/protobuf/api.proto")
generate_proto("../src/google/protobuf/duration.proto")
generate_proto("../src/google/protobuf/empty.proto")
generate_proto("../src/google/protobuf/field_mask.proto")
generate_proto("../src/google/protobuf/source_context.proto")
generate_proto("../src/google/protobuf/struct.proto")
generate_proto("../src/google/protobuf/timestamp.proto")
generate_proto("../src/google/protobuf/type.proto")
generate_proto("../src/google/protobuf/wrappers.proto")
GenerateUnittestProtos()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
def find_package_modules(self, package, package_dir):
exclude = (
"*test*",
"google/protobuf/internal/*_pb2.py",
"google/protobuf/internal/_parameterized.py",
"google/protobuf/pyext/python_pb2.py",
)
modules = _build_py.find_package_modules(self, package, package_dir)
return [(pkg, mod, fil) for (pkg, mod, fil) in modules
if not any(fnmatch.fnmatchcase(fil, pat=pat) for pat in exclude)]
class build_ext(_build_ext):
def get_ext_filename(self, ext_name):
# since python3.5, python extensions' shared libraries use a suffix that
# corresponds to the value of sysconfig.get_config_var('EXT_SUFFIX') and
# contains info about the architecture the library targets. E.g. on x64
# linux the suffix is ".cpython-XYZ-x86_64-linux-gnu.so" When
# crosscompiling python wheels, we need to be able to override this
# suffix so that the resulting file name matches the target architecture
# and we end up with a well-formed wheel.
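    # For example, with PROTOCOL_BUFFERS_OVERRIDE_EXT_SUFFIX set to
    # ".cpython-39-aarch64-linux-gnu.so" (a hypothetical cross-compile target),
    # "_message.cpython-39-x86_64-linux-gnu.so" would be renamed to
    # "_message.cpython-39-aarch64-linux-gnu.so".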
filename = _build_ext.get_ext_filename(self, ext_name)
orig_ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
new_ext_suffix = os.getenv("PROTOCOL_BUFFERS_OVERRIDE_EXT_SUFFIX")
if new_ext_suffix and filename.endswith(orig_ext_suffix):
filename = filename[:-len(orig_ext_suffix)] + new_ext_suffix
return filename
class test_conformance(_build_py):
target = 'test_python'
def run(self):
# Python 2.6 dodges these extra failures.
os.environ["CONFORMANCE_PYTHON_EXTRA_FAILURES"] = (
"--failure_list failure_list_python-post26.txt")
cmd = 'cd ../conformance && make %s' % (test_conformance.target)
status = subprocess.check_call(cmd, shell=True)
def get_option_from_sys_argv(option_str):
if option_str in sys.argv:
sys.argv.remove(option_str)
return True
return False
if __name__ == '__main__':
ext_module_list = []
warnings_as_errors = '--warnings_as_errors'
if get_option_from_sys_argv('--cpp_implementation'):
# Link libprotobuf.a and libprotobuf-lite.a statically with the
# extension. Note that those libraries have to be compiled with
# -fPIC for this to work.
compile_static_ext = get_option_from_sys_argv('--compile_static_extension')
libraries = ['protobuf']
extra_objects = None
if compile_static_ext:
libraries = None
extra_objects = ['../src/.libs/libprotobuf.a',
'../src/.libs/libprotobuf-lite.a']
test_conformance.target = 'test_python_cpp'
extra_compile_args = []
message_extra_link_args = None
api_implementation_link_args = None
if "darwin" in sys.platform:
if sys.version_info[0] == 2:
message_init_symbol = 'init_message'
api_implementation_init_symbol = 'init_api_implementation'
else:
message_init_symbol = 'PyInit__message'
api_implementation_init_symbol = 'PyInit__api_implementation'
message_extra_link_args = ['-Wl,-exported_symbol,_%s' % message_init_symbol]
api_implementation_link_args = ['-Wl,-exported_symbol,_%s' % api_implementation_init_symbol]
if sys.platform != 'win32':
extra_compile_args.append('-Wno-write-strings')
extra_compile_args.append('-Wno-invalid-offsetof')
extra_compile_args.append('-Wno-sign-compare')
extra_compile_args.append('-Wno-unused-variable')
extra_compile_args.append('-std=c++11')
if sys.platform == 'darwin':
extra_compile_args.append("-Wno-shorten-64-to-32");
extra_compile_args.append("-Wno-deprecated-register");
# https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
# C++ projects must now migrate to libc++ and are recommended to set a
# deployment target of macOS 10.9 or later, or iOS 7 or later.
if sys.platform == 'darwin':
mac_target = str(sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET'))
if mac_target and (pkg_resources.parse_version(mac_target) <
pkg_resources.parse_version('10.9.0')):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.9-\1',
util.get_platform())
# https://github.com/Theano/Theano/issues/4926
if sys.platform == 'win32':
extra_compile_args.append('-D_hypot=hypot')
# https://github.com/tpaviot/pythonocc-core/issues/48
if sys.platform == 'win32' and '64 bit' in sys.version:
extra_compile_args.append('-DMS_WIN64')
    # MSVS default is dynamic
if (sys.platform == 'win32'):
extra_compile_args.append('/MT')
if "clang" in os.popen('$CC --version 2> /dev/null').read():
extra_compile_args.append('-Wno-shorten-64-to-32')
if warnings_as_errors in sys.argv:
extra_compile_args.append('-Werror')
sys.argv.remove(warnings_as_errors)
# C++ implementation extension
ext_module_list.extend([
Extension(
"google.protobuf.pyext._message",
glob.glob('google/protobuf/pyext/*.cc'),
include_dirs=[".", "../src"],
libraries=libraries,
extra_objects=extra_objects,
extra_link_args=message_extra_link_args,
library_dirs=['../src/.libs'],
extra_compile_args=extra_compile_args,
),
Extension(
"google.protobuf.internal._api_implementation",
glob.glob('google/protobuf/internal/api_implementation.cc'),
extra_compile_args=extra_compile_args + ['-DPYTHON_PROTO2_CPP_IMPL_V2'],
extra_link_args=api_implementation_link_args,
),
])
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
# Keep this list of dependencies in sync with tox.ini.
install_requires = []
setup(
name='protobuf',
version=GetVersion(),
description='Protocol Buffers',
download_url='https://github.com/protocolbuffers/protobuf/releases',
long_description="Protocol Buffers are Google's data interchange format",
url='https://developers.google.com/protocol-buffers/',
maintainer='protobuf@googlegroups.com',
maintainer_email='protobuf@googlegroups.com',
license='3-Clause BSD License',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
namespace_packages=['google'],
packages=find_packages(
exclude=[
'import_test_package',
'protobuf_distutils',
],),
test_suite='google.protobuf.internal',
cmdclass={
'clean': clean,
'build_py': build_py,
'build_ext': build_ext,
'test_conformance': test_conformance,
},
install_requires=install_requires,
ext_modules=ext_module_list,
python_requires=">=3.5",
)
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 2017 at 12:07UTC
@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)
"""
import copy
import time as time_module
import numpy as np
import dill as pickle
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from attrdict import AttrDict
from . import box as bs_box
from . import validation as bs_validation
from . import descriptors as bs_descriptors
from . import entities as bs_entities
from . import errors as bs_errors
from . import process as bs_process
from . import system as bs_system
from . import transport as bs_transport
from . import utils as bs_utils
from . import ur
class Solution:
"""Storage of a simulation's solution.
An instance of Solution stores the outcome and additional
meta-information of the simulation.
    Additionally, the Solution class offers various plotting functions to
    visualize the result of the simulation.
Args:
system (BoxModelSystem): System that is simulated.
total_integration_time (pint.Quantity [T]): Total length of the simulation.
dt (pint.Quantity [T]): Integration timestep.
Attributes:
total_integration_time (pint.Quantity): Total length of the simulation.
dt (pint.Quantity): Integration timestep.
time (list of pint.Quantity): List of all times at which the system
was solved (at which a result is available).
system (BoxModelSystem): System which is simulated.
time_units (pint.Units): Units of Quantities within the time attribute.
time_magnitude (float): Magnitudes of Quantities within the time
attribute.
ts (AttrDict of AttrDict): For every box, there
exists one AttrDict which contains time series of all its
quantities (Fluid mass, Variable mass...) and the box instance.
"""
total_integration_time = bs_descriptors.PintQuantityDescriptor(
'total_integration_time', ur.second)
dt = bs_descriptors.PintQuantityDescriptor('dt', ur.second)
def __init__(self, system, N_timesteps, dt):
self.system = system
self.N_timesteps = N_timesteps
self.dt = 1 * dt
self.total_integration_time = N_timesteps * dt
self.time_array = np.linspace(0, self.total_integration_time.magnitude,
num=self.N_timesteps)
self.time_units = self.dt.units
self._setup_solution_dataframe()
self.default_figsize = [7,4]
self.default_yaxis_log = False
def _setup_solution_dataframe(self):
"""Setup Dataframe for timeseries of quantities (masses, volumes..)."""
quantities = ['mass', 'volume'] + self.system.variable_names
col_tuples = [(box, quant) for box in self.system.box_names
for quant in quantities]
index = pd.MultiIndex.from_tuples(col_tuples,
names=['Box', 'Quantity'])
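        # The resulting columns form a two-level MultiIndex, e.g.
        # ('box_A', 'mass'), ('box_A', 'volume'), ('box_A', '<variable>'), ...
        # (box and variable names here are illustrative); one row is appended
        # per solved timestep.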
self.df = pd.DataFrame(index=index).T
self.df.units = ur.kg
self.df.index.name = 'Timestep'
        # Setup Dataframe for timeseries of rates (processes, flows...)
col_tuples = []
for box_name, box in self.system.boxes.items():
for variable_name, variable in self.system.variables.items():
col_tuples.append((box_name, variable_name, 'flow'))
col_tuples.append((box_name, variable_name, 'flux'))
col_tuples.append((box_name, variable_name, 'process'))
col_tuples.append((box_name, variable_name, 'reaction'))
# flows = self.system.flows
# fluxes = self.system.fluxes
# print('---------------')
# print('box: {}; variable: {}'.format(box_name, variable_name))
# for flow in bs_transport.Flow.get_all_from(box, flows):
# print(flow)
# print(flow.source_box, flow.target_box)
# col_tuples.append((box_name, variable_name, 'flow',
# flow.name))
# for flow in bs_transport.Flow.get_all_to(box, flows):
# print(flow)
# print(flow.source_box, flow.target_box)
# col_tuples.append((box_name, variable_name, 'flow',
# flow.name))
# for flux in bs_transport.Flux.get_all_from(box, fluxes):
# if flux.variable == variable:
# col_tuples.append((box_name, variable_name, 'flow',
# flux.name))
# for flux in bs_transport.Flux.get_all_to(box, fluxes):
# if flux.variable == variable:
# col_tuples.append((box_name, variable_name, 'flow',
# flux.name))
# for process in box.processes:
# if process.variable == variable:
# col_tuples.append((box_name, variable_name,
# 'process', process.name))
# for reaction in box.reactions:
# if variable in reaction.variables:
# col_tuples.append((box_name, variable_name,
# 'reaction', reaction.name))
index = pd.MultiIndex.from_tuples(col_tuples,
names=['Box', 'Variable', 'Mechanism'])
self.df_rates = pd.DataFrame(index=index).sort_index().T
self.df_rates.units = ur.kg/ur.second
self.df_rates.index.name = 'Starting Timestep'
# VISUALIZATION
def plot_masses(self, entity, boxes=None, figsize=None,
yaxis_log=False, **kwargs):
"""Plot masses of a variable or fluid as a function of time."""
if not boxes:
boxes = self.system.box_list
fig, ax = self._gs(title='Mass of {}'.format(entity.name),
xlabel='xlabel', ylabel='ylabel', **kwargs)
for box in boxes:
if isinstance(entity, bs_entities.Fluid):
masses = self.df[(box.name, 'mass')].tolist()
elif isinstance(entity, bs_entities.Variable):
masses = self.df[(box.name, entity.name)].tolist()
else:
raise bs_errors.must_be_fluid_or_variable_error
ax.plot(self.time_array, masses,
label='Box {}'.format(box.name))
ax.legend()
return fig, ax
def plot_variable_mass(self, variable, boxes=None, figsize=None,
yaxis_log=False, **kwargs):
return self.plot_masses(variable, boxes, figsize, yaxis_log, **kwargs)
def plot_variable_concentration(self, variable, boxes=None,
figsize=None, yaxis_log=False, volumetric=False,
units=None, **kwargs):
"""Plot concentration of a variable as a function of time."""
if not boxes:
boxes = self.system.box_list
fig, ax = self._gs(title='Concentration of {}'.format(variable.name),
xlabel='xlabel', ylabel='ylabel', **kwargs)
for box in boxes:
box_masses = self.df[(box.name, 'mass')].replace(0, np.nan)
var_masses = self.df[(box.name, variable.name)]
concentrations = (var_masses/box_masses).tolist()
ax.plot(self.time_array, concentrations,
label='Box {}'.format(box.name))
ax.legend()
return fig, ax
def plot_all_variable_mass_of_box(self, box, figsize=None,
yaxis_log=None):
if not yaxis_log:
yaxis_log = self.yaxis_log
if not self.time_units:
self.time_units = self.time[0].units
if not self.time_magnitude:
self.time_magnitude = [t.magnitude for t in self.time]
if not figsize:
figsize = self.default_figsize
if yaxis_log:
yaxis_log = 'log'
else:
yaxis_log = None
fig, ax = self._get_subplots(
title='Total Variable Masses',
xlabel=self.time_units,
ylabel='kg',
figsize=figsize,
yaxis_scale=yaxis_log)
var_mass = []
for variable in self.system.variable_list:
var_mass += self.ts[box.name][variable.name]
mass_magnitude = [mass.magnitude for mass in var_mass]
ax.plot(self.time_magnitude, mass_magnitude,
label='Variable {}'.format(variable.name))
ax.legend()
return fig, ax
def plot_total_variable_masses(self, figsize=None, yaxis_log=None):
if not yaxis_log:
yaxis_log = self.yaxis_log
if not self.time_units:
self.time_units = self.time[0].units
if not self.time_magnitude:
self.time_magnitude = [t.magnitude for t in self.time]
if not figsize:
figsize = self.default_figsize
if yaxis_log:
yaxis_log = 'log'
else:
yaxis_log = None
fig, ax = self._get_subplots(
title='Total Variable Mass',
xlabel=self.time_units,
ylabel='kg',
figsize=figsize,
yaxis_scale=yaxis_log)
for variable in self.system.variable_list:
var_masses = np.zeros(len(self.time_magnitude))
i = 0
for box_name, ts in self.ts.items():
vm = bs_utils.get_array_quantity_from_array_of_quantities(
self.ts[box_name][variable.name])
var_masses += vm
i += 1
mass_magnitude = [mass.magnitude for mass in var_masses]
ax.plot(self.time_magnitude, mass_magnitude,
label='Variable {}'.format(variable.name))
ax.legend()
return fig, ax
def _gs(self, title, xlabel, ylabel, figsize=None, yaxis_log=False):
"""Get Subplots. Return subplots: fig, ax"""
if not figsize:
figsize = self.default_figsize
fig, ax = plt.subplots(figsize=figsize)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
if not yaxis_log:
yaxis_log = self.default_yaxis_log
if yaxis_log:
ax.set_yscale('log')
return fig, ax
# PICKLING
def save(self, file_name):
"""Pickle instance and save to file_name."""
with open(file_name, 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(self, file_name):
"""Load pickled instance from file_name."""
with open(file_name, 'rb') as f:
solution = pickle.load(f)
if not isinstance(solution, Solution):
raise ValueError(
'Loaded pickle object is not a Solution instance!')
return solution
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from op_test import OpTest
from paddle.fluid.framework import grad_var_name
def _reference_testing(x, scale, offset, mean, var, epsilon, data_format):
x_shape = x.shape
if len(x_shape) == 2:
if data_format == "NCHW":
x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1))
else:
x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1]))
if data_format == "NCHW":
n, c, h, w = x.shape
mean_tile = np.reshape(mean, (1, c, 1, 1))
mean_tile = np.tile(mean_tile, (n, 1, h, w))
var_tile = np.reshape(var, (1, c, 1, 1))
var_tile = np.tile(var_tile, (n, 1, h, w))
normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon)
scale_tile = np.reshape(scale, (1, c, 1, 1))
scale_tile = np.tile(scale_tile, (n, 1, h, w))
offset_tile = np.reshape(offset, (1, c, 1, 1))
        offset_tile = np.tile(offset_tile, (n, 1, h, w))
y = normalized * scale_tile + offset_tile
elif data_format == "NHWC":
normalized = (x - mean) / np.sqrt(var + epsilon)
y = normalized * scale + offset
else:
raise ValueError("Unknown data order.")
if len(x_shape) == 2:
y = np.reshape(y, x_shape)
return y
def _cal_mean_variance(x, epsilon, data_format):
assert data_format in ['NCHW', 'NHWC']
x_square = x * x
axis = (0, 2, 3) if data_format == 'NCHW' else (0, 1, 2)
C = x.shape[1] if data_format == 'NCHW' else x.shape[-1]
x_square_sum = np.sum(x_square, axis)
x_sum = np.sum(x, axis=axis)
element_count = np.size(x) / C
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
return mean, var
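def _example_cal_mean_variance_nchw():
    """Illustrative sketch (not part of the original test): shows that
    _cal_mean_variance reduces over the batch and spatial axes, producing one
    mean/variance per channel. The toy shape below is an assumption chosen
    only for demonstration."""
    # 2 images, 3 channels, 4x5 spatial grid
    x = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)
    mean, var = _cal_mean_variance(x, epsilon=1e-5, data_format='NCHW')
    # one mean and one variance per channel
    assert mean.shape == (3,) and var.shape == (3,)
    return mean, var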
def _reference_training(x, scale, offset, epsilon, data_format):
x_shape = x.shape
if data_format == "NCHW":
n, c, h, w = x.shape
x_square = x * x
x_square_sum = np.sum(x_square, (0, 2, 3))
x_sum = np.sum(x, axis=(0, 2, 3))
element_count = np.size(x) / int(np.shape(x)[1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
mean_tile = np.reshape(mean, (1, c, 1, 1))
mean_tile = np.tile(mean_tile, (n, 1, h, w))
var_tile = np.reshape(var, (1, c, 1, 1))
var_tile = np.tile(var_tile, (n, 1, h, w))
normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon)
scale_tile = np.reshape(scale, (1, c, 1, 1))
scale_tile = np.tile(scale_tile, (n, 1, h, w))
offset_tile = np.reshape(offset, (1, c, 1, 1))
        offset_tile = np.tile(offset_tile, (n, 1, h, w))
y = normalized * scale_tile + offset_tile
return y, mean, var
elif data_format == "NHWC":
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
normalized = (x - mean) / np.sqrt(var + epsilon)
y = normalized * scale + offset
return y, mean, var
else:
raise ValueError("Unknown data order.")
def _reference_grad(x, y_grad, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
    # grad_offset = sum(grad_y)
#
# x_grad =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
# transfer from (N, C, H, W) to (N, H, W, C) to simplify computation
if data_format != "NCHW" and data_format != "NHWC":
raise ValueError("Unknown data order.")
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
y_grad = np.transpose(y_grad, (0, 2, 3, 1))
x_grad = scale * (y_grad - np.mean(
y_grad, axis=(0, 1, 2)) - (x - mean) * np.mean(
y_grad * (x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon),
axis=(0, 1, 2))
grad_offset = np.sum(y_grad, axis=(0, 1, 2))
# transfer back to N, C, H, W
if data_format == "NCHW":
x_grad = np.transpose(x_grad, (0, 3, 1, 2))
x = np.transpose(x, (0, 3, 1, 2))
y_grad = np.transpose(y_grad, (0, 3, 1, 2))
return x_grad, grad_scale, grad_offset
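def _example_reference_grad_shapes():
    """Illustrative sketch (not part of the original test): verifies that
    _reference_grad returns per-element x gradients and per-channel
    scale/offset gradients for a small NCHW input. The toy shapes are
    assumptions chosen only for demonstration."""
    n, c, h, w = 2, 3, 4, 5
    x = np.random.random_sample((n, c, h, w)).astype(np.float32)
    y_grad = np.random.random_sample((n, c, h, w)).astype(np.float32)
    scale = np.ones((c,), dtype=np.float32)
    mean = np.zeros((c,), dtype=np.float32)
    var = np.ones((c,), dtype=np.float32)
    x_grad, grad_scale, grad_offset = _reference_grad(
        x, y_grad, scale, mean, var, 1e-5, "NCHW")
    assert x_grad.shape == (n, c, h, w)
    assert grad_scale.shape == (c,) and grad_offset.shape == (c,)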
def create_or_get_tensor(scope, var_name, var, place):
tensor = scope.var(var_name).get_tensor()
if var is not None:
assert isinstance(var, np.ndarray)
tensor.set_recursive_sequence_lengths([])
tensor.set(var, place)
return tensor
def set_output_grad(scope, outputs, place, feed_dict=None):
def __set_tensor__(name, data=None):
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.var(grad_var_name(name)).get_tensor()
out_dtype = out_tensor.dtype()
if data is None:
if out_dtype == core.VarDesc.VarType.FP64:
data = np.ones(out_tensor.shape(), dtype=np.float64)
elif out_dtype == core.VarDesc.VarType.FP32:
data = np.ones(out_tensor.shape(), dtype=np.float32)
else:
raise ValueError("Not supported data type " + str(out_dtype))
grad_tensor.set(data, place)
for output in outputs:
data = None
if output in feed_dict:
data = feed_dict[output]
__set_tensor__(output, data)
class TestBatchNormOpInference(unittest.TestCase):
def setUp(self):
self.dtype = np.float32
self.use_mkldnn = False
self.fuse_with_relu = False
self.init_kernel_type()
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
def check_with_place(self, place, data_layout, dtype, shape):
epsilon = 0.00001
if len(shape) == 2:
x_shape = shape
c = x_shape[1]
else:
n, h, w, c = shape[0], shape[1], shape[2], shape[3]
if data_layout == "NHWC":
x_shape = [n, h, w, c]
elif data_layout == "NCHW":
x_shape = [n, c, h, w]
else:
raise ValueError("Unknown data layout.")
scale_shape = [c]
x_val = np.random.random_sample(x_shape).astype(dtype)
# generate some negative values to test case with relu fused
x_val = x_val - 0.5
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
bias_val = np.random.random_sample(scale_shape).astype(np.float32)
mean = np.zeros(scale_shape).astype(np.float32)
variance = np.ones(scale_shape).astype(np.float32)
y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
epsilon, data_layout).astype(dtype)
if self.fuse_with_relu:
y_out = np.maximum(y_out, 0)
scope = core.Scope()
# create input
x_tensor = create_or_get_tensor(scope, "x_val",
OpTest.np_dtype_to_fluid_dtype(x_val),
place)
scale_tensor = create_or_get_tensor(
scope, "scale_val",
OpTest.np_dtype_to_fluid_dtype(scale_val), place)
bias_tensor = create_or_get_tensor(
scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place)
mean_tensor = create_or_get_tensor(scope, "mean",
OpTest.np_dtype_to_fluid_dtype(mean),
place)
variance_tensor = create_or_get_tensor(
scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place)
# create output
y_tensor = create_or_get_tensor(scope, "y_out", None, place)
saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
place)
saved_variance_tensor = create_or_get_tensor(scope, "saved_variance",
None, place)
mean_out_tensor = mean_tensor
variance_out_tensor = variance_tensor
batch_norm_op = Operator(
"batch_norm",
# inputs
X="x_val",
Scale="scale_val",
Bias="bias_val",
Mean="mean",
Variance="variance",
# outputs
Y="y_out",
MeanOut="mean",
VarianceOut="variance",
SavedMean="saved_mean",
SavedVariance="saved_variance",
# attrs
is_test=True,
data_layout=data_layout,
use_mkldnn=self.use_mkldnn,
fuse_with_relu=self.fuse_with_relu,
epsilon=epsilon)
batch_norm_op.run(scope, place)
# check inference result
self.__assert_close(
y_tensor,
y_out,
"inference output are different at " + str(place) + ", " +
data_layout + ", " + str(np.dtype(dtype)) +
str(np.array(y_tensor)) + str(y_out),
atol=1e-3)
def test_check_output(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
places.append(core.CUDAPlace(0))
for place in places:
for data_format in ["NCHW", "NHWC"]:
self.check_with_place(place, data_format, self.dtype,
[2, 3, 4, 5])
self.check_with_place(place, data_format, self.dtype, [2, 3])
def init_kernel_type(self):
pass
class TestFP16BatchNormOpInference(TestBatchNormOpInference):
def setUp(self):
self.dtype = np.float16
self.use_mkldnn = False
self.fuse_with_relu = False
self.init_kernel_type()
def test_check_output(self):
places = []
if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
places.append(place)
for place in places:
for data_format in ["NCHW", "NHWC"]:
self.check_with_place(place, data_format, self.dtype,
[2, 3, 4, 5])
self.check_with_place(place, data_format, self.dtype, [2, 3])
class TestBatchNormOpTraining(unittest.TestCase):
def setUp(self):
self.use_mkldnn = False
self.fuse_with_relu = False
self.data_formats = ["NCHW", "NHWC"]
self.momentum = 0.9
self.epsilon = 0.00001
self.init_kernel_type()
self.init_test_case()
def init_test_case(self):
self.use_global_stats = False
self.no_grad_set = set()
self.fetch_list = [
'y', 'mean', 'variance', 'saved_mean', 'saved_variance', 'x@GRAD',
'scale@GRAD', 'bias@GRAD'
]
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        self.assertTrue(
            np.allclose(np.array(tensor), np_array, atol=atol), msg)
def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance,
epsilon, momentum, shape, data_layout):
# run forward
y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon,
data_layout)
mean_out = saved_mean * (1. - momentum) + momentum * mean
variance_out = var_ref * (1. - momentum) + momentum * variance
saved_variance = 1. / np.sqrt(var_ref + epsilon)
# run backward
x_grad, scale_grad, bias_grad = _reference_grad(
x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout)
return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad
def set_mean_variance(self, scale_shape, x, data_layout):
mean = np.zeros(scale_shape).astype(np.float32)
variance = np.ones(scale_shape).astype(np.float32)
# computing global mean/variance for one step
if self.use_global_stats:
mom = self.momentum
x_mean, x_var = _cal_mean_variance(x, self.epsilon, data_layout)
mean = x_mean * (1. - mom) + mom * mean
variance = x_var * (1. - mom) + mom * variance
return mean, variance
def test_forward_backward(self):
def test_with_place(place, data_layout, shape):
# attr
epsilon = self.epsilon
momentum = self.momentum
if data_layout == "NCHW":
n, c, h, w = shape[0], shape[1], shape[2], shape[3]
else:
n, h, w, c = shape[0], shape[1], shape[2], shape[3]
scale_shape = [c]
np.random.seed(123)
x = np.random.random_sample(shape).astype(np.float32)
scale = np.random.random_sample(scale_shape).astype(np.float32)
bias = np.random.random_sample(scale_shape).astype(np.float32)
mean, variance = self.set_mean_variance(scale_shape, x, data_layout)
y_grad = np.random.random_sample(shape).astype(np.float32)
y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
x, y_grad, scale, bias, mean, variance, epsilon, momentum,
shape, data_layout)
var_dict = locals()
var_dict['y@GRAD'] = y_grad
var_dict['x@GRAD'] = x_grad
var_dict['scale@GRAD'] = scale_grad
var_dict['bias@GRAD'] = bias_grad
var_names = [
'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean',
'saved_variance'
]
ground_truth = {name: var_dict[name] for name in var_names}
program = fluid.Program()
with fluid.program_guard(program):
block = program.global_block()
for name in ground_truth:
block.create_var(
name=name,
dtype='float32',
shape=ground_truth[name].shape)
bn_op = block.append_op(
type="batch_norm",
inputs={
"X": block.var('x'),
"Scale": block.var('scale'),
"Bias": block.var('bias'),
"Mean": block.var('mean'),
"Variance": block.var('variance')
},
outputs={
"Y": block.var('y'),
"MeanOut": block.var('mean'), # share memory
"VarianceOut": block.var('variance'), # share memory
"SavedMean": block.var('saved_mean'),
"SavedVariance": block.var('saved_variance')
},
attrs={
"momentum": momentum,
"epsilon": epsilon,
"is_test": False,
"data_layout": data_layout,
"use_mkldnn": self.use_mkldnn,
"fuse_with_relu": self.fuse_with_relu,
"use_global_stats": self.use_global_stats
})
block.create_var(name='y@GRAD', dtype='float32', shape=y.shape)
# generate backward op_desc
grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
bn_op.desc, self.no_grad_set, [])
grad_op_desc = grad_op_desc_list[0]
new_op_desc = block.desc.append_op()
new_op_desc.copy_from(grad_op_desc)
for var_name in grad_op_desc.output_arg_names():
block.desc.var(var_name.encode("ascii"))
grad_op_desc.infer_var_type(block.desc)
grad_op_desc.infer_shape(block.desc)
for arg in grad_op_desc.output_arg_names():
grad_var = block.desc.find_var(arg.encode("ascii"))
grad_var.set_dtype(core.VarDesc.VarType.FP32)
exe = fluid.Executor(place)
out = exe.run(
program,
feed={
name: var_dict[name]
for name in
['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD']
},
fetch_list=self.fetch_list)
for id, name in enumerate(self.fetch_list):
self.__assert_close(var_dict[name], out[id], name)
print("op test forward passed: ", str(place), data_layout)
places = [core.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
places.append(core.CUDAPlace(0))
for place in places:
for data_format in self.data_formats:
test_with_place(place, data_format, [2, 3, 4, 5])
def init_kernel_type(self):
pass
class TestBatchNormOpFreezeStatsTraining(TestBatchNormOpTraining):
def init_test_case(self):
self.use_global_stats = True
self.no_grad_set = set()
self.fetch_list = [
'y', 'mean', 'variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD'
]
def reference_grad(self, x, y_grad, scale, mean, var, epsilon, data_format):
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
y_grad = np.transpose(y_grad, (0, 2, 3, 1))
x_grad = scale * y_grad / np.sqrt(var + epsilon)
grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon),
axis=(0, 1, 2))
grad_offset = np.sum(y_grad, axis=(0, 1, 2))
# transfer back to N, C, H, W
if data_format == "NCHW":
x_grad = np.transpose(x_grad, (0, 3, 1, 2))
x = np.transpose(x, (0, 3, 1, 2))
y_grad = np.transpose(y_grad, (0, 3, 1, 2))
return x_grad, grad_scale, grad_offset
def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance,
epsilon, momentum, shape, data_layout):
if data_layout != "NCHW" and data_layout != "NHWC":
raise ValueError("Unknown data order.")
if data_layout == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
# run normalization
normalized = (x - mean) / np.sqrt(variance + epsilon)
y = normalized * scale + bias
# transfer back to N, C, H, W
if data_layout == "NCHW":
x = np.transpose(x, (0, 3, 1, 2))
y = np.transpose(y, (0, 3, 1, 2))
mean_out = mean
variance_out = variance
saved_variance = 1. / np.sqrt(variance + epsilon)
# run backward
x_grad, scale_grad, bias_grad = self.reference_grad(
x, y_grad, scale, mean, variance, epsilon, data_layout)
return y, mean_out, variance_out, mean, saved_variance, x_grad, scale_grad, bias_grad
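# A minimal standalone sketch (illustrative only, not used by the tests) of
# the frozen-statistics math that reference_grad/ref_forward_backward above
# implement, written for NHWC input and assuming the module-level numpy
# import `np` used throughout this file:
def _frozen_stats_reference_example(x, y_grad, scale, bias, mean, variance, epsilon):
    # Forward: normalize with the running statistics rather than batch statistics.
    inv_std = 1.0 / np.sqrt(variance + epsilon)
    y = (x - mean) * inv_std * scale + bias
    # Backward: mean/variance are constants here, so no correction terms appear.
    x_grad = y_grad * scale * inv_std
    scale_grad = np.sum(y_grad * (x - mean) * inv_std, axis=(0, 1, 2))
    bias_grad = np.sum(y_grad, axis=(0, 1, 2))
    return y, x_grad, scale_grad, bias_grad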
class TestBatchNormOpFreezeStatsAndScaleBiasTraining(
TestBatchNormOpFreezeStatsTraining):
def init_test_case(self):
self.use_global_stats = True
self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# Copyright 2008 Google Inc. All Rights Reserved.
"""Tests for ActiveResource objects."""
__author__ = 'Mark Roach (mrroach@google.com)'
import unittest
import pickle
import urllib
from pyactiveresource import activeresource
from pyactiveresource import connection
from pyactiveresource import util
from pyactiveresource.tests import http_fake
class Error(Exception):
pass
class ActiveResourceTest(unittest.TestCase):
"""Tests for activeresource.ActiveResource."""
def setUp(self):
"""Create test objects."""
self.arnold = {'id': 1, 'name': 'Arnold Ziffel'}
self.eb = {'id': 2, 'name': 'Eb Dawson'}
self.sam = {'id': 3, 'name': 'Sam Drucker'}
self.soup = {'id': 1, 'name': 'Hot Water Soup'}
self.store_new = {'name': 'General Store'}
self.general_store = {'id': 1, 'name': 'General Store'}
self.store_update = {'manager_id': 3, 'id': 1, 'name': 'General Store'}
self.xml_headers = {'Content-type': 'application/xml'}
self.matz = util.to_xml(
{'id': 1, 'name': 'Matz'}, root='person')
self.matz_deep = util.to_xml(
{'id': 1, 'name': 'Matz', 'other': 'other'},
root='person')
self.matz_array = util.to_xml(
[{'id': 1, 'name': 'Matz'}], root='people')
self.ryan = util.to_xml(
{'name': 'Ryan'}, root='person')
self.addy = util.to_xml(
{'id': 1, 'street': '12345 Street'},
root='address')
self.addy_deep = util.to_xml(
{'id': 1, 'street': '12345 Street', 'zip': "27519"},
root='address')
http_fake.initialize() # Fake all http requests
self.http = http_fake.TestHandler
self.http.set_response(Error('Bad request'))
self.http.site = 'http://localhost'
self.zero_length_content_headers = {'Content-length': '0',
'Content-type': 'application/xml'}
class Person(activeresource.ActiveResource):
_site = 'http://localhost'
self.person = Person
class Store(activeresource.ActiveResource):
_site = 'http://localhost'
self.store = Store
class Address(activeresource.ActiveResource):
_site = 'http://localhost/people/$person_id/'
self.address = Address
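# The $person_id placeholder in Address._site above is a prefix option:
# a call such as Address.find(1, person_id=1) is routed to
# GET /people/1/addresses/1.xml, as the nested-resource tests below exercise.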
def test_find_one(self):
# Return an object for a specific one-off url
self.http.respond_to(
'GET', '/what_kind_of_soup.xml', {},
util.to_xml(self.soup, root='soup'))
class Soup(activeresource.ActiveResource):
_site = 'http://localhost'
soup = Soup.find_one(from_='/what_kind_of_soup.xml')
self.assertEqual(self.soup, soup.attributes)
def test_find(self):
# Return a list of people for a find method call
self.http.respond_to(
'GET', '/people.xml', {},
util.to_xml([self.arnold, self.eb], root='people'))
people = self.person.find()
self.assertEqual([self.arnold, self.eb],
[p.attributes for p in people])
def test_find_parses_non_array_collection(self):
collection_xml = '''<people>
<person><name>bob</name><id>1</id></person>
<person><name>jim</name><id>2</id></person>
</people>'''
self.http.respond_to('GET', '/people.xml', {}, collection_xml)
results = self.person.find()
self.assertEqual(2, len(results))
def test_find_parses_single_item_non_array_collection(self):
collection_xml = '''<people>
<person><name>jim</name><id>2</id></person>
</people>'''
self.http.respond_to('GET', '/people.xml', {}, collection_xml)
results = self.person.find()
self.assertEqual(1, len(results))
def test_find_by_id(self):
# Return a single person for a find(id=<id>) call
self.http.respond_to(
'GET', '/people/1.xml', {}, util.to_xml(self.arnold, root='person'))
arnold = self.person.find(1)
self.assertEqual(self.arnold, arnold.attributes)
def test_reload(self):
self.http.respond_to(
'GET', '/people/1.xml', {}, util.to_xml(self.arnold, root='person'))
arnold = self.person.find(1)
arnold.name = 'someone else'
arnold.reload()
self.assertEqual(self.arnold, arnold.attributes)
def test_find_with_query_options(self):
# Return a single-item people list for a find() call with kwargs
self.http.respond_to(
'GET', '/people.xml?name=Arnold', {},
util.to_xml([self.arnold], root='people'))
# Query options only
arnold = self.person.find(name='Arnold')[0]
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_unicode_query_args(self):
self.http.respond_to(
'GET', '/people.xml?name=%C3%83%C3%A9', {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(name=u'\xc3\xe9')
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_integer_query_args(self):
self.http.respond_to(
'GET', '/people.xml?employee_id=12345', {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(employee_id=12345)
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_long_query_args(self):
self.http.respond_to(
'GET', '/people.xml?employee_id=12345', {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(employee_id=12345L)
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_array_query_args(self):
query = urllib.urlencode({'vars[]': ['a', 'b', 'c']}, True)
self.http.respond_to(
'GET', '/people.xml?%s' % query, {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(vars=['a', 'b', 'c'])
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_dictionary_query_args(self):
query = urllib.urlencode({'vars[key]': 'val'}, True)
self.http.respond_to(
'GET', '/people.xml?%s' % query, {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(vars={'key': 'val'})
self.assertEqual(self.arnold, arnold.attributes)
def test_find_should_handle_dictionary_query_args_with_array_value(self):
query = urllib.urlencode({'vars[key][]': ['val1', 'val2']}, True)
self.http.respond_to(
'GET', '/people.xml?%s' % query, {},
util.to_xml([self.arnold], root='people'))
arnold = self.person.find_first(vars={'key': ['val1', 'val2']})
self.assertEqual(self.arnold, arnold.attributes)
def test_find_with_prefix_options(self):
# Paths for prefix_options related requests
self.http.respond_to(
'GET', '/stores/1/people.xml', {},
util.to_xml([self.sam], root='people'))
# Prefix options only
self.person._site = 'http://localhost/stores/$store_id/'
sam = self.person.find(store_id=1)[0]
self.assertEqual(self.sam, sam.attributes)
def test_find_with_prefix_and_query_options(self):
self.http.respond_to(
'GET', '/stores/1/people.xml?name=Ralph', {},
util.to_xml([], root='people'))
# Query & prefix options
self.person._site = 'http://localhost/stores/$store_id/'
nobody = self.person.find(store_id=1, name='Ralph')
self.assertEqual([], nobody)
def test_set_prefix_source(self):
self.http.respond_to(
'GET', '/stores/1/people.xml?name=Ralph', {},
util.to_xml([], root='people'))
self.person.prefix_source = '/stores/${store_id}/'
nobody = self.person.find(store_id=1, name='Ralph')
self.assertEqual([], nobody)
def test_save(self):
# Return an object with id for a post(save) request.
self.http.respond_to(
'POST', '/stores.xml', self.xml_headers,
util.to_xml(self.general_store))
# Return an object for a put request.
self.http.respond_to(
'PUT', '/stores/1.xml', self.xml_headers,
util.to_xml(self.store_update, root='store'))
store = self.store(self.store_new)
store.save()
self.assertEqual(self.general_store, store.attributes)
store.manager_id = 3
store.save()
def test_class_get(self):
self.http.respond_to('GET', '/people/retrieve.xml?name=Matz',
{}, self.matz_array)
self.assertEqual([{'id': 1, 'name': 'Matz'}],
self.person.get('retrieve', name='Matz'))
def test_class_post(self):
self.http.respond_to('POST', '/people/hire.xml?name=Matz',
self.zero_length_content_headers, '')
self.assertEqual(connection.Response(200, ''),
self.person.post('hire', name='Matz'))
def test_class_put(self):
self.http.respond_to('PUT', '/people/promote.xml?name=Matz',
self.xml_headers, '')
self.assertEqual(connection.Response(200, ''),
self.person.put('promote', 'atestbody', name='Matz'))
def test_class_put_nested(self):
self.http.respond_to('PUT', '/people/1/addresses/sort.xml?by=name',
self.zero_length_content_headers, '')
self.assertEqual(connection.Response(200, ''),
self.address.put('sort', person_id=1, by='name'))
def test_class_delete(self):
self.http.respond_to('DELETE', '/people/deactivate.xml?name=Matz',
{}, '')
self.assertEqual(connection.Response(200, ''),
self.person.delete('deactivate', name='Matz'))
def test_instance_get(self):
self.http.respond_to('GET', '/people/1.xml', {}, self.matz)
self.http.respond_to('GET', '/people/1/shallow.xml', {}, self.matz)
self.assertEqual({'id': 1, 'name': 'Matz'},
self.person.find(1).get('shallow'))
self.http.respond_to('GET', '/people/1/deep.xml', {}, self.matz_deep)
self.assertEqual({'id': 1, 'name': 'Matz', 'other': 'other'},
self.person.find(1).get('deep'))
def test_instance_post_new(self):
ryan = self.person({'name': 'Ryan'})
self.http.respond_to('POST', '/people/new/register.xml',
self.xml_headers, '')
self.assertEqual(
connection.Response(200, ''), ryan.post('register'))
def test_instance_post(self):
self.http.respond_to('POST', '/people/1/register.xml',
self.zero_length_content_headers, self.matz)
matz = self.person({'id': 1, 'name': 'Matz'})
self.assertEqual(connection.Response(200, self.matz),
matz.post('register'))
def test_instance_put(self):
self.http.respond_to('GET', '/people/1.xml', {}, self.matz)
self.http.respond_to(
'PUT', '/people/1/promote.xml?position=Manager',
self.xml_headers, '')
self.assertEqual(
connection.Response(200, ''),
self.person.find(1).put('promote', 'body', position='Manager'))
def test_instance_put_nested(self):
self.http.respond_to(
'GET', '/people/1/addresses/1.xml', {}, self.addy)
self.http.respond_to(
'PUT', '/people/1/addresses/1/normalize_phone.xml?locale=US',
self.zero_length_content_headers, '', 204)
self.assertEqual(
connection.Response(204, ''),
self.address.find(1, person_id=1).put('normalize_phone',
locale='US'))
def test_instance_get_nested(self):
self.http.respond_to(
'GET', '/people/1/addresses/1.xml', {}, self.addy)
self.http.respond_to(
'GET', '/people/1/addresses/1/deep.xml', {}, self.addy_deep)
self.assertEqual({'id': 1, 'street': '12345 Street', 'zip': "27519"},
self.address.find(1, person_id=1).get('deep'))
def test_instance_delete(self):
self.http.respond_to('GET', '/people/1.xml', {}, self.matz)
self.http.respond_to('DELETE', '/people/1/deactivate.xml', {}, '')
self.assertEqual('', self.person.find(1).delete('deactivate').body)
def test_save_should_get_id_from_location(self):
self.http.respond_to(
'POST', '/people.xml', self.xml_headers,
'', 200, {'Location': '/people/7.xml'})
person = self.person.create({})
self.assertEqual(7, person.id)
def test_save_should_get_id_from_lowercase_location(self):
# There seems to be some inconsistency in how headers are reformatted
# This will ensure that we catch the two sensible cases (init caps and
# all lowercase)
self.http.respond_to(
'POST', '/people.xml', self.xml_headers,
'', 200, {'location': '/people/7.xml'})
person = self.person.create({})
self.assertEqual(7, person.id)
def test_should_accept_setting_user(self):
self.person.user = 'david'
self.assertEqual('david', self.person.user)
self.assertEqual('david', self.person.connection.user)
def test_should_accept_setting_password(self):
self.person.password = 'test123'
self.assertEqual('test123', self.person.password)
self.assertEqual('test123', self.person.connection.password)
def test_should_accept_setting_timeout(self):
self.person.timeout = 77
self.assertEqual(77, self.person.timeout)
self.assertEqual(77, self.person.connection.timeout)
def test_user_variable_can_be_reset(self):
class Actor(activeresource.ActiveResource):
pass
Actor.site = 'http://cinema'
self.assert_(Actor.user is None)
Actor.user = 'username'
Actor.user = None
self.assert_(Actor.user is None)
self.assertFalse(Actor.connection.user)
def test_password_variable_can_be_reset(self):
class Actor(activeresource.ActiveResource):
pass
Actor.site = 'http://cinema'
self.assert_(Actor.password is None)
Actor.password = 'password'
Actor.password = None
self.assert_(Actor.password is None)
self.assertFalse(Actor.connection.password)
def test_format_variable_can_be_reset(self):
class Actor(activeresource.ActiveResource):
pass
Actor.site = 'http://cinema'
Actor.format = None
self.assert_(Actor.connection.format is None)
Actor.format = object()
self.assertEqual(Actor.format, Actor.connection.format)
def test_timeout_variable_can_be_reset(self):
class Actor(activeresource.ActiveResource):
pass
Actor.site = 'http://cinema'
self.assert_(Actor.timeout is None)
Actor.timeout = 5
Actor.timeout = None
self.assert_(Actor.timeout is None)
self.assert_(Actor.connection.timeout is None)
def test_credentials_from_site_are_decoded(self):
class Actor(activeresource.ActiveResource):
pass
Actor.site = 'http://my%40email.com:%31%32%33@cinema'
self.assertEqual('my@email.com', Actor.user)
self.assertEqual('123', Actor.password)
def test_site_attribute_declaration_is_parsed(self):
class Actor(activeresource.ActiveResource):
_site = 'http://david:test123@localhost.localsite:4000/api'
self.assertEqual(['david', 'test123'], [Actor.user, Actor.password])
def test_changing_subclass_site_does_not_affect_superclass(self):
class Actor(self.person):
pass
Actor.site = 'http://actor-site'
self.assertNotEqual(Actor.site, self.person.site)
def test_changing_superclass_site_affects_unset_subclass_site(self):
class Actor(self.person):
pass
self.person.site = 'http://person-site'
self.assertEqual(Actor.site, self.person.site)
def test_changing_superclass_site_does_not_affect_set_subclass_set(self):
class Actor(self.person):
pass
Actor.site = 'http://actor-site'
self.person.site = 'http://person-site'
self.assertNotEqual(Actor.site, self.person.site)
def test_updating_superclass_site_resets_descendent_connection(self):
class Actor(self.person):
pass
self.assert_(self.person.connection is Actor.connection)
self.person.site = 'http://another-site'
self.assert_(self.person.connection is Actor.connection)
def test_updating_superclass_user_resets_descendent_connection(self):
class Actor(self.person):
pass
self.assert_(self.person.connection is Actor.connection)
self.person.user = 'username'
self.assert_(self.person.connection is Actor.connection)
def test_updating_superclass_password_resets_descendent_connection(self):
class Actor(self.person):
pass
self.assert_(self.person.connection is Actor.connection)
self.person.password = 'password'
self.assert_(self.person.connection is Actor.connection)
def test_updating_superclass_timeout_resets_descendent_connection(self):
class Actor(self.person):
pass
self.assert_(self.person.connection is Actor.connection)
self.person.timeout = 10
self.assert_(self.person.connection is Actor.connection)
def test_repeated_attribute_modification_updates_attributes_dict(self):
res = activeresource.ActiveResource()
res.name = 'first'
res.name = 'second'
res.name = 'third'
self.assertEqual('third', res.attributes['name'])
def test_resources_should_be_picklable_and_unpicklable(self):
res = activeresource.ActiveResource({'name': 'resource', 'id': 5})
pickle_string = pickle.dumps(res)
unpickled = pickle.loads(pickle_string)
self.assertEqual(res, unpickled)
def test_to_dict_should_handle_attributes_containing_lists_of_dicts(self):
children = [{'name': 'child1'}, {'name': 'child2'}]
res = activeresource.ActiveResource()
res.children = children
self.assertEqual(children, res.to_dict()['children'])
def test_to_xml_should_handle_attributes_containing_lists_of_dicts(self):
children = [{'name': 'child1'}, {'name': 'child2'}]
res = activeresource.ActiveResource()
res.children = children
xml = res.to_xml()
parsed = util.xml_to_dict(xml, saveroot=False)
self.assertEqual(children, parsed['children'])
def test_to_xml_should_handle_dasherize_option(self):
res = activeresource.ActiveResource({'attr_name': 'value'})
xml = res.to_xml(dasherize=False)
self.assert_('<attr_name>value</attr_name>' in xml)
if __name__ == '__main__':
unittest.main()
|
|
""" Models for representing top-level plot objects.
"""
from __future__ import absolute_import
from six import string_types
import warnings
from ..core.query import find
from ..core import validation
from ..core.validation.warnings import (MISSING_RENDERERS, NO_DATA_RENDERERS,
EMPTY_LAYOUT, MALFORMED_CATEGORY_LABEL)
from ..core.enums import Location
from ..core.property_mixins import LineProps, TextProps, FillProps
from ..model import Model
from ..core.properties import (Bool, Int, String, Enum, Auto, Instance, Either,
List, Dict, Include, Override)
from ..util.string import nice_join
from ..core.validation.errors import REQUIRED_RANGE
from .annotations import Annotation, Legend
from .axes import Axis
from .glyphs import Glyph
from .grids import Grid
from .ranges import Range, FactorRange
from .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer
from .sources import DataSource, ColumnDataSource
from .tools import Tool, ToolEvents
from .layouts import LayoutDOM
class _list_attr_splat(list):
def __setattr__(self, attr, value):
for x in self:
setattr(x, attr, value)
def __dir__(self):
if len(set(type(x) for x in self)) == 1:
return dir(self[0])
else:
return dir(self)
def _select_helper(args, kwargs):
"""
Allow flexible selector syntax.
Returns:
a dict
"""
if len(args) > 1:
raise TypeError("select accepts at most ONE positional argument.")
if len(args) > 0 and len(kwargs) > 0:
raise TypeError("select accepts EITHER a positional argument, OR keyword arguments (not both).")
if len(args) == 0 and len(kwargs) == 0:
raise TypeError("select requires EITHER a positional argument, OR keyword arguments.")
if args:
arg = args[0]
if isinstance(arg, dict):
selector = arg
elif isinstance(arg, string_types):
selector = dict(name=arg)
elif issubclass(arg, Model):
selector = {"type" : arg}
else:
raise RuntimeError("Selector must be a dict, a string name, or a Model subclass.")
else:
selector = kwargs
return selector
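# Illustrative sketch of the three accepted call styles and the selector
# dict each produces (HoverTool here is just an example Model subclass):
#     _select_helper((HoverTool,), {})      -> {"type": HoverTool}
#     _select_helper(("mycircle",), {})     -> {"name": "mycircle"}
#     _select_helper((), {"name": "foo"})   -> {"name": "foo"}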
class Plot(LayoutDOM):
""" Model representing a plot, containing glyphs, guides, annotations.
"""
def __init__(self, **kwargs):
if "tool_events" not in kwargs:
kwargs["tool_events"] = ToolEvents()
if "border_fill" in kwargs and "border_fill_color" in kwargs:
raise ValueError("Conflicting properties set on plot: border_fill, border_fill_color.")
if "background_fill" in kwargs and "background_fill_color" in kwargs:
raise ValueError("Conflicting properties set on plot: background_fill, background_fill_color.")
super(Plot, self).__init__(**kwargs)
def select(self, *args, **kwargs):
''' Query this object and all of its references for objects that
match the given selector.
There are a few different ways to call the ``select`` method.
The most general is to supply a JSON-like query dictionary as the
single argument or as keyword arguments:
Args:
selector (JSON-like) : a query selector dictionary
Keyword Arguments:
kwargs : query dict key/values as keyword arguments
For convenience, queries on just names can be made by supplying
the ``name`` string as the single parameter:
Args:
name (str) : the name to query on
Also queries on just type can be made simply by supplying the
``Model`` subclass as the single parameter:
Args:
type (Model) : the type to query on
Returns:
seq[Model]
Examples:
.. code-block:: python
# These two are equivalent
p.select({"type": HoverTool})
p.select(HoverTool)
# These two are also equivalent
p.select({"name": "mycircle"})
p.select("mycircle")
# Keyword arguments can be supplied in place of selector dict
p.select({"name": "foo", "type": HoverTool})
p.select(name="foo", type=HoverTool)
'''
selector = _select_helper(args, kwargs)
# Want to pass selector that is a dictionary
return _list_attr_splat(find(self.references(), selector, {'plot': self}))
def row(self, row, gridplot):
''' Return whether this plot is in a given row of a GridPlot.
Args:
row (int) : index of the row to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.row(row)
def column(self, col, gridplot):
''' Return whether this plot is in a given column of a GridPlot.
Args:
col (int) : index of the column to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.column(col)
def _axis(self, *sides):
objs = []
for s in sides:
objs.extend(getattr(self, s, []))
axis = [obj for obj in objs if isinstance(obj, Axis)]
return _list_attr_splat(axis)
@property
def xaxis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.
"""
return self._axis("above", "below")
@property
def yaxis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.
"""
return self._axis("left", "right")
@property
def axis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects.
"""
return _list_attr_splat(self.xaxis + self.yaxis)
@property
def legend(self):
"""Splattable list of :class:`~bokeh.models.annotations.Legend` objects.
"""
legends = [obj for obj in self.renderers if isinstance(obj, Legend)]
return _list_attr_splat(legends)
def _grid(self, dimension):
grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]
return _list_attr_splat(grid)
@property
def xgrid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.
"""
return self._grid(0)
@property
def ygrid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.
"""
return self._grid(1)
@property
def grid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects.
"""
return _list_attr_splat(self.xgrid + self.ygrid)
def add_layout(self, obj, place='center'):
''' Adds an object to the plot in a specified place.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
'''
valid_places = ['left', 'right', 'above', 'below', 'center']
if place not in valid_places:
raise ValueError(
"Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
)
if hasattr(obj, 'plot'):
if obj.plot is not None:
raise ValueError("object to be added already has 'plot' attribute set")
obj.plot = self
self.renderers.append(obj)
if place != 'center':
getattr(self, place).append(obj)
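# Usage sketch (assumes `axis` is an Axis instance created elsewhere):
#     plot.add_layout(axis, 'left')
# appends the axis to self.renderers and, since the place is not 'center',
# also to self.left so it is drawn in the left-hand gutter.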
def add_tools(self, *tools):
''' Adds tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
Returns:
None
'''
if not all(isinstance(tool, Tool) for tool in tools):
raise ValueError("All arguments to add_tool must be Tool subclasses.")
for tool in tools:
if tool.plot is not None:
raise ValueError("tool %s to be added already has 'plot' attribute set" % tool)
tool.plot = self
if hasattr(tool, 'overlay'):
self.renderers.append(tool.overlay)
self.tools.append(tool)
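# Usage sketch: plot.add_tools(PanTool(), WheelZoomTool()) attaches each tool
# to this plot; tools exposing an `overlay` attribute additionally get that
# overlay appended to self.renderers, as checked by the hasattr call above.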
def add_glyph(self, source_or_glyph, glyph=None, **kw):
''' Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configuring a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyph to use
glyph (Glyph) : the glyph to add to the Plot
Keyword Arguments:
Any additional keyword arguments are passed on as-is to the
Glyph initializer.
Returns:
Glyph
'''
if glyph is not None:
source = source_or_glyph
else:
source, glyph = ColumnDataSource(), source_or_glyph
if not isinstance(source, DataSource):
raise ValueError("'source' argument to add_glyph() must be DataSource subclass")
if not isinstance(glyph, Glyph):
raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass")
g = GlyphRenderer(data_source=source, glyph=glyph, **kw)
self.renderers.append(g)
return g
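# Usage sketch (hypothetical data): both call forms are accepted:
#     plot.add_glyph(ColumnDataSource(data=dict(x=[1], y=[2])), Circle(x='x', y='y'))
#     plot.add_glyph(Circle(x='x', y='y'))   # a fresh ColumnDataSource is created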
def add_tile(self, tile_source, **kw):
'''Adds a new TileRenderer to Plot.renderers.
Args:
tile_source (TileSource) : a tile source instance which contains the tileset configuration
Keyword Arguments:
Additional keyword arguments are passed on as-is to the tile renderer
Returns:
TileRenderer : TileRenderer
'''
tile_renderer = TileRenderer(tile_source=tile_source, **kw)
self.renderers.append(tile_renderer)
return tile_renderer
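# Usage sketch: r = plot.add_tile(tile_source) returns the new TileRenderer,
# which is already appended to self.renderers, so callers can keep the handle
# to adjust its properties later.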
def add_dynamic_image(self, image_source, **kw):
'''Adds a new DynamicImageRenderer to Plot.renderers.
Args:
image_source (ImageSource) : an image source instance which contains the image configuration
Keyword Arguments:
Additional keyword arguments are passed on as-is to the dynamic image renderer
Returns:
DynamicImageRenderer : DynamicImageRenderer
'''
image_renderer = DynamicImageRenderer(image_source=image_source, **kw)
self.renderers.append(image_renderer)
return image_renderer
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
missing = []
if not self.x_range: missing.append('x_range')
if not self.y_range: missing.append('y_range')
if missing:
return ", ".join(missing) + " [%s]" % self
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
if len(self.renderers) == 0:
return str(self)
@validation.warning(NO_DATA_RENDERERS)
def _check_no_data_renderers(self):
if len(self.select(DataRenderer)) == 0:
return str(self)
@validation.warning(MALFORMED_CATEGORY_LABEL)
def _check_colon_in_category_label(self):
if not self.x_range: return
if not self.y_range: return
broken = []
for range_name in ['x_range', 'y_range']:
category_range = getattr(self, range_name)
if not isinstance(category_range, FactorRange): continue
for value in category_range.factors:
if not isinstance(value, string_types): break
if ':' in value:
broken.append((range_name, value))
break
if broken:
field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)
for field, value in broken)
return '%s [renderer: %s]' % (field_msg, self)
__deprecated_attributes__ = ('background_fill', 'border_fill')
x_range = Instance(Range, help="""
The (default) data range of the horizontal dimension of the plot.
""")
y_range = Instance(Range, help="""
The (default) data range of the vertical dimension of the plot.
""")
x_mapper_type = Either(Auto, String, help="""
What kind of mapper to use to convert x-coordinates in data space
into x-coordinates in screen space.
Typically this can be determined automatically, but this property
can be useful to, e.g., show datetime values as floating point
"seconds since epoch" instead of formatted dates.
""")
y_mapper_type = Either(Auto, String, help="""
What kind of mapper to use to convert y-coordinates in data space
into y-coordinates in screen space.
Typically this can be determined automatically, but this property
can be useful to, e.g., show datetime values as floating point
"seconds since epoch" instead of formatted dates
""")
extra_x_ranges = Dict(String, Instance(Range), help="""
Additional named ranges to make available for mapping x-coordinates.
This is useful for adding additional axes.
""")
extra_y_ranges = Dict(String, Instance(Range), help="""
Additional named ranges to make available for mapping y-coordinates.
This is useful for adding additional axes.
""")
hidpi = Bool(default=True, help="""
Whether to use HiDPI mode when available.
""")
title_standoff = Int(default=8, help="""
How far (in screen units) to place a title away from the central
plot region.
""")
title = String('', help="""
A title for the plot.
""")
title_props = Include(TextProps, help="""
The %s for the plot title.
""")
title_text_align = Override(default='center')
title_text_baseline = Override(default='alphabetic')
title_text_font_size = Override(default={ 'value' : '20pt' })
outline_props = Include(LineProps, help="""
The %s for the plot border outline.
""")
outline_line_color = Override(default="#aaaaaa")
renderers = List(Instance(Renderer), help="""
A list of all renderers for this plot, including guides and annotations
in addition to glyphs and markers.
This property can be manipulated by hand, but the ``add_glyph`` and
``add_layout`` methods are recommended to help make sure all necessary
setup is performed.
""")
tools = List(Instance(Tool), help="""
A list of tools to add to the plot.
""")
tool_events = Instance(ToolEvents, help="""
A ToolEvents object to share and report tool events.
""")
left = List(Instance(Renderer), help="""
A list of renderers to occupy the area to the left of the plot.
""")
right = List(Instance(Renderer), help="""
A list of renderers to occupy the area to the right of the plot.
""")
above = List(Instance(Renderer), help="""
A list of renderers to occupy the area above the plot.
""")
below = List(Instance(Renderer), help="""
A list of renderers to occupy the area below the plot.
""")
toolbar_location = Enum(Location, help="""
Where the toolbar will be located. If set to None, no toolbar
will be attached to the plot.
""")
logo = Enum("normal", "grey", help="""
What version of the Bokeh logo to display on the toolbar. If
set to None, no logo will be displayed.
""")
plot_height = Int(600, help="""
Total height of the entire plot (including any axes, titles,
border padding, etc.)
.. note::
This corresponds directly to the height of the HTML
canvas that will be used.
""")
plot_width = Int(600, help="""
Total width of the entire plot (including any axes, titles,
border padding, etc.)
.. note::
This corresponds directly to the width of the HTML
canvas that will be used.
""")
@property
def background_fill(self):
warnings.warn(
"""
Plot property 'background_fill' was deprecated in Bokeh
0.11.0 and will be removed. Use 'background_fill_color' instead.
""")
return self.background_fill_color
@background_fill.setter
def background_fill(self, color):
warnings.warn(
"""
Plot property 'background_fill' was deprecated in Bokeh
0.11.0 and will be removed. Use 'background_fill_color' instead.
""")
self.background_fill_color = color
@property
def border_fill(self):
warnings.warn(
"""
Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and
will be removed. Use 'border_fill_color' instead.
""")
return self.border_fill_color
@border_fill.setter
def border_fill(self, color):
warnings.warn(
"""
Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and
will be removed. Use 'border_fill_color' instead.
""")
self.border_fill_color = color
background_props = Include(FillProps, help="""
The %s for the plot background style.
""")
background_fill_color = Override(default='#ffffff')
border_props = Include(FillProps, help="""
The %s for the plot border style.
""")
border_fill_color = Override(default='#ffffff')
min_border_top = Int(50, help="""
Minimum size in pixels of the padding region above the top of the
central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_bottom = Int(50, help="""
Minimum size in pixels of the padding region below the bottom of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_left = Int(50, help="""
Minimum size in pixels of the padding region to the left of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_right = Int(50, help="""
Minimum size in pixels of the padding region to the right of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border = Int(50, help="""
A convenience property to set all the ``min_border_X`` properties
to the same value. If an individual border property is explicitly set,
it will override ``min_border``.
""")
h_symmetry = Bool(True, help="""
Whether the total horizontal padding on both sides of the plot will
be made equal (the left or right padding amount, whichever is larger).
""")
v_symmetry = Bool(False, help="""
Whether the total vertical padding on both sides of the plot will
be made equal (the top or bottom padding amount, whichever is larger).
""")
lod_factor = Int(10, help="""
Decimation factor to use when applying level-of-detail decimation.
""")
lod_threshold = Int(2000, help="""
A number of data points, above which level-of-detail downsampling may
be performed by glyph renderers. Set to ``None`` to disable any
level-of-detail downsampling.
""")
lod_interval = Int(300, help="""
Interval (in ms) during which an interactive tool event will enable
level-of-detail downsampling.
""")
lod_timeout = Int(500, help="""
Timeout (in ms) for checking whether interactive tool events are still
occurring. Once level-of-detail mode is enabled, a check is made every
``lod_timeout`` ms. If no interactive tool events have happened,
level-of-detail mode is disabled.
""")
webgl = Bool(False, help="""
Whether WebGL is enabled for this plot. If True, the glyphs that
support this will render via WebGL instead of the 2D canvas.
""")
responsive = Bool(False, help="""
If True, the plot will automatically resize based on the size of its container. The
aspect ratio of the plot will be preserved, but ``plot_width`` and ``plot_height`` will
act only to set the initial aspect ratio.
.. warning::
The responsive setting is known not to work with HBox layout and may not work
in combination with other widgets or layouts.
""")
class GridPlot(LayoutDOM):
""" A 2D grid of plots rendered on separate canvases in an HTML table.
"""
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
pass
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
pass
@validation.warning(NO_DATA_RENDERERS)
def _check_no_data_renderers(self):
pass
@validation.warning(EMPTY_LAYOUT)
def _check_empty_layout(self):
from itertools import chain
if not list(chain(self.children)):
return str(self)
children = List(List(Instance(Plot)), default=[[]], help="""
An array of plots to display in a grid, given as a list of lists of
Plot objects. To leave a position in the grid empty, pass None for
that position in the ``children`` list.
""")
border_space = Int(0, help="""
Distance (in pixels) between adjacent plots.
""")
toolbar_location = Enum(Location, default="left", help="""
Where the toolbar will be located. If set to None, no toolbar
will be attached to the plot.
""")
def select(self, *args, **kwargs):
''' Query this object and all of its references for objects that
match the given selector. See Plot.select for detailed usage information.
Returns:
seq[Model]
'''
selector = _select_helper(args, kwargs)
# Want to pass selector that is a dictionary
return _list_attr_splat(find(self.references(), selector, {'gridplot': self}))
def column(self, col):
''' Return a given column of plots from this GridPlot.
Args:
col (int) : index of the column to return
Returns:
seq[Plot] : column of plots
'''
try:
return [row[col] for row in self.children]
except IndexError:
return []
def row(self, row):
''' Return a given row of plots from this GridPlot.
Args:
row (int) : index of the row to return
Returns:
seq[Plot] : row of plots
'''
try:
return self.children[row]
except IndexError:
return []
|
|
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
from __future__ import absolute_import
__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $'
__version__ = '$Revision: 1.12 $'[11:-2]
__author__ = 'Stuart Bishop <stuart@stuartbishop.net>'
import time
import sys
from six.moves import range
from impala.tests.compat import unittest
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portable (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing)
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
if sys.version_info < (3,0) and isinstance(sval, str):
sval = sval.decode("latin1")
return sval.encode("latin1")
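# Example: str2bytes("Victoria Bitter") yields the latin-1 encoded byte
# string b'Victoria Bitter' on Python 3 and the equivalent str on Python 2;
# it is used below to build portable Binary() test values.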
class DatabaseAPI20Test(unittest.TestCase):
''' Test a database driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other drivers can subclass this
test case to ensure compliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
Drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
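# A concrete subclass sketch (module and class names here are illustrative,
# not part of this file):
#
#     import some_dbapi_driver
#
#     class SomeDriverTest(DatabaseAPI20Test):
#         driver = some_dbapi_driver
#         connect_kw_args = {'host': 'localhost'}
#
# The generic tests below then exercise connect(), cursor behaviour, the
# fetch methods and the module-level type constructors against that driver.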
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
''' Drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
''' Drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.failUnless(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.failUnless(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
# defined hierarchy.
if sys.version[0] == '3': # under Python 3, StandardError no longer exists
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
else:
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
self.failUnless(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.failUnless(con.Warning is drv.Warning)
self.failUnless(con.Error is drv.Error)
self.failUnless(con.InterfaceError is drv.InterfaceError)
self.failUnless(con.DatabaseError is drv.DatabaseError)
self.failUnless(con.OperationalError is drv.OperationalError)
self.failUnless(con.IntegrityError is drv.IntegrityError)
self.failUnless(con.InternalError is drv.InternalError)
self.failUnless(con.ProgrammingError is drv.ProgrammingError)
self.failUnless(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be None after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
# laserson note: the next two assertions are not clear to me from PEP 249
# so I am leaving them out
# connection.commit should raise an Error if called after connection'
# closed.'
# self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
# self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.failUnless(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.failUnless(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.failUnless(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
# executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.failUnless(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
# issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.failUnless(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.failUnless(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.failUnless(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
# after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
                for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
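    # Note: the stub below intentionally shadows the full test_nextset above;
    # a driver-specific subclass is expected to supply the help_nextset_*
    # helpers and re-enable (or replace) the full version itself.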
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.failUnless(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
# Real test for setoutputsize is driver dependant
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary(str2bytes('Something'))
b = self.driver.Binary(str2bytes(''))
def test_STRING(self):
self.failUnless(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.failUnless(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.failUnless(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.failUnless(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.failUnless(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
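# Illustrative driver harness (a sketch, not part of this suite): a concrete
# DB-API driver normally subclasses the test case above, points `driver` at its
# module, and supplies connect arguments. The class and attribute names below
# follow the conventional dbapi20 layout but are assumptions here:
#
#   import sqlite3
#
#   class SQLite3ComplianceTest(DatabaseAPI20Test):
#       driver = sqlite3
#       connect_args = ('test.db',)
#       connect_kw_args = {}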
|
|
import unittest
from playhouse.test_utils import test_database
from peewee import *
from datetime import datetime, timedelta
from mqttsqlite.orm.models import Log, Topic
from tests.utils import msg
from mqttsqlite.core.logs_controller import LogController
import mqttsqlite.settings.private_settings as Settings
import json
test_db = SqliteDatabase('test_database.db')
class TestLogsController(unittest.TestCase):
def setUp(self):
self.payload = {}
self.payload['client'] = 'testClient'
self.payload['password'] = Settings.QUERY_PASSWORD
self.payload['topic'] = '/test/topic'
self.payload['options'] = '25'
self.msg = msg(topic=Settings.ROOT_TOPIC + '/topics/add', payload=json.dumps(self.payload))
def test_add_log_entry_response_ok(self):
message = msg(topic='/test/home/sensor', payload='123445')
with test_database(test_db, (Log, Topic), create_tables=True):
logs = LogController()
result = logs.add_entry(message)
parsedResponse = json.loads(result)
self.assertEqual('OK', parsedResponse['result'])
def test_add_log_entry(self):
with test_database(test_db, (Log, Topic), create_tables=True):
message = msg(topic='/test/home/sensor', payload='123445')
logs = LogController()
result = logs.add_entry(message)
parsedResponse = json.loads(result)
self.assertEqual('OK', parsedResponse['result'])
self.assertEqual(1, Log.select().count())
def test_private_method_get_log_newer_than(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=60), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=50), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=40), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs._LogController__get_logs_newer_than('/test/topic', 25)
self.assertEqual(2, len(query_result))
def test_private_method_get_log_from_desired_topic_newer_than(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs._LogController__get_logs_newer_than('/test/topic', 25)
self.assertEqual(2, len(query_result))
    def test_private_method_get_last_entry_from_topic_ignores_other_topics(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
timestamp = datetime.now()
Log.create(timestamp=timestamp, value="12", topic='/test/topic')
Log.create(timestamp=datetime.now(), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now(), value="12", topic='/test/topic3')
logs = LogController()
query_result = logs._LogController__get_last_entry_from_topic('/test/topic')
self.assertEqual(timestamp.strftime("%Y-%m-%d %H:%M:%S"), query_result['timestamp'])
def test_private_method_get_last_entry_from_invalid_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
timestamp = datetime.now()
Log.create(timestamp=timestamp, value="12", topic='/test/topic')
logs = LogController()
query_result = logs._LogController__get_last_entry_from_topic('/test/topic3')
self.assertEqual({}, query_result)
def test_private_method_get_last_entry_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
timestamp = datetime.now()
Log.create(timestamp=timestamp, value="12", topic='/test/topic')
logs = LogController()
query_result = logs._LogController__get_last_entry_from_topic('/test/topic')
self.assertEqual(timestamp.strftime("%Y-%m-%d %H:%M:%S"), query_result['timestamp'])
def test_get_last_entry_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/last'
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
timestamp = datetime.now()
Log.create(timestamp=timestamp, value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(timestamp.strftime("%Y-%m-%d %H:%M:%S"), dic_result['values'][0]['timestamp'])
def test_get_entries_newer_than_25_minutes(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/minutes'
Log.create(timestamp=datetime.now() - timedelta(minutes=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(minutes=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(minutes=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(minutes=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(2, len(dic_result['values']))
def test_get_entries_newer_than_25_hours(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/hours'
Log.create(timestamp=datetime.now() - timedelta(hours=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(hours=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(hours=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(hours=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(2, len(dic_result['values']))
def test_get_entries_newer_than_25_days(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/days'
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(2, len(dic_result['values']))
def test_get_entries_newer_than_25_days_invalid_password(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/days'
self.payload['password'] = 'badPassword'
self.msg.payload = json.dumps(self.payload)
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual('KO', dic_result['result'])
self.assertFalse('values' in dic_result)
def test_get_entries_newer_than_25_days_invalid_options(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/days'
self.payload['options'] = 'invalidOptions'
self.msg.payload = json.dumps(self.payload)
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual('KO', dic_result['result'])
self.assertFalse('values' in dic_result)
def test_get_entries_newer_than_25_days_invalid_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/log/days'
self.payload['topic'] = '/test/invalid/topic'
self.msg.payload = json.dumps(self.payload)
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic2')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.get_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual('OK', dic_result['result'])
self.assertFalse('values' in dic_result)
def test_private_method_delete_last_entry_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
result = logs._LogController__delete_last_entry_from_topic('/test/topic')
self.assertTrue(result)
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_private_method_delete_last_entry_from_non_existing_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
result = logs._LogController__delete_last_entry_from_topic('/test/topic2')
self.assertTrue(result)
data_after_delete = Log.select()
self.assertEqual(3, data_after_delete.count())
def test_private_method_delete_entries_older_than_from_existing_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=50), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=40), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=10), value="12", topic='/test/topic')
logs = LogController()
result = logs._LogController__delete_entries_from_topic_older_than('/test/topic', 25)
self.assertEqual(3, result)
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_private_method_delete_entries_older_than_from_non_existing_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
Log.create(timestamp=datetime.now() - timedelta(seconds=50), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=40), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=10), value="12", topic='/test/topic')
logs = LogController()
result = logs._LogController__delete_entries_from_topic_older_than('/test/topic2', 25)
self.assertEqual('0', result)
data_after_delete = Log.select()
self.assertEqual(5, data_after_delete.count())
def test_delete_last_entry_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/last'
Log.create(timestamp=datetime.now() - timedelta(seconds=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(seconds=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertTrue(dic_result['values'])
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_delete_older_than_x_minutes_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/minutes'
Log.create(timestamp=datetime.now() - timedelta(minutes=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(minutes=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(minutes=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(1, dic_result['values'])
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_delete_older_than_x_hours_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/hours'
Log.create(timestamp=datetime.now() - timedelta(hours=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(hours=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(hours=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(1, dic_result['values'])
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_delete_older_than_x_days_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/days'
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual(1, dic_result['values'])
data_after_delete = Log.select()
self.assertEqual(2, data_after_delete.count())
def test_delete_older_than_x_days_from_non_existing_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/days'
self.payload['topic'] = '/test/invalid/topic'
self.msg.payload = json.dumps(self.payload)
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual('0', dic_result['values'])
data_after_delete = Log.select()
self.assertEqual(3, data_after_delete.count())
def test_delete_older_than_x_invalid_unit_time_from_topic(self):
with test_database(test_db, (Log, Topic), create_tables=True):
self.msg.topic = Settings.ROOT_TOPIC + '/delete/years'
Log.create(timestamp=datetime.now() - timedelta(days=30), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=20), value="12", topic='/test/topic')
Log.create(timestamp=datetime.now() - timedelta(days=10), value="12", topic='/test/topic')
logs = LogController()
query_result = logs.delete_topic_entries(self.msg)
dic_result = json.loads(query_result)
self.assertEqual('KO', dic_result['result'])
if __name__ == '__main__':
unittest.main()
|
|
import base64
import binascii
import functools
import hashlib
import importlib
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.module_loading import import_string
from django.utils.translation import gettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
"""
Return True if this password wasn't generated by
User.set_unusable_password(), i.e. make_password(None).
"""
return encoded is None or not encoded.startswith(UNUSABLE_PASSWORD_PREFIX)
def check_password(password, encoded, setter=None, preferred='default'):
"""
Return a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
try:
hasher = identify_hasher(encoded)
except ValueError:
# encoded is gibberish or uses a hasher that's no longer installed.
return False
hasher_changed = hasher.algorithm != preferred.algorithm
must_update = hasher_changed or preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
# If the hasher didn't change (we don't protect against enumeration if it
# does) and the password should get updated, try to close the timing gap
# between the work factor of the current encoded password and the default
# work factor.
if not is_correct and not hasher_changed and must_update:
hasher.harden_runtime(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
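# Usage sketch (illustrative only, not part of this module's API surface):
# check_password() verifies a raw password against a stored encoded hash and,
# via `setter`, lets callers re-hash with the currently preferred hasher.
# The `user` object below is a hypothetical model instance.
#
#   def _upgrade(raw_password):
#       user.password = make_password(raw_password)
#       user.save(update_fields=['password'])
#
#   if check_password(raw_password, user.password, setter=_upgrade):
#       ...  # credentials accepted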
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
if not isinstance(password, (bytes, str)):
raise TypeError(
'Password must be a string or bytes, got %s.'
% type(password).__qualname__
)
hasher = get_hasher(hasher)
salt = salt or hasher.salt()
return hasher.encode(password, salt)
@functools.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
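# Example of the setting consumed above (the dotted paths are real Django
# hashers; the particular ordering is illustrative -- the first entry is the
# default used for new passwords):
#
#   PASSWORD_HASHERS = [
#       'django.contrib.auth.hashers.PBKDF2PasswordHasher',
#       'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
#       'django.contrib.auth.hashers.Argon2PasswordHasher',
#       'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
#   ]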
@functools.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Return an instance of a loaded password hasher.
If algorithm is 'default', return the default hasher. Lazily import hashers
specified in the project's settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Return an instance of a loaded password hasher.
Identify hasher algorithm by examining encoded hash, and call
get_hasher() to return hasher. Raise ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
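# For reference (derived from the encode() formats defined below): the
# algorithm label is simply everything before the first '$', e.g.
#   'pbkdf2_sha256$260000$<salt>$<b64digest>'  -> 'pbkdf2_sha256'
#   'bcrypt_sha256$$2b$12$<salt+checksum>'     -> 'bcrypt_sha256'
# except for the legacy unsalted MD5/SHA1 shapes special-cased above.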
def mask_hash(hash, show=6, char="*"):
"""
Return the given hash, with only the first ``show`` number shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
class BasePasswordHasher:
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""Generate a cryptographically secure nonce salt in ASCII."""
# 12 returns a 71-bit value, log_2((26+26+10)^12) =~ 71 bits
return get_random_string(12)
def verify(self, password, encoded):
"""Check if the given password is correct."""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 260000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
iterations = iterations or self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return {
_('algorithm'): algorithm,
_('iterations'): iterations,
_('salt'): mask_hash(salt),
_('hash'): mask_hash(hash),
}
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
def harden_runtime(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
extra_iterations = self.iterations - int(iterations)
if extra_iterations > 0:
self.encode(password, salt, extra_iterations)
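# Encoded layout produced by PBKDF2PasswordHasher.encode() above (values
# illustrative): 'pbkdf2_sha256$260000$<12-char salt>$<base64 digest>' -- i.e.
# algorithm, iteration count, salt and hash joined by '$'.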
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class Argon2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the argon2 algorithm.
This is the winner of the Password Hashing Competition 2013-2015
(https://password-hashing.net). It requires the argon2-cffi library which
depends on native C code and might cause portability issues.
"""
algorithm = 'argon2'
library = 'argon2'
time_cost = 2
memory_cost = 512
parallelism = 2
def encode(self, password, salt):
argon2 = self._load_library()
data = argon2.low_level.hash_secret(
password.encode(),
salt.encode(),
time_cost=self.time_cost,
memory_cost=self.memory_cost,
parallelism=self.parallelism,
hash_len=argon2.DEFAULT_HASH_LENGTH,
type=argon2.low_level.Type.I,
)
return self.algorithm + data.decode('ascii')
def verify(self, password, encoded):
argon2 = self._load_library()
algorithm, rest = encoded.split('$', 1)
assert algorithm == self.algorithm
try:
return argon2.low_level.verify_secret(
('$' + rest).encode('ascii'),
password.encode(),
type=argon2.low_level.Type.I,
)
except argon2.exceptions.VerificationError:
return False
def safe_summary(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
return {
_('algorithm'): algorithm,
_('variety'): variety,
_('version'): version,
_('memory cost'): memory_cost,
_('time cost'): time_cost,
_('parallelism'): parallelism,
_('salt'): mask_hash(salt),
_('hash'): mask_hash(data),
}
def must_update(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
argon2 = self._load_library()
return (
argon2.low_level.ARGON2_VERSION != version or
self.time_cost != time_cost or
self.memory_cost != memory_cost or
self.parallelism != parallelism
)
def harden_runtime(self, password, encoded):
# The runtime for Argon2 is too complicated to implement a sensible
# hardening algorithm.
pass
def _decode(self, encoded):
"""
Split an encoded hash and return: (
algorithm, variety, version, time_cost, memory_cost,
parallelism, salt, data,
).
"""
bits = encoded.split('$')
if len(bits) == 5:
# Argon2 < 1.3
algorithm, variety, raw_params, salt, data = bits
version = 0x10
else:
assert len(bits) == 6
algorithm, variety, raw_version, raw_params, salt, data = bits
assert raw_version.startswith('v=')
version = int(raw_version[len('v='):])
params = dict(bit.split('=', 1) for bit in raw_params.split(','))
assert len(params) == 3 and all(x in params for x in ('t', 'm', 'p'))
time_cost = int(params['t'])
memory_cost = int(params['m'])
parallelism = int(params['p'])
return (
algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data,
)
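# Example of the encoded string _decode() above expects (values illustrative):
# the argon2-cffi output prefixed with the algorithm name, e.g.
#   'argon2$argon2i$v=19$m=512,t=2,p=2$<b64 salt>$<b64 data>'
# Older (< 1.3) hashes omit the 'v=' segment, which is the 5-segment branch.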
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
password = password.encode()
# Hash the password prior to using bcrypt to prevent password
# truncation as described in #20138.
if self.digest is not None:
# Use binascii.hexlify() because a hex encoded bytestring is str.
password = binascii.hexlify(self.digest(password).digest())
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, data.decode('ascii'))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, data.encode('ascii'))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return {
_('algorithm'): algorithm,
_('work factor'): work_factor,
_('salt'): mask_hash(salt),
_('checksum'): mask_hash(checksum),
}
def must_update(self, encoded):
algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
return int(rounds) != self.rounds
def harden_runtime(self, password, encoded):
_, data = encoded.split('$', 1)
salt = data[:29] # Length of the salt in bcrypt.
rounds = data.split('$')[2]
# work factor is logarithmic, adding one doubles the load.
diff = 2**(self.rounds - int(rounds)) - 1
while diff > 0:
self.encode(password, salt.encode('ascii'))
diff -= 1
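# Encoded layout produced by the bcrypt hashers (illustrative): the algorithm
# label plus the raw bcrypt string, e.g.
#   'bcrypt_sha256$$2b$12$' + 22-char salt + 31-char checksum
# so safe_summary()/must_update() recover the work factor ('12') by splitting
# on '$'.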
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
bcrypt's 72 bytes password truncation. Most use cases should prefer the
BCryptSHA256PasswordHasher.
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1((salt + password).encode()).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return {
_('algorithm'): algorithm,
_('salt'): mask_hash(salt, show=2),
_('hash'): mask_hash(hash),
}
def harden_runtime(self, password, encoded):
pass
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5((salt + password).encode()).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return {
_('algorithm'): algorithm,
_('salt'): mask_hash(salt, show=2),
_('hash'): mask_hash(hash),
}
def harden_runtime(self, password, encoded):
pass
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; store SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(password.encode()).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return {
_('algorithm'): self.algorithm,
_('hash'): mask_hash(hash),
}
def harden_runtime(self, password, encoded):
pass
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(password.encode()).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return {
_('algorithm'): self.algorithm,
_('hash'): mask_hash(encoded, show=3),
}
def harden_runtime(self, password, encoded):
pass
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(password, salt)
assert data is not None # A platform like OpenBSD with a dummy crypt module.
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(password, data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return {
_('algorithm'): algorithm,
_('salt'): salt,
_('hash'): mask_hash(data, show=3),
}
def harden_runtime(self, password, encoded):
pass
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoencoder model for training on spectrograms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.nsynth import utils
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
def get_hparams(config_name):
"""Set hyperparameters.
Args:
config_name: Name of config module to use.
Returns:
A HParams object (magenta) with defaults.
"""
hparams = tf.contrib.training.HParams(
# Optimization
batch_size=16,
learning_rate=1e-4,
adam_beta=0.5,
max_steps=6000 * 50000,
samples_per_second=16000,
num_samples=64000,
# Preprocessing
n_fft=1024,
hop_length=256,
mask=True,
log_mag=True,
use_cqt=False,
re_im=False,
dphase=True,
mag_only=False,
pad=True,
mu_law_num=0,
raw_audio=False,
# Graph
num_latent=64, # dimension of z.
cost_phase_mask=False,
phase_loss_coeff=1.0,
fw_loss_coeff=1.0, # Frequency weighted cost
fw_loss_cutoff=1000,
)
# Set values from a dictionary in the config
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hasattr(config, "config_hparams"):
config_hparams = config.config_hparams
hparams.update(config_hparams)
return hparams
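# Illustrative config override consumed above (the attribute name comes from
# the hasattr() check; the particular values are assumptions): a config module
# may define
#   config_hparams = dict(batch_size=8, num_latent=128)
# to override the defaults before training.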
def compute_mse_loss(x, xhat, hparams):
"""MSE loss function.
Args:
x: Input data tensor.
xhat: Reconstruction tensor.
hparams: Hyperparameters.
Returns:
total_loss: MSE loss scalar.
"""
with tf.name_scope("Losses"):
if hparams.raw_audio:
total_loss = tf.reduce_mean((x - xhat)**2)
else:
# Magnitude
m = x[:, :, :, 0] if hparams.cost_phase_mask else 1.0
fm = utils.frequency_weighted_cost_mask(
hparams.fw_loss_coeff,
hz_flat=hparams.fw_loss_cutoff,
n_fft=hparams.n_fft)
mag_loss = tf.reduce_mean(fm * (x[:, :, :, 0] - xhat[:, :, :, 0])**2)
if hparams.mag_only:
total_loss = mag_loss
else:
# Phase
if hparams.dphase:
phase_loss = tf.reduce_mean(fm * m *
(x[:, :, :, 1] - xhat[:, :, :, 1])**2)
else:
# Von Mises Distribution "Circular Normal"
# Added constant to keep positive (Same Probability) range [0, 2]
phase_loss = 1 - tf.reduce_mean(fm * m * tf.cos(
(x[:, :, :, 1] - xhat[:, :, :, 1]) * np.pi))
total_loss = mag_loss + hparams.phase_loss_coeff * phase_loss
tf.summary.scalar("Loss/Mag", mag_loss)
tf.summary.scalar("Loss/Phase", phase_loss)
tf.summary.scalar("Loss/Total", total_loss)
return total_loss
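# Note on the tensor layout assumed by the non-raw-audio branch above: x and
# xhat are spectrogram batches of shape [batch, height, width, 2], where
# channel 0 holds (log-)magnitude and channel 1 holds (d)phase; the exact
# height/width ordering depends on the preprocessing pipeline.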
def train_op(batch, hparams, config_name):
"""Define a training op, including summaries and optimization.
Args:
batch: Dictionary produced by NSynthDataset.
hparams: Hyperparameters dictionary.
config_name: Name of config module.
Returns:
train_op: A complete iteration of training with summaries.
"""
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams)
xhat = config.decode(z, batch, hparams)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
# Compute losses
total_loss = compute_mse_loss(x, xhat, hparams)
# Apply optimizer
with tf.name_scope("Optimizer"):
global_step = tf.get_variable(
"global_step", [],
tf.int64,
initializer=tf.constant_initializer(0),
trainable=False)
optimizer = tf.train.AdamOptimizer(hparams.learning_rate, hparams.adam_beta)
train_step = slim.learning.create_train_op(total_loss,
optimizer,
global_step=global_step)
return train_step
def eval_op(batch, hparams, config_name):
"""Define a evaluation op.
Args:
batch: Batch produced by NSynthReader.
hparams: Hyperparameters.
config_name: Name of config module.
Returns:
eval_op: A complete evaluation op with summaries.
"""
phase = not (hparams.mag_only or hparams.raw_audio)
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams, is_training=False)
xhat = config.decode(z, batch, hparams, is_training=False)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
total_loss = compute_mse_loss(x, xhat, hparams)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"Loss": slim.metrics.mean(total_loss),
})
# Define the summaries
  for name, value in names_to_values.items():
slim.summaries.add_scalar_summary(value, name, print_summary=True)
# Interpolate
with tf.name_scope("Interpolation"):
xhat = config.decode(z, batch, hparams, reuse=True, is_training=False)
# Linear interpolation
z_shift_one_example = tf.concat([z[1:], z[:1]], 0)
z_linear_half = (z + z_shift_one_example) / 2.0
xhat_linear_half = config.decode(z_linear_half, batch, hparams, reuse=True,
is_training=False)
# Pitch shift
pitch_plus_2 = tf.clip_by_value(batch["pitch"] + 2, 0, 127)
pitch_minus_2 = tf.clip_by_value(batch["pitch"] - 2, 0, 127)
batch["pitch"] = pitch_minus_2
xhat_pitch_minus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
batch["pitch"] = pitch_plus_2
xhat_pitch_plus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
utils.specgram_summaries(x, "Training Examples", hparams, phase=phase)
utils.specgram_summaries(xhat, "Reconstructions", hparams, phase=phase)
utils.specgram_summaries(
x - xhat, "Difference", hparams, audio=False, phase=phase)
utils.specgram_summaries(
xhat_linear_half, "Linear Interp. 0.5", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_plus_2, "Pitch +2", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_minus_2, "Pitch -2", hparams, phase=phase)
return names_to_updates.values()
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Copyright (c) 2014 Dyffy, Inc.
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class SidecoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
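# Byte-order helpers above (as used by Miner.work below): bufreverse() swaps
# the byte order inside each 32-bit word, and wordreverse() reverses the order
# of the 32-bit words themselves. For example:
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')  -> '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') -> '\x05\x06\x07\x08\x01\x02\x03\x04'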
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = SidecoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TopShops'
db.create_table(u'catalog_topshops', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Shop'])),
('score', self.gf('django.db.models.fields.IntegerField')()),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'catalog', ['TopShops'])
def backwards(self, orm):
# Deleting model 'TopShops'
db.delete_table(u'catalog_topshops')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
},
u'catalog.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': u"orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
u'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
u'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"})
},
u'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
|
|
""" Restore Vertica from Swift backups - see README.md
Copyright 2014 Hewlett-Packard Development Company, L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import socket
import tempfile
from fabric.api import *
from fabric.colors import *
# todo setup detection of backup_dir, data_dir and catalog_dir from the config files and remove references to /var/vertica, update readme
@task
@runs_once
def restore(dbname=None, restore_domain=None):
""" The master task that calls all the sub tasks walking through the entire process from download to restore to restoration of the previous db.
Run this with the name of one node in the run list, other nodes will be discovered from there.
"""
env.abort_on_prompts = False
env.warn_only = False
env.sudo_prefix = env.sudo_prefix.replace('sudo ', 'sudo -i ') # Proper users with sudo see https://github.com/fabric/fabric/issues/503
# Check that only one box is in the hosts, the others in the cluster discovered from here
if len(env.hosts) != 1:
abort('When running vertica.restore only 1 host should be in the host list, this host will run the vbr commands.')
with settings(hide('running', 'output')):
current_domain = run('hostname -d').strip()
if dbname is None:
dbname = prompt('Which db should be restored?')
if restore_domain is None:
restore_domain = prompt('Which domain should be restored?')
# Discover details of the cluster
primary_node = env.hosts[0]
cluster_nodes = get_cluster_nodes(dbname)
    # todo: the old code assumed hostnames ending in -cluster and did not work with ips or other hostnames; the new code assumes only 1 interface per node
env.hosts = cluster_nodes.values()
# nodes = {} # 'fqdn':'v_node name'
# for vnode in cluster_nodes.iterkeys():
# cluster_fqdn = socket.gethostbyaddr(cluster_nodes[vnode])[0]
# nodes[cluster_fqdn.replace('-cluster', '')] = vnode # This relies on a specific cluster ip naming scheme
# env.hosts = nodes.keys()
execute(set_active_backup, suffix=restore_domain)
    # First download the db; this will take a while, so it can be skipped when not needed
if prompt('Skip Download? [y/n] ') != 'y':
day = prompt('Please specify YYYY_MM_DD of the backup you would like to restore:')
execute(download_backup, restore_domain, dbname, day=day)
    # Switch to the newly restored db
prompt(magenta('Ready to disable the running db and switch to the restored db, press enter to continue.'))
execute(stop_db, hosts=primary_node)
execute(switch_active_dataset, to_set='%s_%s' % (dbname, restore_domain),
from_set='%s_%s' % (dbname, current_domain), dbname=dbname)
try:
execute(prep_restore, restore_domain, dbname)
execute(vbr_restore, dbname, hosts=primary_node)
        # Link the server ssl certs again
execute(ssl_link, dbname)
execute(start_db, dbname, hosts=primary_node)
except SystemExit:
prompt(red('Restore error encountered press enter to revert to previous db setup.'))
else:
prompt(magenta('Verify the db restore worked then press enter to continue.'))
execute(stop_db, hosts=primary_node)
finally:
        # Revert to the previous db version
execute(unset_active_backup, suffix=restore_domain)
# Save the existing database, the backup dir remains so a full restore is done each time
execute(switch_active_dataset, to_set='%s_%s' % (dbname, current_domain),
from_set='%s_%s' % (dbname, restore_domain), dbname=dbname)
execute(start_db, dbname, hosts=primary_node)
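# Hedged usage sketch (not part of the tasks above; hostname and names are
# illustrative): the master restore task is normally invoked through fabric's
# CLI with a single node in the host list, e.g.
#   fab -H vertica-node-01.example.net restore:dbname=mydb,restore_domain=prod.example.net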
@task
@parallel
def download_backup(domain, dbname, day=''):
""" Download a Vertica backup from swift.
"""
with settings(hide('running', 'output')):
# set the snapshot name in dbname_backup.yaml
sudo('cp /opt/vertica/config/%s_backup.yaml /opt/vertica/config/%s_backup.yaml-backup' % (dbname, dbname))
sudo(
"sed 's/^snapshot_name:.*/snapshot_name: %s/' /opt/vertica/config/%s_backup.yaml-backup > /opt/vertica/config/%s_backup.yaml" %
(domain.replace('.', '_') + '_' + dbname, dbname, dbname)
)
# todo this assumes you are downloading to a cluster with an existing db
data_v_node = sudo('ls /var/vertica/data/%s' % dbname)
v_node = data_v_node[:data_v_node.index('_data')]
sudo('vertica_restore_download /opt/vertica/config/%s_backup.yaml %s %s %s' % (dbname, domain, v_node, day))
@task
@runs_once
def get_cluster_nodes(dbname):
""" For a vertica node in the run list discover the remaining nodes in the cluster returning a
"""
nodes = {}
with settings(hide('running', 'output')):
for line in sudo('grep ^v_%s_node /opt/vertica/config/admintools.conf' % dbname).splitlines():
name, ip = line.split(',')[0].split('=')
nodes[name.strip()] = ip.strip()
return nodes
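# Illustrative admintools.conf node line as parsed above (values are made up;
# only the "name = ip" portion before the first comma is used):
#   v_mydb_node0001 = 10.0.0.1,/var/vertica/catalog,/var/vertica/data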
@task
def prep_restore(domain, dbname):
""" Prepare the backup for restore, performing all the steps needed to restore to an existing cluster.
"""
    # The backups sometimes have some rsync artifacts in them; remove these
with(settings(hide('everything'), warn_only=True)):
sudo('rm -f /var/vertica/data/backup/v_%s_node*/*/.deldelay*' % dbname)
    # config changes are only needed when restoring to a cluster with different ips, which is the case for all test restores; they are a no-op otherwise.
# update vbr snapshot name
snapshot_name = domain.replace('.', '_') + '_' + dbname
with(settings(hide('commands'))):
sudo('sed "s/snapshotName =.*/snapshotName = %s/" /opt/vertica/config/%s_backup.ini > /tmp/%s_backup.ini' % (snapshot_name, dbname, dbname))
sudo('cp /tmp/%s_backup.ini /opt/vertica/config/%s_backup.ini' % (dbname, dbname))
    # Edit the expected ips in the backup config, putting in the cluster ips; this is easier to do in python
    # TODO this is all pretty ugly code, come up with a better way to do it. It would be much simpler if the python ran where the files exist,
    # but since it doesn't I have to be creative.
nodes = get_cluster_nodes(dbname)
with settings(hide('running', 'output')):
new_backup_info = tempfile.NamedTemporaryFile(delete=False)
for line in sudo('cat /var/vertica/data/backup/v_%s_node*/*/*.info' % dbname).splitlines():
if line.startswith('name:'):
splits = line.split()
new_backup_info.write(splits[0] + ' address:%s ' % nodes[splits[0].split(':')[1]] + splits[2] + "\n")
else:
new_backup_info.write(line + "\n")
new_backup_info.close()
with(settings(hide('everything'), warn_only=True)):
sudo('rm -f /tmp/new_backup.info')
put(new_backup_info.name, '/tmp/new_backup.info')
sudo('cp /tmp/new_backup.info /var/vertica/data/backup/v_%s_node*/*/*.info' % dbname)
os.remove(new_backup_info.name)
    # todo script this; if the file does not exist it is vertica 6 and can be skipped.
prompt("If running Vertica 7 and doing a test restore to another cluster an additional file needs to be edited.\n" +
"Change all backup ips to their restore equivalent in this file on each restore node, press enter when finished " +
"/var/vertica/data/backup/v_*_node*/*/var/vertica/catalog/%s/v_*_node*_catalog/Snapshots" % dbname)
@task
def ssl_link(dbname=None):
""" Link the ssl certs for Vertica into the catalog dir. """
# Todo I should work on a class variable for dbname so not every single task needs to ask for it
if dbname is None:
dbname = prompt('Which db should be restored?')
with settings(hide('everything'), warn_only=True):
v7_location = sudo('ls /var/vertica/server*')
if v7_location.succeeded:
sudo('ln -s /var/vertica/server* /var/vertica/catalog/%s/v_%s_node000?_catalog/' % (dbname, dbname))
else: # Vertica 6 installs have the certs in a different configuration
sudo('ln -s /var/vertica/catalog/server* /var/vertica/catalog/%s/v_%s_node000?_catalog/' % (dbname, dbname))
@task
@runs_once
def start_db(dbname=None):
""" Start up vertica, run this on one box only"""
if dbname is None:
dbname = prompt('Which db should be restored?')
dbpass = prompt('Please enter the db password needed to start up the database: ')
with settings(hide('running')):
sudo('/opt/vertica/bin/admintools -t start_db -d %s -p %s' % (dbname, dbpass), user='dbadmin')
@task
@runs_once
def stop_db():
""" Stop vertica, run this on one box only"""
puts(magenta('Stopping database'))
with settings(warn_only=True):
shutdown = sudo('/opt/vertica/bin/vsql -c "SELECT SHUTDOWN(true);"', user='dbadmin') # Will prompt for the dbadmin password
if shutdown.failed:
if prompt('Warning the shutdown failed, possibly because no db was running, continue? [y/n] ') == 'n':
abort('Aborting restore, db did not shutdown correctly.')
@parallel
def set_active_backup(suffix):
""" Switch the active backup dir to allow restoring from multiple datasources to the same cluster.
"""
    # Chef runs will create the backup dir, so make sure it isn't left there empty
with settings(hide('everything'), warn_only=True):
backup_dir_exists = sudo('ls -d /var/vertica/data/backup').succeeded
new_backup_dir_exists = sudo('ls -d /var/vertica/data/backup_%s' % suffix).succeeded
if backup_dir_exists:
sudo('rmdir /var/vertica/data/backup') # Fails if it isn't empty
if new_backup_dir_exists:
sudo('mv /var/vertica/data/backup_%s /var/vertica/data/backup' % suffix)
else:
sudo('mkdir /var/vertica/data/backup')
@parallel
def unset_active_backup(suffix):
""" Disable active backup dir.
"""
#TODO make sure destination doesn't exist
sudo('mv /var/vertica/data/backup /var/vertica/data/backup_%s' % suffix)
@parallel
def switch_active_dataset(to_set, from_set, dbname, delete_from=False):
""" Switches the active data/catalog directories used by vertica.
This is used during test restores to move aside dev data to test the restored data and then again
to switch it back.
The to_set is the name of the data/catalog to put in place.
the from_set is the name the currently active set will be given.
If delete_from is set instead of moving the files aside they are deleted.
"""
#TODO: check to make sure the db is not running first
data_basepath = '/var/vertica/data/'
catalog_basepath = '/var/vertica/data/catalog_'
link_basepath = '/var/vertica/catalog/' # just the symbolic link
with(settings(hide('everything'), warn_only=True)):
sudo('rm -r {link_basepath}{dbname}'.format(link_basepath=link_basepath, dbname=dbname))
if delete_from:
sudo('rm -r {data_basepath}{dbname}'.format(data_basepath=data_basepath, dbname=dbname))
sudo('rm -r {catalog_basepath}{dbname}'.format(catalog_basepath=catalog_basepath, dbname=dbname))
else:
sudo('mv {data_basepath}{dbname} {data_basepath}{from_set}'.format(data_basepath=data_basepath,
dbname=dbname, from_set=from_set))
sudo('mv {catalog_basepath}{dbname} {catalog_basepath}{from_set}'.format(catalog_basepath=catalog_basepath,
dbname=dbname, from_set=from_set))
# If the to_set exists move it otherwise create empty dirs
with(settings(hide('everything'), warn_only=True)):
to_ls = sudo('ls {data_basepath}{to_set}'.format(data_basepath=data_basepath, to_set=to_set))
if to_ls.succeeded:
sudo('mv {data_basepath}{to_set} {data_basepath}{dbname}'.format(data_basepath=data_basepath,
to_set=to_set, dbname=dbname))
sudo('mv {catalog_basepath}{to_set} {catalog_basepath}{dbname}'.format(catalog_basepath=catalog_basepath,
to_set=to_set, dbname=dbname))
else:
sudo('mkdir {data_basepath}{dbname}'.format(data_basepath=data_basepath, dbname=dbname), user='dbadmin')
sudo('mkdir {catalog_basepath}{dbname}'.format(catalog_basepath=catalog_basepath, dbname=dbname),
user='dbadmin')
# vbr encounters 'Invalid cross-device link' when the catalog is on a different partition, despite this being the best practice setup
sudo('ln -s {catalog_basepath}{dbname} {link_basepath}{dbname}'.format(catalog_basepath=catalog_basepath,
link_basepath=link_basepath, dbname=dbname),
user='dbadmin')
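# Hedged illustration of the directory moves performed above (paths follow the
# basepaths defined in switch_active_dataset; dbname/from_set/to_set values are examples):
#   before switch_active_dataset(to_set='mydb_prod', from_set='mydb_dev', dbname='mydb'):
#       /var/vertica/data/mydb              active data for the running db
#       /var/vertica/data/catalog_mydb      active catalog
#   after:
#       /var/vertica/data/mydb_dev          the previous data set, moved aside
#       /var/vertica/data/catalog_mydb_dev  the previous catalog, moved aside
#       /var/vertica/data/mydb              the former mydb_prod set, now active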
@task
@runs_once
def vbr_restore(dbname):
""" Run the vbr restore command. This should only run on one node per cluster. """
#with(settings(hide('everything'), warn_only = True)):
with(settings(warn_only=True)):
vbr = sudo('/opt/vertica/bin/vbr.py --task restore --config-file /opt/vertica/config/%s_backup.ini' % dbname, user='dbadmin')
if vbr.failed:
abort('The vbr restore command failed! Review logs in /tmp/vbr')
|
|
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
Pull request to include the below code in Python-Markdown:
https://github.com/waylan/Python-Markdown/pull/191
Until it's released, we have a copy here.
/benjaoming
"""
import re
import markdown
from markdown.util import etree
from markdown.extensions.headerid import slugify, unique, itertext
from know.plugins.macros import settings
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
if not remaining_list:
return [], []
current = remaining_list.pop(0)
if not 'children' in current.keys():
current['children'] = []
if not prev_elements:
            # This happens for instance with [8, 1, 1], i.e. when some
            # header level is outside a scope. We treat it as
            # top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
flattened_list, __ = build_correct(toc_list)
return flattened_list
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): # @ReservedAssignment
if self.use_anchors:
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
        # Collect the id attributes already used in the document
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside the previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = unique(self.config["slugify"](text, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({
'level': tag_level,
'id': elem_id,
'name': c.text})
self.add_anchor(c, elem_id)
if marker_found:
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
# serialize and attach to markdown instance.
prettify = self.markdown.treeprocessors.get('prettify')
if prettify:
prettify.run(div)
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(markdown.Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = {"marker": ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify": [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title": [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink": [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>inline'. With this set to '<prettify',
        # it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "<prettify")
def makeExtension(configs={}):
return TocExtension(configs=configs)
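# Hedged usage sketch (standard Python-Markdown 2.x API; config values are
# illustrative): convert a document containing the [TOC] marker, then read the
# serialized table of contents that TocTreeprocessor.run() stores on the
# markdown instance.
#   import markdown
#   md = markdown.Markdown(extensions=[TocExtension(configs=[("title", "Contents")])])
#   html = md.convert("[TOC]\n\n# First\n\n## Second\n")
#   print(md.toc)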
class WikiTreeProcessorClass(TocTreeprocessor):
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
class WikiTocExtension(TocExtension):
TreeProcessorClass = WikiTreeProcessorClass
def extendMarkdown(self, md, md_globals):
if 'toc' in settings.METHODS:
TocExtension.extendMarkdown(self, md, md_globals)
|
|
"""
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
Hypergeometric
"""
from sympy.stats.frv import SingleFinitePSpace
from sympy import S, sympify, Rational, binomial
__all__ = ['FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin',
'Binomial', 'Hypergeometric']
def FiniteRV(name, density):
"""
Create a Finite Random Variable given a dict representing the density.
Returns a RandomSymbol.
>>> from sympy.stats import FiniteRV, P, E
>>> density = {0: .1, 1: .2, 2: .3, 3: .4}
>>> X = FiniteRV('X', density)
>>> E(X)
2.00000000000000
>>> P(X>=2)
0.700000000000000
"""
return SingleFinitePSpace.fromdict(name, density).value
class DiscreteUniformPSpace(SingleFinitePSpace):
"""
Create a Finite Random Variable representing a discrete uniform
distribution.
This class is for internal use.
Create DiscreteUniform Random Symbols using DiscreteUniform function
Examples
========
>>> from sympy.stats import DiscreteUniform, density
>>> from sympy import symbols
>>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
>>> density(X)
{a: 1/3, b: 1/3, c: 1/3}
>>> Y = DiscreteUniform('Y', range(5)) # distribution over a range
>>> density(Y)
{0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
"""
def __new__(cls, name, items):
density = dict((sympify(item), Rational(1, len(items)))
for item in items)
return cls.fromdict(name, density)
def DiscreteUniform(name, items):
"""
Create a Finite Random Variable representing a uniform distribution over
the input set.
Returns a RandomSymbol.
Examples
========
>>> from sympy.stats import DiscreteUniform, density
>>> from sympy import symbols
>>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
>>> density(X)
{a: 1/3, b: 1/3, c: 1/3}
>>> Y = DiscreteUniform('Y', range(5)) # distribution over a range
>>> density(Y)
{0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
"""
return DiscreteUniformPSpace(name, items).value
class DiePSpace(DiscreteUniformPSpace):
"""
Create a Finite Random Variable representing a fair die.
This class is for internal use.
Create Dice Random Symbols using Die function
>>> from sympy.stats import Die, density
>>> D6 = Die('D6', 6) # Six sided Die
>>> density(D6)
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> D4 = Die('D4', 4) # Four sided Die
>>> density(D4)
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
"""
def __new__(cls, name, sides):
return DiscreteUniformPSpace.__new__(cls, name, range(1, sides+1))
def Die(name, sides=6):
"""
Create a Finite Random Variable representing a fair die.
Returns a RandomSymbol.
>>> from sympy.stats import Die, density
>>> D6 = Die('D6', 6) # Six sided Die
>>> density(D6)
{1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
>>> D4 = Die('D4', 4) # Four sided Die
>>> density(D4)
{1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
"""
return DiePSpace(name, sides).value
class BernoulliPSpace(SingleFinitePSpace):
"""
Create a Finite Random Variable representing a Bernoulli process.
Returns a RandomSymbol.
This class is for internal use.
Create Bernoulli Random Symbols using Bernoulli function.
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X)
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X)
{Heads: 1/2, Tails: 1/2}
"""
def __new__(cls, name, p, succ, fail):
succ, fail, p = map(sympify, (succ, fail, p))
density = {succ: p, fail: (1-p)}
return cls.fromdict(name, density)
def Bernoulli(name, p, succ=1, fail=0):
"""
Create a Finite Random Variable representing a Bernoulli process.
Returns a RandomSymbol
>>> from sympy.stats import Bernoulli, density
>>> from sympy import S
>>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
>>> density(X)
{0: 1/4, 1: 3/4}
>>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
>>> density(X)
{Heads: 1/2, Tails: 1/2}
"""
return BernoulliPSpace(name, p, succ, fail).value
class CoinPSpace(BernoulliPSpace):
"""
A probability space representing a coin toss.
    Probability p is the chance of getting "Heads." Half by default.
This class is for internal use.
Create Coin's using Coin function
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C)
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2)
{H: 3/5, T: 2/5}
"""
def __new__(cls, name, p):
return BernoulliPSpace.__new__(cls, name, p, 'H', 'T')
def Coin(name, p=S.Half):
"""
Create a Finite Random Variable representing a Coin toss.
    Probability p is the chance of getting "Heads." Half by default.
Returns a RandomSymbol.
>>> from sympy.stats import Coin, density
>>> from sympy import Rational
>>> C = Coin('C') # A fair coin toss
>>> density(C)
{H: 1/2, T: 1/2}
>>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
>>> density(C2)
{H: 3/5, T: 2/5}
"""
return CoinPSpace(name, p).value
class BinomialPSpace(SingleFinitePSpace):
"""
Create a Finite Random Variable representing a binomial distribution.
This class is for internal use.
Create Binomial Random Symbols using Binomial function.
Examples
========
>>> from sympy.stats import Binomial, density
>>> from sympy import S
>>> X = Binomial('X', 4, S.Half) # Four "coin flips"
>>> density(X)
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
"""
def __new__(cls, name, n, p, succ, fail):
n, p, succ, fail = map(sympify, (n, p, succ, fail))
density = dict((k*succ + (n-k)*fail,
binomial(n, k) * p**k * (1-p)**(n-k)) for k in range(0, n+1))
return cls.fromdict(name, density)
def Binomial(name, n, p, succ=1, fail=0):
"""
Create a Finite Random Variable representing a binomial distribution.
Returns a RandomSymbol.
Examples
========
>>> from sympy.stats import Binomial, density
>>> from sympy import S
>>> X = Binomial('X', 4, S.Half) # Four "coin flips"
>>> density(X)
{0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
"""
return BinomialPSpace(name, n, p, succ, fail).value
class HypergeometricPSpace(SingleFinitePSpace):
"""
Create a Finite Random Variable representing a hypergeometric distribution.
This class is for internal use.
Create Hypergeometric Random Symbols using Hypergeometric function.
Examples
========
>>> from sympy.stats import Hypergeometric, density
>>> from sympy import S
>>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
>>> density(X)
{0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
"""
def __new__(cls, name, N, m, n):
N, m, n = map(sympify, (N, m, n))
density = dict((k, binomial(m, k) * binomial(N-m, n-k) / binomial(N, n))
for k in range(max(0, n+m-N), min(m, n) + 1))
return cls.fromdict(name, density)
def Hypergeometric(name, N, m, n):
"""
Create a Finite Random Variable representing a hypergeometric distribution.
Returns a RandomSymbol.
Examples
========
>>> from sympy.stats import Hypergeometric, density
>>> from sympy import S
>>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
>>> density(X)
{0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
"""
return HypergeometricPSpace(name, N, m, n).value
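# Hedged usage sketch combining the constructors above (P and E are the query
# functions from sympy.stats, as used in the docstrings):
#   from sympy.stats import P, E
#   X = Die('X', 6)
#   P(X > 3)                      # -> 1/2
#   E(Binomial('B', 10, S.Half))  # -> 5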
|
|
from copy import copy
from django.conf import settings
from django.contrib.contenttypes.generic import GenericRelation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model, IntegerField, CharField, FloatField
from django.db.models.signals import post_save, post_delete
class BaseGenericRelation(GenericRelation):
"""
Extends ``GenericRelation`` to:
- Add a consistent default value for ``object_id_field`` and
check for a ``related_model`` attribute which can be defined
on subclasses as a default for the ``to`` argument.
- Add one or more custom fields to the model that the relation
field is applied to, and then call a ``related_items_changed``
method each time related items are saved or deleted, so that a
calculated value can be stored against the custom fields since
aggregates aren't available for GenericRelation instances.
"""
# Mapping of field names to model fields that will be added.
fields = {}
def __init__(self, *args, **kwargs):
"""
Set up some defaults and check for a ``related_model``
attribute for the ``to`` argument.
"""
self.frozen_by_south = kwargs.pop("frozen_by_south", False)
kwargs.setdefault("object_id_field", "object_pk")
to = getattr(self, "related_model", None)
if to:
kwargs.setdefault("to", to)
super(BaseGenericRelation, self).__init__(*args, **kwargs)
def db_type(self, connection):
"""
South expects this to return a string for initial migrations
        against MySQL, to check for text or geometry columns. These
generic fields are neither of those, but returning an empty
string here at least allows migrations to run successfully.
See http://south.aeracode.org/ticket/1204
"""
if self.frozen_by_south:
return ""
return None
def contribute_to_class(self, cls, name):
"""
Add each of the names and fields in the ``fields`` attribute
to the model the relationship field is applied to, and set up
the related item save and delete signals for calling
``related_items_changed``.
"""
for field in cls._meta.many_to_many:
if isinstance(field, self.__class__):
e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
self.__class__.__name__, cls.__name__, cls.__name__,
name, field.name)
raise ImproperlyConfigured(e)
self.related_field_name = name
super(BaseGenericRelation, self).contribute_to_class(cls, name)
# Not applicable to abstract classes, and in fact will break.
if not cls._meta.abstract and not self.frozen_by_south:
for (name_string, field) in self.fields.items():
if "%s" in name_string:
name_string = name_string % name
if not field.verbose_name:
field.verbose_name = self.verbose_name
cls.add_to_class(name_string, copy(field))
# Add a getter function to the model we can use to retrieve
# the field/manager by name.
getter_name = "get_%s_name" % self.__class__.__name__.lower()
cls.add_to_class(getter_name, lambda self: name)
# For some unknown reason the signal won't be triggered
# if given a sender arg, particularly when running
# Cartridge with the field RichTextPage.keywords - so
# instead of specifying self.rel.to as the sender, we
# check for it inside the signal itself.
post_save.connect(self._related_items_changed)
post_delete.connect(self._related_items_changed)
def _related_items_changed(self, **kwargs):
"""
Ensure that the given related item is actually for the model
this field applies to, and pass the instance to the real
``related_items_changed`` handler.
"""
# Manually check that the instance matches the relation,
# since we don't specify a sender for the signal.
try:
to = self.rel.to
if isinstance(to, basestring):
to = get_model(*to.split(".", 1))
assert isinstance(kwargs["instance"], to)
except (TypeError, ValueError, AssertionError):
return
for_model = kwargs["instance"].content_type.model_class()
if issubclass(for_model, self.model):
instance_id = kwargs["instance"].object_pk
try:
instance = for_model.objects.get(id=instance_id)
except self.model.DoesNotExist:
# Instance itself was deleted - signals are irrelevant.
return
if hasattr(instance, "get_content_model"):
instance = instance.get_content_model()
related_manager = getattr(instance, self.related_field_name)
self.related_items_changed(instance, related_manager)
def related_items_changed(self, instance, related_manager):
"""
Can be implemented by subclasses - called whenever the
state of related items change, eg they're saved or deleted.
The instance for this field and the related manager for the
field are passed as arguments.
"""
pass
class CommentsField(BaseGenericRelation):
"""
Stores the number of comments against the
``COMMENTS_FIELD_NAME_count`` field when a comment is saved or
deleted.
"""
related_model = "generic.ThreadedComment"
fields = {"%s_count": IntegerField(editable=False, default=0)}
def related_items_changed(self, instance, related_manager):
"""
        Stores the number of comments. A custom ``count_queryset``
        method is checked for on the related manager, allowing managers
        to implement custom count logic.
"""
try:
count = related_manager.count_queryset()
except AttributeError:
count = related_manager.count()
count_field_name = self.fields.keys()[0] % self.related_field_name
setattr(instance, count_field_name, count)
instance.save()
class KeywordsField(BaseGenericRelation):
"""
Stores the keywords as a single string into the
``KEYWORDS_FIELD_NAME_string`` field for convenient access when
searching.
"""
related_model = "generic.AssignedKeyword"
fields = {"%s_string": CharField(editable=False, blank=True,
max_length=500)}
def __init__(self, *args, **kwargs):
"""
Mark the field as editable so that it can be specified in
admin class fieldsets and pass validation, and also so that
it shows up in the admin form.
"""
super(KeywordsField, self).__init__(*args, **kwargs)
self.editable = True
def formfield(self, **kwargs):
"""
Provide the custom form widget for the admin, since there
isn't a form field mapped to ``GenericRelation`` model fields.
"""
from mezzanine.generic.forms import KeywordsWidget
kwargs["widget"] = KeywordsWidget
return super(KeywordsField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
"""
The ``KeywordsWidget`` field will return data as a string of
comma separated IDs for the ``Keyword`` model - convert these
into actual ``AssignedKeyword`` instances. Also delete
``Keyword`` instances if their last related ``AssignedKeyword``
instance is being removed.
"""
from mezzanine.generic.models import AssignedKeyword, Keyword
related_manager = getattr(instance, self.name)
# Get a list of Keyword IDs being removed.
old_ids = [str(a.keyword_id) for a in related_manager.all()]
new_ids = data.split(",")
removed_ids = set(old_ids) - set(new_ids)
# Remove current AssignedKeyword instances.
related_manager.all().delete()
# Convert the data into AssignedKeyword instances.
if data:
data = [AssignedKeyword(keyword_id=i) for i in new_ids]
            # Remove Keyword instances that no longer have a
            # related AssignedKeyword instance.
existing = AssignedKeyword.objects.filter(keyword__id__in=removed_ids)
existing_ids = set([str(a.keyword_id) for a in existing])
unused_ids = removed_ids - existing_ids
Keyword.objects.filter(id__in=unused_ids).delete()
super(KeywordsField, self).save_form_data(instance, data)
def contribute_to_class(self, cls, name):
"""
Swap out any reference to ``KeywordsField`` with the
``KEYWORDS_FIELD_string`` field in ``search_fields``.
"""
super(KeywordsField, self).contribute_to_class(cls, name)
string_field_name = self.fields.keys()[0] % self.related_field_name
if hasattr(cls, "search_fields") and name in cls.search_fields:
try:
weight = cls.search_fields[name]
except TypeError:
# search_fields is a sequence.
index = cls.search_fields.index(name)
search_fields_type = type(cls.search_fields)
cls.search_fields = list(cls.search_fields)
cls.search_fields[index] = string_field_name
cls.search_fields = search_fields_type(cls.search_fields)
else:
del cls.search_fields[name]
cls.search_fields[string_field_name] = weight
def related_items_changed(self, instance, related_manager):
"""
Stores the keywords as a single string for searching.
"""
assigned = related_manager.select_related("keyword")
keywords = " ".join([unicode(a.keyword) for a in assigned])
string_field_name = self.fields.keys()[0] % self.related_field_name
if getattr(instance, string_field_name) != keywords:
setattr(instance, string_field_name, keywords)
instance.save()
class RatingField(BaseGenericRelation):
"""
Stores the rating count and average against the
``RATING_FIELD_NAME_count`` and ``RATING_FIELD_NAME_average``
fields when a rating is saved or deleted.
"""
related_model = "generic.Rating"
fields = {"%s_count": IntegerField(default=0, editable=False),
"%s_sum": IntegerField(default=0, editable=False),
"%s_average": FloatField(default=0, editable=False)}
def related_items_changed(self, instance, related_manager):
"""
Calculates and saves the average rating.
"""
ratings = [r.value for r in related_manager.all()]
count = len(ratings)
_sum = sum(ratings)
average = _sum / float(count) if count > 0 else 0
setattr(instance, "%s_count" % self.related_field_name, count)
setattr(instance, "%s_sum" % self.related_field_name, _sum)
setattr(instance, "%s_average" % self.related_field_name, average)
instance.save()
# South requires custom fields to be given "rules".
# See http://south.aeracode.org/docs/customfields.html
if "south" in settings.INSTALLED_APPS:
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(rules=[((BaseGenericRelation,), [],
{"frozen_by_south": [True, {"is_value": True}]})],
patterns=["mezzanine\.generic\.fields\."])
except ImportError:
pass
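# Hedged usage sketch (the model name is hypothetical): attaching the fields
# above to a model also adds the extra columns declared in each ``fields``
# mapping, e.g. ``comments_count``, ``keywords_string`` and ``rating_average``.
#   from django.db import models
#   class BlogPost(models.Model):
#       comments = CommentsField()
#       keywords = KeywordsField()
#       rating = RatingField()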
|
|
# -*- coding: utf-8 -*-
from cms.apphook_pool import apphook_pool
from cms.exceptions import NoHomeFound
from cms.utils.moderator import get_page_queryset
from django.conf import settings
from django.conf.urls.defaults import patterns
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import RegexURLResolver, Resolver404, reverse, \
RegexURLPattern
from django.utils.importlib import import_module
APP_RESOLVERS = []
def clear_app_resolvers():
global APP_RESOLVERS
APP_RESOLVERS = []
def applications_page_check(request, current_page=None, path=None):
"""Tries to find if given path was resolved over application.
Applications have higher priority than other cms pages.
"""
if current_page:
return current_page
if path is None:
# We should get in this branch only if an apphook is active on /
# This removes the non-CMS part of the URL.
path = request.path.replace(reverse('pages-root'), '', 1)
# check if application resolver can resolve this
for resolver in APP_RESOLVERS:
try:
page_id = resolver.resolve_page_id(path)
            # yes, it is an application page
page = get_page_queryset(request).get(id=page_id)
            # If a current page was matched then we have some content override
            # from the cms but keep the current page. Otherwise return the page to which the application was assigned.
return page
except Resolver404:
# Raised if the page is not managed by an apphook
pass
return None
class AppRegexURLResolver(RegexURLResolver):
page_id = None
url_patterns = None
def resolve_page_id(self, path):
"""Resolves requested path similar way how resolve does, but instead
of return callback,.. returns page_id to which was application
assigned.
"""
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404, e:
if 'tried' in e.args[0]:
tried.extend([(pattern.regex.pattern + ' ' + t) for t in e.args[0]['tried']])
elif 'path' in e.args[0]:
tried.extend([(pattern.regex.pattern + ' ' + t) for t in e.args[0]['path']])
else:
if sub_match:
return pattern.page_id
tried.append(pattern.regex.pattern)
raise Resolver404, {'tried': tried, 'path': new_path}
def recurse_patterns(path, pattern_list, page_id):
"""
Recurse over a list of to-be-hooked patterns for a given path prefix
"""
newpatterns = []
for pattern in pattern_list:
app_pat = pattern.regex.pattern
if app_pat.startswith('^'):
app_pat = app_pat[1:]
regex = r'^%s%s' % (path, app_pat)
if isinstance(pattern, RegexURLResolver):
# this is an 'include', recurse!
resolver = RegexURLResolver(regex, 'cms_appresolver',
pattern.default_kwargs, pattern.app_name, pattern.namespace)
resolver.page_id = page_id
# see lines 243 and 236 of urlresolvers.py to understand the next line
resolver._urlconf_module = recurse_patterns(regex, pattern.url_patterns, page_id)
else:
# Re-do the RegexURLPattern with the new regular expression
resolver = RegexURLPattern(regex, pattern.callback,
pattern.default_args, pattern.name)
resolver.page_id = page_id
newpatterns.append(resolver)
return newpatterns
def _flatten_patterns(patterns):
flat = []
for pattern in patterns:
if isinstance(pattern, RegexURLResolver):
flat += _flatten_patterns(pattern.url_patterns)
else:
flat.append(pattern)
return flat
def get_app_urls(urls):
for urlconf in urls:
if isinstance(urlconf, basestring):
mod = import_module(urlconf)
if not hasattr(mod, 'urlpatterns'):
raise ImproperlyConfigured(
"URLConf `%s` has no urlpatterns attribute" % urlconf)
yield getattr(mod, 'urlpatterns')
else:
yield urlconf
def get_patterns_for_title(path, title):
"""
Resolve the urlconf module for a path+title combination
Returns a list of url objects.
"""
app = apphook_pool.get_apphook(title.application_urls)
patterns = []
for pattern_list in get_app_urls(app.urls):
if not path.endswith('/'):
path += '/'
page_id = title.page.id
patterns += recurse_patterns(path, pattern_list, page_id)
patterns = _flatten_patterns(patterns)
return patterns
def get_app_patterns():
"""
Get a list of patterns for all hooked apps.
How this works:
By looking through all titles with an app hook (application_urls) we find all
urlconf modules we have to hook into titles.
If we use the ML URL Middleware, we namespace those patterns with the title
language.
All 'normal' patterns from the urlconf get re-written by prefixing them with
the title path and then included into the cms url patterns.
"""
from cms.models import Title
from cms.models.pagemodel import Page
try:
current_site = Site.objects.get_current()
except Site.DoesNotExist:
current_site = None
included = []
    # We don't have a request here, so get_page_queryset() can't be used;
    # if CMS_MODERATOR is set, use the public() queryset, otherwise use
    # draft(). This is fine because url patterns are only used in the frontend.
is_draft = not settings.CMS_MODERATOR
try:
home = Page.objects.get_home()
home_titles = home.title_set.all()
except NoHomeFound:
home_titles = []
home_slugs = {}
for title in home_titles:
home_slugs[title.language] = title.slug
title_qs = Title.objects.filter(page__publisher_is_draft=is_draft, page__site=current_site)
if 'cms.middleware.multilingual.MultilingualURLMiddleware' in settings.MIDDLEWARE_CLASSES:
use_namespaces = True
hooked_applications = {}
else:
use_namespaces = False
hooked_applications = []
# Loop over all titles with an application hooked to them
for title in title_qs.exclude(application_urls=None).exclude(application_urls='').select_related():
if settings.CMS_FLAT_URLS:
if title.language in home_slugs:
path = title.slug.split(home_slugs[title.language] + "/", 1)[-1]
else:
path = title.slug
if use_namespaces:
mixid = "%s:%s:%s" % (path + "/", title.application_urls, title.language)
else:
mixid = "%s:%s" % (path + "/", title.application_urls)
else:
if title.language in home_slugs:
path = title.path.split(home_slugs[title.language] + "/", 1)[-1]
else:
path = title.path
if use_namespaces:
mixid = "%s:%s:%s" % (path + "/", title.application_urls, title.language)
else:
mixid = "%s:%s" % (path + "/", title.application_urls)
if mixid in included:
# don't add the same thing twice
continue
if not settings.APPEND_SLASH:
path += '/'
if use_namespaces:
if title.language not in hooked_applications:
hooked_applications[title.language] = []
hooked_applications[title.language] += get_patterns_for_title(path, title)
else:
hooked_applications += get_patterns_for_title(path, title)
included.append(mixid)
# Build the app patterns to be included in the cms urlconfs
app_patterns = []
if use_namespaces:
for ns, currentpatterns in hooked_applications.items():
extra_patterns = patterns('', *currentpatterns)
resolver = AppRegexURLResolver(r'', 'app_resolver', namespace=ns)
resolver.url_patterns = extra_patterns
app_patterns.append(resolver)
APP_RESOLVERS.append(resolver)
else:
extra_patterns = patterns('', *hooked_applications)
resolver = AppRegexURLResolver(r'', 'app_resolver')
resolver.url_patterns = extra_patterns
app_patterns.append(resolver)
APP_RESOLVERS.append(resolver)
return app_patterns
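# --- Hedged usage sketch (not part of the original django-cms module) ---
# A minimal illustration, assuming a Django 1.2/1.3-era project, of how the
# pieces above are typically consumed: the resolvers returned by
# get_app_patterns() are placed in front of the project's other urlpatterns so
# that apphooked pages win, while applications_page_check() lets middleware map
# a request back to the CMS page owning the matched apphook.  The helper name
# below is invented for illustration only.
def _example_project_urlpatterns():
    from django.conf.urls.defaults import include, url
    from django.contrib import admin
    # Apphook patterns go first so they take precedence over other views.
    return get_app_patterns() + patterns('',
        url(r'^admin/', include(admin.site.urls)),
    )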
|
|
"""Spectral Embedding."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from scipy.sparse.csgraph import connected_components
from scipy.sparse.csgraph import laplacian as csgraph_laplacian
from ..base import BaseEstimator
from ..utils import (
check_array,
check_random_state,
check_symmetric,
)
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.fixes import lobpcg
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph, NearestNeighbors
from ..utils.deprecation import deprecated
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
        An array of bool values indicating the indices of the nodes
        belonging to the connected component containing the given query
        node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
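# Hedged illustration (not part of scikit-learn): a tiny dense adjacency matrix
# with two disconnected pairs of nodes, run through the two helpers above.  The
# helper name `_example_connectivity_check` is invented for this sketch.
def _example_connectivity_check():
    adjacency = np.array(
        [
            [0.0, 1.0, 0.0, 0.0],  # node 0 -- node 1
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],  # node 2 -- node 3
            [0.0, 0.0, 1.0, 0.0],
        ]
    )
    # Two separate components, so the graph as a whole is not connected ...
    assert not _graph_is_connected(adjacency)
    # ... and the component reachable from node 0 contains exactly 2 nodes.
    assert _graph_connected_component(adjacency, 0).sum() == 2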
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
        An array or sparse matrix in a form that is well suited to fast
        eigenvalue decomposition, depending on the bandwidth of the
        matrix.
"""
n_nodes = laplacian.shape[0]
    # We need all entries on the diagonal to be equal to `value`
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol=0.0,
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
https://doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
elif eigen_solver not in ("arpack", "lobpcg", "amg"):
raise ValueError(
"Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if (
eigen_solver == "arpack"
or eigen_solver != "lobpcg"
and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=eigen_tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension and create initial
# approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(
laplacian, X, tol=1e-15, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
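# Hedged usage sketch (not part of scikit-learn): build a dense RBF affinity
# for a small random data set and project it with spectral_embedding() above.
# The helper name `_example_spectral_embedding` is invented for illustration.
def _example_spectral_embedding(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.rand(30, 3)  # 30 samples, 3 features
    affinity = rbf_kernel(X, gamma=1.0)  # dense, symmetric similarity matrix
    embedding = spectral_embedding(
        affinity, n_components=2, random_state=random_state
    )
    return embedding.shape  # expected: (30, 2)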
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_neighbors_ : int
Number of nearest neighbors effectively used.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(
self,
n_components=2,
*,
affinity="nearest_neighbors",
gamma=None,
random_state=None,
eigen_solver=None,
n_neighbors=None,
n_jobs=None,
):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def _more_tags(self):
return {
"pairwise": self.affinity
in ["precomputed", "precomputed_nearest_neighbors"]
}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `_pairwise` was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26)."
)
@property
def _pairwise(self):
return self.affinity in ["precomputed", "precomputed_nearest_neighbors"]
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(
X, accept_sparse="csr", ensure_min_samples=2, estimator=self
)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, str):
if self.affinity not in {
"nearest_neighbors",
"rbf",
"precomputed",
"precomputed_nearest_neighbors",
}:
raise ValueError(
"%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable."
% self.affinity
)
elif not callable(self.affinity):
raise ValueError(
"'affinity' is expected to be an affinity name or a callable. Got: %s"
% self.affinity
)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Returns
-------
X_new : array-like of shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
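# Hedged illustration (not part of scikit-learn): the estimator above with
# affinity="precomputed", which is equivalent to calling spectral_embedding()
# directly on the same symmetric matrix.  The helper name is invented for this
# sketch.
def _example_precomputed_affinity(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.rand(40, 4)
    affinity = rbf_kernel(X, gamma=0.5)  # (40, 40) symmetric affinity matrix
    model = SpectralEmbedding(
        n_components=2, affinity="precomputed", random_state=random_state
    )
    return model.fit_transform(affinity).shape  # expected: (40, 2)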
|
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import collections
import dns.exception
import dns.name
import dns.rdata  # referenced by the isinstance() checks in _rdataset_from_args()
import dns.rdataclass
import dns.rdataset
import dns.rdatatype
import dns.rrset
import dns.serial
import dns.ttl
class TransactionManager:
def reader(self):
"""Begin a read-only transaction."""
raise NotImplementedError # pragma: no cover
def writer(self, replacement=False):
"""Begin a writable transaction.
*replacement*, a ``bool``. If `True`, the content of the
transaction completely replaces any prior content. If False,
the default, then the content of the transaction updates the
existing content.
"""
raise NotImplementedError # pragma: no cover
def origin_information(self):
"""Returns a tuple
(absolute_origin, relativize, effective_origin)
        giving the absolute name of the default origin for any
        relative domain names, whether names should be relativized,
        and the "effective origin". The "effective origin" is the
absolute origin if relativize is False, and the empty name if
relativize is true. (The effective origin is provided even
though it can be computed from the absolute_origin and
relativize setting because it avoids a lot of code
duplication.)
If the returned names are `None`, then no origin information is
available.
This information is used by code working with transactions to
allow it to coordinate relativization. The transaction code
itself takes what it gets (i.e. does not change name
relativity).
"""
raise NotImplementedError # pragma: no cover
def get_class(self):
"""The class of the transaction manager.
"""
raise NotImplementedError # pragma: no cover
def from_wire_origin(self):
"""Origin to use in from_wire() calls.
"""
(absolute_origin, relativize, _) = self.origin_information()
if relativize:
return absolute_origin
else:
return None
class DeleteNotExact(dns.exception.DNSException):
"""Existing data did not match data specified by an exact delete."""
class ReadOnly(dns.exception.DNSException):
"""Tried to write to a read-only transaction."""
class AlreadyEnded(dns.exception.DNSException):
"""Tried to use an already-ended transaction."""
class Transaction:
def __init__(self, manager, replacement=False, read_only=False):
self.manager = manager
self.replacement = replacement
self.read_only = read_only
self._ended = False
#
# This is the high level API
#
def get(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Return the rdataset associated with *name*, *rdtype*, and *covers*,
or `None` if not found.
Note that the returned rdataset is immutable.
"""
self._check_ended()
if isinstance(name, str):
name = dns.name.from_text(name, None)
rdtype = dns.rdatatype.RdataType.make(rdtype)
rdataset = self._get_rdataset(name, rdtype, covers)
if rdataset is not None and \
not isinstance(rdataset, dns.rdataset.ImmutableRdataset):
rdataset = dns.rdataset.ImmutableRdataset(rdataset)
return rdataset
def _check_read_only(self):
if self.read_only:
raise ReadOnly
def add(self, *args):
"""Add records.
The arguments may be:
- rrset
- name, rdataset...
- name, ttl, rdata...
"""
self._check_ended()
self._check_read_only()
return self._add(False, args)
def replace(self, *args):
"""Replace the existing rdataset at the name with the specified
rdataset, or add the specified rdataset if there was no existing
rdataset.
The arguments may be:
- rrset
- name, rdataset...
- name, ttl, rdata...
Note that if you want to replace the entire node, you should do
a delete of the name followed by one or more calls to add() or
replace().
"""
self._check_ended()
self._check_read_only()
return self._add(True, args)
def delete(self, *args):
"""Delete records.
It is not an error if some of the records are not in the existing
set.
The arguments may be:
- rrset
- name
- name, rdataclass, rdatatype, [covers]
- name, rdataset...
- name, rdata...
"""
self._check_ended()
self._check_read_only()
return self._delete(False, args)
def delete_exact(self, *args):
"""Delete records.
The arguments may be:
- rrset
- name
- name, rdataclass, rdatatype, [covers]
- name, rdataset...
- name, rdata...
Raises dns.transaction.DeleteNotExact if some of the records
are not in the existing set.
"""
self._check_ended()
self._check_read_only()
return self._delete(True, args)
def name_exists(self, name):
"""Does the specified name exist?"""
self._check_ended()
if isinstance(name, str):
name = dns.name.from_text(name, None)
return self._name_exists(name)
def update_serial(self, value=1, relative=True, name=dns.name.empty):
"""Update the serial number.
*value*, an `int`, is an increment if *relative* is `True`, or the
actual value to set if *relative* is `False`.
Raises `KeyError` if there is no SOA rdataset at *name*.
Raises `ValueError` if *value* is negative or if the increment is
so large that it would cause the new serial to be less than the
prior value.
"""
self._check_ended()
if value < 0:
raise ValueError('negative update_serial() value')
if isinstance(name, str):
name = dns.name.from_text(name, None)
rdataset = self._get_rdataset(name, dns.rdatatype.SOA,
dns.rdatatype.NONE)
if rdataset is None or len(rdataset) == 0:
raise KeyError
if relative:
serial = dns.serial.Serial(rdataset[0].serial) + value
else:
serial = dns.serial.Serial(value)
serial = serial.value # convert back to int
if serial == 0:
serial = 1
rdata = rdataset[0].replace(serial=serial)
new_rdataset = dns.rdataset.from_rdata(rdataset.ttl, rdata)
self.replace(name, new_rdataset)
def __iter__(self):
self._check_ended()
return self._iterate_rdatasets()
def changed(self):
"""Has this transaction changed anything?
For read-only transactions, the result is always `False`.
For writable transactions, the result is `True` if at some time
during the life of the transaction, the content was changed.
"""
self._check_ended()
return self._changed()
def commit(self):
"""Commit the transaction.
Normally transactions are used as context managers and commit
or rollback automatically, but it may be done explicitly if needed.
        A ``dns.transaction.AlreadyEnded`` exception will be raised if you try
to use a transaction after it has been committed or rolled back.
Raises an exception if the commit fails (in which case the transaction
        is also rolled back).
"""
self._end(True)
def rollback(self):
"""Rollback the transaction.
Normally transactions are used as context managers and commit
or rollback automatically, but it may be done explicitly if needed.
A ``dns.transaction.AlreadyEnded`` exception will be raised if you try
to use a transaction after it has been committed or rolled back.
Rollback cannot otherwise fail.
"""
self._end(False)
#
# Helper methods
#
def _raise_if_not_empty(self, method, args):
if len(args) != 0:
raise TypeError(f'extra parameters to {method}')
def _rdataset_from_args(self, method, deleting, args):
try:
arg = args.popleft()
if isinstance(arg, dns.rrset.RRset):
rdataset = arg.to_rdataset()
elif isinstance(arg, dns.rdataset.Rdataset):
rdataset = arg
else:
if deleting:
ttl = 0
else:
if isinstance(arg, int):
ttl = arg
if ttl > dns.ttl.MAX_TTL:
raise ValueError(f'{method}: TTL value too big')
else:
raise TypeError(f'{method}: expected a TTL')
arg = args.popleft()
if isinstance(arg, dns.rdata.Rdata):
rdataset = dns.rdataset.from_rdata(ttl, arg)
else:
raise TypeError(f'{method}: expected an Rdata')
return rdataset
except IndexError:
if deleting:
return None
else:
# reraise
raise TypeError(f'{method}: expected more arguments')
def _add(self, replace, args):
try:
args = collections.deque(args)
if replace:
method = 'replace()'
else:
method = 'add()'
arg = args.popleft()
if isinstance(arg, str):
arg = dns.name.from_text(arg, None)
if isinstance(arg, dns.name.Name):
name = arg
rdataset = self._rdataset_from_args(method, False, args)
elif isinstance(arg, dns.rrset.RRset):
rrset = arg
name = rrset.name
# rrsets are also rdatasets, but they don't print the
# same and can't be stored in nodes, so convert.
rdataset = rrset.to_rdataset()
else:
raise TypeError(f'{method} requires a name or RRset ' +
'as the first argument')
if rdataset.rdclass != self.manager.get_class():
raise ValueError(f'{method} has objects of wrong RdataClass')
if rdataset.rdtype == dns.rdatatype.SOA:
(_, _, origin) = self.manager.origin_information()
if name != origin:
raise ValueError(f'{method} has non-origin SOA')
self._raise_if_not_empty(method, args)
if not replace:
existing = self._get_rdataset(name, rdataset.rdtype,
rdataset.covers)
if existing is not None:
if isinstance(existing, dns.rdataset.ImmutableRdataset):
trds = dns.rdataset.Rdataset(existing.rdclass,
existing.rdtype,
existing.covers)
trds.update(existing)
existing = trds
rdataset = existing.union(rdataset)
self._put_rdataset(name, rdataset)
except IndexError:
raise TypeError(f'not enough parameters to {method}')
def _delete(self, exact, args):
try:
args = collections.deque(args)
if exact:
method = 'delete_exact()'
else:
method = 'delete()'
arg = args.popleft()
if isinstance(arg, str):
arg = dns.name.from_text(arg, None)
if isinstance(arg, dns.name.Name):
name = arg
if len(args) > 0 and (isinstance(args[0], int) or
isinstance(args[0], str)):
# deleting by type and (optionally) covers
rdtype = dns.rdatatype.RdataType.make(args.popleft())
if len(args) > 0:
covers = dns.rdatatype.RdataType.make(args.popleft())
else:
covers = dns.rdatatype.NONE
self._raise_if_not_empty(method, args)
existing = self._get_rdataset(name, rdtype, covers)
if existing is None:
if exact:
raise DeleteNotExact(f'{method}: missing rdataset')
else:
self._delete_rdataset(name, rdtype, covers)
return
else:
rdataset = self._rdataset_from_args(method, True, args)
elif isinstance(arg, dns.rrset.RRset):
rdataset = arg # rrsets are also rdatasets
name = rdataset.name
else:
raise TypeError(f'{method} requires a name or RRset ' +
'as the first argument')
self._raise_if_not_empty(method, args)
if rdataset:
if rdataset.rdclass != self.manager.get_class():
raise ValueError(f'{method} has objects of wrong '
'RdataClass')
existing = self._get_rdataset(name, rdataset.rdtype,
rdataset.covers)
if existing is not None:
if exact:
intersection = existing.intersection(rdataset)
if intersection != rdataset:
raise DeleteNotExact(f'{method}: missing rdatas')
rdataset = existing.difference(rdataset)
if len(rdataset) == 0:
self._delete_rdataset(name, rdataset.rdtype,
rdataset.covers)
else:
self._put_rdataset(name, rdataset)
elif exact:
raise DeleteNotExact(f'{method}: missing rdataset')
else:
if exact and not self._name_exists(name):
raise DeleteNotExact(f'{method}: name not known')
self._delete_name(name)
except IndexError:
raise TypeError(f'not enough parameters to {method}')
def _check_ended(self):
if self._ended:
raise AlreadyEnded
def _end(self, commit):
self._check_ended()
if self._ended:
raise AlreadyEnded
try:
self._end_transaction(commit)
finally:
self._ended = True
#
# Transactions are context managers.
#
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._ended:
if exc_type is None:
self.commit()
else:
self.rollback()
return False
#
# This is the low level API, which must be implemented by subclasses
# of Transaction.
#
def _get_rdataset(self, name, rdtype, covers):
"""Return the rdataset associated with *name*, *rdtype*, and *covers*,
or `None` if not found.
"""
raise NotImplementedError # pragma: no cover
def _put_rdataset(self, name, rdataset):
"""Store the rdataset."""
raise NotImplementedError # pragma: no cover
def _delete_name(self, name):
"""Delete all data associated with *name*.
It is not an error if the rdataset does not exist.
"""
raise NotImplementedError # pragma: no cover
def _delete_rdataset(self, name, rdtype, covers):
"""Delete all data associated with *name*, *rdtype*, and *covers*.
It is not an error if the rdataset does not exist.
"""
raise NotImplementedError # pragma: no cover
def _name_exists(self, name):
"""Does name exist?
Returns a bool.
"""
raise NotImplementedError # pragma: no cover
def _changed(self):
"""Has this transaction changed anything?"""
raise NotImplementedError # pragma: no cover
def _end_transaction(self, commit):
"""End the transaction.
*commit*, a bool. If ``True``, commit the transaction, otherwise
roll it back.
        If committing and the commit fails, then roll back and raise an
exception.
"""
raise NotImplementedError # pragma: no cover
def _set_origin(self, origin):
"""Set the origin.
This method is called when reading a possibly relativized
source, and an origin setting operation occurs (e.g. $ORIGIN
in a zone file).
"""
raise NotImplementedError # pragma: no cover
def _iterate_rdatasets(self):
"""Return an iterator that yields (name, rdataset) tuples.
Not all Transaction subclasses implement this.
"""
raise NotImplementedError # pragma: no cover
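# --- Hedged usage sketch (not part of dnspython itself) ---
# How the high-level Transaction API above is typically driven through a
# TransactionManager such as dns.zone.Zone (assuming dnspython >= 2.1, where
# zones expose reader()/writer()).  The zone contents and the helper name are
# invented for illustration only.
def _example_zone_update():
    import dns.zone
    zone = dns.zone.from_text(
        "@ 300 IN SOA ns1 hostmaster 1 7200 900 1209600 86400\n"
        "@ 300 IN NS ns1\n"
        "ns1 300 IN A 10.0.0.1\n",
        origin="example.")
    with zone.writer() as txn:
        # add() accepts (name, ttl, rdata); the transaction commits on a clean
        # exit from the with-block and rolls back if an exception escapes.
        txn.add("www", 300,
                dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
                                    "10.0.0.53"))
        txn.update_serial()  # bump the SOA serial by 1
    return zone.get_rdataset("www", "A")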
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors import InvalidArgumentError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import gen_batch_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
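# Hedged sketch (not part of the original test file): batch_function pairs the
# batch and unbatch ops around a user function, so every caller feeds and gets
# back per-request shapes while the wrapped body sees a batched 0th dimension.
# This mirrors what testBasicUnbatchDecorated below exercises inside a session;
# the helper name is invented for illustration and is never called here.
def _example_batched_plus_one():
  """Builds (but does not run) a batched +1 computation for graph mode."""
  @batch_ops.batch_function(1, 10, 100000)  # 1 thread, batch <= 10, 100ms
  def plus_one(batched_t):
    # batched_t may hold rows contributed by several concurrent session.run()
    # callers; results are split back per caller by the paired unbatch op.
    return batched_t + 1
  inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
  return plus_one(inp)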
@test_util.run_all_in_graph_and_eager_modes
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
  # These tests only run in non-eager mode, since batching as a functionality
  # in an eager context is still TBD.
def testBasicBatch(self):
"""Tests that a single batched tensor executes together and only once."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
index_t = thread_results[1]
empty_b = main_results[0][0]
empty_m = main_results[1]
else:
batch_t = main_results[0][0]
index_t = main_results[1]
empty_b = thread_results[0][0]
empty_m = thread_results[1]
# Check that both the inputs made it out exactly once.
self.assertAllEqual(sorted(batch_t), (1, 2))
# Check that we get 2 rows in the index tensor.
self.assertEqual(len(index_t), 2)
# Check that the other ones are empty.
self.assertEqual(len(empty_b), 0)
self.assertEqual(len(empty_m), 0)
def testBatchWithPadding(self):
"""Test that batching with padding up to an allowed batch size works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[5, 10],
grad_timeout_micros=0, batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1, 3]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
else:
batch_t = main_results[0][0]
# Check that the batch tensor incorporates the padding.
self.assertEqual(len(batch_t), 5)
def testMultipleBatch(self):
"""Tests that multiple batched tensors execute together."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, _, _ = batch_ops.batch(
[inp0, inp1],
num_batch_threads=1,
max_batch_size=2,
batch_timeout_micros=36000000,
grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched], feed_dict={inp0: [1],
inp1: [2]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
worker_thread.join()
# At this point either the thread or the main did the batch and the other
# should have empty results.
if list(thread_results[0][0]):
batch_t = thread_results[0]
empty_t = main_results[0]
else:
batch_t = main_results[0]
empty_t = thread_results[0]
# Assert that the tensors were batched together.
self.assertAllEqual(sorted(batch_t[0]), [1, 2])
self.assertAllEqual(sorted(batch_t[1]), [2, 3])
self.assertAllEqual(empty_t[0], [])
self.assertAllEqual(empty_t[1], [])
def testIllegalBatchDifferentDim0Sizes(self):
"""Tests illegally feeding tensors with different dim0 sizes."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp0, inp1], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
with self.assertRaises(Exception) as raised:
_ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
self.assertGreater(
raised.exception.message.find("must have equal 0th-dimension size"),
0)
def testBasicUnbatch(self):
"""Tests that batch and unbatch work together."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
grad_timeout_micros=0, batching_queue="")
computation = batched[0] + 1
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBasicUnbatchDecorated(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
# TODO(apassos): Removing this line causes test flakiness! Ideally should
# be investigated.
default_inp = array_ops.placeholder_with_default(2, shape=[]) # pylint: disable=unused-variable
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
self.assertTrue(in_t.shape is not None)
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchDecoratedWithCapturedInput(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
captured_inp0 = array_ops.placeholder_with_default(2, shape=[])
captured_inp1 = array_ops.placeholder_with_default(1, shape=[])
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return in_t + captured_inp0 - captured_inp1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchFunctionOp(self):
"""Tests that the batch_function op works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@function.Defun(dtypes.int32)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = gen_batch_ops.batch_function(
[inp],
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000,
Tout=[dtypes.int32],
f=computation,
captured_tensors=computation.captured_inputs)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchFunctionOpWithCapturedInput(self):
"""Tests that batch_function op works with captured input."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
captured_inp0 = array_ops.placeholder_with_default(2, shape=[])
captured_inp1 = array_ops.placeholder_with_default(1, shape=[])
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
@function.Defun(dtypes.int32)
def computation(inp):
return inp + captured_inp0 - captured_inp1
result = gen_batch_ops.batch_function(
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000, # 100ms
allowed_batch_sizes=[3, 10],
batching_queue="",
f=computation,
in_tensors=[inp],
captured_tensors=computation.captured_inputs,
Tout=[o.type for o in computation.definition.signature.output_arg])
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBatchFunctionOpWithInputError(self):
"""Tests that batch_function op works with error in the inputs."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
@function.Defun(dtypes.int32, dtypes.int32)
def computation(in0, in1):
return in0 + in1
result = gen_batch_ops.batch_function(
[inp], # computation actually expects 2 inputs.
num_batch_threads=1,
max_batch_size=10,
batch_timeout_micros=100000, # 100ms
batching_queue="",
f=computation,
captured_tensors=computation.captured_inputs,
Tout=[o.type for o in computation.definition.signature.output_arg])
with self.assertRaisesRegex(InvalidArgumentError,
".*2 arguments.*but 1.*"):
sess.run([result], feed_dict={inp: [2]})
def testBatchFunctionOpWithLargeBatchSplitted(self):
"""Tests that the batch_function op works with large batch splitted."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@function.Defun(dtypes.int32)
def computation(in_t):
return in_t + 3
inp = array_ops.placeholder(dtype=dtypes.int32)
result = gen_batch_ops.batch_function(
[inp],
num_batch_threads=2,
# enable_large_batch_splitting is True, so it's valid as long as
# max('allowed_batch_sizes') <= 'max_batch_size'.
allowed_batch_sizes=[1, 2],
max_batch_size=5,
batch_timeout_micros=100000, # 100ms
Tout=[dtypes.int32],
enable_large_batch_splitting=True,
f=computation,
captured_tensors=computation.captured_inputs)
thread1_results = []
thread2_results = []
# Input sizes of worker1 and main thread are larger than
# max(allowed_batch_sizes), while input size of worker2 is smaller.
def worker1():
thread1_results.extend(
sess.run([result], feed_dict={inp: [5, 6, 7, 8, 9]}))
worker_thread1 = threading.Thread(target=worker1)
worker_thread1.start()
def worker2():
thread2_results.extend(sess.run([result], feed_dict={inp: [10]}))
worker_thread2 = threading.Thread(target=worker2)
worker_thread2.start()
main_results = sess.run([result], feed_dict={inp: [2, 3, 4]})
worker_thread1.join()
worker_thread2.join()
self.assertTrue(
np.all(np.equal(thread2_results[0], np.array([13], dtype=np.int32))))
self.assertTrue(
np.all(
np.equal(thread1_results[0],
np.array([8, 9, 10, 11, 12], dtype=np.int32))))
self.assertTrue(
np.all(
np.equal(main_results[0], np.array([5, 6, 7], dtype=np.int32))))
def testBasicUnbatchDecoratedWithReshape(self):
"""Tests that the batch_function decorator works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return array_ops.reshape(in_t, [-1]) + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1, 1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [[1]]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [[2]]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchTimeout(self):
"""Tests that the unbatch timeout works."""
if context.executing_eagerly():
return
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
computation = batched[0] + 1
timeout_micros = 10
result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
shared_name="shared_unbatch")
# Set up a parallel pipeline that delays the computation, but uses the
# same unbatch resource object as the non-delayed pipeline.
computation_delayed = script_ops.py_func(delayed_plus1,
[batched[0]],
dtypes.int32)
result_delayed = batch_ops.unbatch(computation_delayed,
index,
id_t,
timeout_micros,
shared_name="shared_unbatch")
thread_results = []
def worker():
# A first call using the non-delayed pipeline. The batcher will send an
# empty tensor along the non-delayed pipeline.
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
time.sleep(0.1) # Ensure the thread's call starts first.
# A second call using the delayed pipeline. The batcher will send the
# batched tensor along the delayed pipeline, thus delaying the arrival of
# the batched tensor at the unbatch op, relative to the empty tensor.
#
# TODO(olston, apassos): Avoid relying on the order in which the batch op
# emits the empty tensor versus the batched one.
_ = sess.run([result_delayed], feed_dict={inp: [2]})
worker_thread.join()
# The thread's call should hit the timeout, and thus get 0 results.
self.assertEqual(len(thread_results), 0)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Agenda.category_id'
db.add_column(u'agendas_agenda', 'category_id',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='agendas', null=True, to=orm['tagging.Tag']),
keep_default=False)
# Adding field 'Agenda.number_knesset'
db.add_column(u'agendas_agenda', 'number_knesset',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='agendas', null=True, to=orm['mks.Knesset']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Agenda.category_id'
db.delete_column(u'agendas_agenda', 'category_id_id')
# Deleting field 'Agenda.number_knesset'
db.delete_column(u'agendas_agenda', 'number_knesset_id')
models = {
u'agendas.agenda': {
'Meta': {'unique_together': "(('name', 'public_owner_name'),)", 'object_name': 'Agenda'},
'category_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'agendas'", 'null': 'True', 'to': u"orm['tagging.Tag']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'agendas'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'num_followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'number_knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'agendas'", 'null': 'True', 'to': u"orm['mks.Knesset']"}),
'public_owner_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['laws.Vote']", 'through': u"orm['agendas.AgendaVote']", 'symmetrical': 'False'})
},
u'agendas.agendabill': {
'Meta': {'unique_together': "(('agenda', 'bill'),)", 'object_name': 'AgendaBill'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': u"orm['agendas.Agenda']"}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': u"orm['laws.Bill']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'agendas.agendameeting': {
'Meta': {'unique_together': "(('agenda', 'meeting'),)", 'object_name': 'AgendaMeeting'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendameetings'", 'to': u"orm['agendas.Agenda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendacommitteemeetings'", 'to': u"orm['committees.CommitteeMeeting']"}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'agendas.agendavote': {
'Meta': {'unique_together': "(('agenda', 'vote'),)", 'object_name': 'AgendaVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': u"orm['agendas.Agenda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': u"orm['laws.Vote']"})
},
u'agendas.summaryagenda': {
'Meta': {'object_name': 'SummaryAgenda'},
'against_votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'score_summaries'", 'to': u"orm['agendas.Agenda']"}),
'db_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'for_votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'agenda_summaries'", 'null': 'True', 'to': u"orm['mks.Member']"}),
'month': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'summary_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
u'agendas.usersuggestedvote': {
'Meta': {'unique_together': "(('agenda', 'vote', 'user'),)", 'object_name': 'UserSuggestedVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_votes'", 'to': u"orm['agendas.Agenda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reasoning': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'sent_to_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggested_agenda_votes'", 'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_agendas'", 'to': u"orm['laws.Vote']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'committees.committee': {
'Meta': {'object_name': 'Committee'},
'aliases': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'chaired_committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'portal_knesset_broadcasts_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'replacing_in_committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'committee'", 'max_length': '10'})
},
u'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': u"orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyist_corporations_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': u"orm['lobbyists.LobbyistCorporation']"}),
'lobbyists_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': u"orm['lobbyists.Lobbyist']"}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': u"orm['laws.Vote']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'laws.bill': {
'Meta': {'ordering': "('-stage_date', '-id')", 'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': u"orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': u"orm['laws.Vote']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'popular_name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'popular_name_slug': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'laws.law': {
'Meta': {'object_name': 'Law'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_coalition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_own_bill': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'vote_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': u"orm['laws.VoteAction']", 'to': u"orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['laws.Vote']"})
},
u'lobbyists.lobbyist': {
'Meta': {'object_name': 'Lobbyist'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'large_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lobbyist'", 'null': 'True', 'to': u"orm['persons.Person']"}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistcorporation': {
'Meta': {'object_name': 'LobbyistCorporation'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'mks.knesset': {
'Meta': {'object_name': 'Knesset'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': u"orm['mks.Membership']", 'to': u"orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'unique_together': "(('knesset', 'name'),)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parties'", 'null': 'True', 'to': u"orm['mks.Knesset']"}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'split_from': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']", 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'calendar_sync_token': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'calendar_url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': u"orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['persons.Title']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.title': {
'Meta': {'object_name': 'Title'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
u'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['agendas']
|
|
class FieldDStarNode:
    '''
    This class is intended to assist with the Field D* search mode.
    Field D* takes longer than a plain grid search but is much more flexible,
    as it allows paths that are not restricted to the 8 grid directions.
    '''
def __init__(self, coordinates, cost=float('inf'), rhs=float('inf')):
self.coordinates = coordinates
self.cost = cost
self.rhs = rhs # default set to positive infinity
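    # Hypothetical usage: nodes start with cost = rhs = +inf and are relaxed as
    # the search proceeds; only the goal node is seeded with rhs = 0, e.g.
    #     node = FieldDStarNode((3, 4))           # cost = rhs = inf
    #     goal = FieldDStarNode((9, 9), rhs=0)    # goal node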
def _fieldDStarGetNeighbors(self, searchNode):
row = searchNode.coordinates[0]
col = searchNode.coordinates[1]
children = [(row + 1, col), (row + 1, col + 1), (row + 1, col - 1), (row, col + 1), (row, col - 1),
(row - 1, col + 1), (row - 1, col), (row - 1, col - 1)]
valid_children = [child for child in children if self.map.isPassable(child)]
return valid_children
def _fieldDStarGetConsecutiveNeighbors(self, coordinates):
'''
To calculate cost, field D* requires pairs of consecutive neighbors
Note that neighbors are returned as tuples
'''
row = coordinates[0]
col = coordinates[1]
consecutive_neighbors = [((row + 1, col), (row + 1, col + 1)),
((row + 1, col + 1), (row, col + 1)),
((row, col + 1), (row - 1, col + 1)),
((row - 1, col + 1), (row - 1, col)),
((row - 1, col), (row - 1, col - 1)),
((row - 1, col - 1), (row, col - 1)),
((row, col - 1), (row + 1, col - 1)),
((row + 1, col - 1), (row + 1, col))]
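        # The eight pairs above walk the ring of neighbors around (row, col) in
        # order, so each edge of the surrounding ring appears exactly once;
        # e.g. for (0, 0) the first pair is ((1, 0), (1, 1)).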
valid_consecutive_neighbors = [item for item in consecutive_neighbors if
(self.map._inBounds(item[0]) and self.map._inBounds(item[1]))]
return valid_consecutive_neighbors
def _fieldDStarComputeCost(self, node, neighbor_a, neighbor_b, optimize_on, nodeDict=None, numTestPoints=11):
# neighbor_a and neighbor_b must be nodes, not coordinates
# This function returns a tuple - the point on the edge that is intersected, and the cost
# Check the documentation for more information about the Compute Cost function
optimize_vector = self._vectorize(optimize_on)
R = self.map.resolution
row, col = node.coordinates
        # s_1 is the neighbor sharing a row or column with node; s_2 is the diagonal neighbor
if neighbor_a.coordinates[0] == row or neighbor_a.coordinates[1] == col:
s_1 = neighbor_a
s_2 = neighbor_b
else:
s_1 = neighbor_b
s_2 = neighbor_a
c_1 = s_1.cost
h_1 = self.map.getElevation(s_1.coordinates)
c_2 = s_2.cost
h_2 = self.map.getElevation(s_2.coordinates)
h = self.map.getElevation(node.coordinates)
        # This takes care of the cases where c_1 or c_2 are infinite
        # If one of them is infinite, we simply take the other path
if (c_1 == float('inf')) and (c_2 != float('inf')):
return (
s_2.coordinates, self._aStarCostFunction(node.coordinates, s_2.coordinates, optimize_vector) + s_2.cost)
elif (c_2 == float('inf')) and (c_1 != float('inf')):
return (
s_1.coordinates, self._aStarCostFunction(node.coordinates, s_1.coordinates, optimize_vector) + s_1.cost)
elif (c_1 == float('inf')) and (c_2 == float('inf')):
return (s_1.coordinates, float('inf'))
        # f(y) is the cost of cutting across the cell to a point on the opposite edge;
        # y is the fraction of the way along that edge from s_1 towards s_2 (between 0 and 1)
def f(y):
prevCost = y * c_2 + (1 - y) * c_1
height = y * h_2 + (1 - y) * h_1
dist = math.sqrt(1 + y ** 2) * R
slope = math.degrees(math.atan((height - h) / (dist)))
d = self.explorer.distance(dist)
t = self.explorer.time(dist, slope)
e = self.explorer.energyCost(dist, slope, self.map.getGravity())
            # The guards below exist because inf * 0 evaluates to nan,
            # and nan propagates and causes hard-to-track bugs
totalCost = prevCost
if optimize_vector[0] != 0:
totalCost += d * optimize_vector[0]
if optimize_vector[1] != 0:
totalCost += t * optimize_vector[1]
if optimize_vector[2] != 0:
totalCost += e * optimize_vector[2]
return totalCost
step = 1.0 / (numTestPoints - 1)
# evenly spread test points
testPoints = [step * i for i in range(numTestPoints)]
# We have several test points to determine our starting location
funcPoints = [f(tp) for tp in testPoints]
startPoint = testPoints[np.argmin(funcPoints)]
# Not too sure if SLSQP is the best choice. I chose it because it allows me to set bounds.
minimum = fmin_slsqp(f, startPoint, bounds=[(0, 1)], iprint=0)[0]
        # point is the location on the edge that corresponds to the minimizing value
point = ((1 - minimum) * s_1.coordinates[0] + minimum * s_2.coordinates[0],
(1 - minimum) * s_1.coordinates[1] + minimum * s_2.coordinates[1])
return (point, f(minimum))
def _fieldDStarGetKey(self, nodeDict, coordinates, start_node, optimize_on):
# if never visited before, add it to nodeDict
if coordinates not in nodeDict.keys():
nodeDict[coordinates] = FieldDStarNode(coordinates, float('inf'), float('inf'))
node = nodeDict[coordinates]
return (
min(node.cost, node.rhs) + self._heuristic(node, start_node, optimize_on, "Field D*"),
min(node.cost, node.rhs))
def _fieldDStarUpdateState(self, nodeDict, openInputs, startNode, coordinates, endCoordinates, optimize_on):
# print "State being updated: ", coordinates
# If node was never previously visited, cost = infinity, mark it as visited
if coordinates not in nodeDict.keys():
nodeDict[coordinates] = FieldDStarNode(coordinates, float('inf'), float('inf'))
node = nodeDict[coordinates]
            logger.info('Added coordinate %s to the nodeDict', coordinates)
else:
node = nodeDict[coordinates]
# If node != goal
# rhs(node) = min_{(s', s'') in connbrs(node)} ComputeCost(node, s', s'')
if coordinates != endCoordinates:
rhs = float('inf')
for pair in self._fieldDStarGetConsecutiveNeighbors(coordinates):
neighbor_a, neighbor_b = pair
if neighbor_a not in nodeDict.keys():
nodeDict[neighbor_a] = FieldDStarNode(neighbor_a)
if neighbor_b not in nodeDict.keys():
nodeDict[neighbor_b] = FieldDStarNode(neighbor_b)
test_val = self._fieldDStarComputeCost(node, nodeDict[neighbor_a], nodeDict[neighbor_b], optimize_on)[1]
if test_val < rhs:
rhs = test_val
node.rhs = rhs
# updating nodeDict
nodeDict[coordinates] = node
# if node in openInputs, remove node from openInputs
open_coords = [pair[1] for pair in openInputs]
if coordinates in open_coords:
for pair in openInputs:
if pair[1] == coordinates:
openInputs.remove(pair)
# if cost != rhs, insert node into openInputs with key(node)
if node.cost != node.rhs:
heapq.heappush(openInputs,
(self._fieldDStarGetKey(nodeDict, coordinates, startNode, optimize_on), coordinates))
def _fieldDStarComputeShortestPath(self, nodeDict, startCoordinates, endCoordinates, openInputs, optimize_on):
startNode = nodeDict[startCoordinates]
past_100_coordinates = [None] * 100
while (openInputs[0][0] < self._fieldDStarGetKey(nodeDict, startCoordinates, startNode, optimize_on)) or (
startNode.cost != startNode.rhs):
key, coordinates = heapq.heappop(openInputs)
# if the coordinate appeared more than 20 times in the past 100 coordinates
# we skip it and move on to the next thing in openInputs
if past_100_coordinates.count(coordinates) > 20:
key, coordinates = heapq.heappop(openInputs)
node = nodeDict[coordinates]
past_100_coordinates.pop(0)
past_100_coordinates.append(coordinates)
# print key, coordinates
if node.cost > node.rhs:
node.cost = node.rhs
nodeDict[coordinates] = node
for neighbor in self._fieldDStarGetNeighbors(node):
self._fieldDStarUpdateState(nodeDict, openInputs, nodeDict[startCoordinates], neighbor,
endCoordinates, optimize_on)
else:
node.cost = float('inf')
nodeDict[coordinates] = node
for neighbor in self._fieldDStarGetNeighbors(node) + [node.coordinates]:
self._fieldDStarUpdateState(nodeDict, openInputs, nodeDict[startCoordinates], neighbor,
endCoordinates, optimize_on)
def _fieldDStarExtractPath(self, nodeDict, startCoordinates, endCoordinates, optimize_on, numTestPoints=11):
coordinates = startCoordinates
path = [startCoordinates]
optimize_vector = self._vectorize(optimize_on)
def interpolatedConsecutiveCoordinates(p):
# This function represents the 6 possible pairs of consecutive points
# around an interpolated point
if (p[0] % 1 != 0):
a = int(p[0])
b = p[1]
return [((a, b), (a, b + 1)),
((a, b + 1), (a + 1, b + 1)),
((a + 1, b + 1), (a + 1, b)),
((a + 1, b), (a + 1, b - 1)),
((a + 1, b - 1), (a, b - 1)),
((a, b - 1), (a, b))]
else:
a = p[0]
b = int(p[1])
return [((a, b), (a + 1, b)),
((a + 1, b), (a + 1, b + 1)),
((a + 1, b + 1), (a, b + 1)),
((a, b + 1), (a - 1, b + 1)),
((a - 1, b + 1), (a - 1, b)),
((a - 1, b), (a, b))]
        # coordinates always refers to the current point in the path
while coordinates != endCoordinates:
            print(path)
nextPoint = None
if (coordinates[0] % 1 != 0) or (coordinates[1] % 1 != 0): # interpolated point
height = convenience.getWeightedElevation(self.map, coordinates)
connCoords = interpolatedConsecutiveCoordinates(coordinates)
else: # both coordinates are integers
height = self.map.getElevation(coordinates)
connCoords = self._fieldDStarGetConsecutiveNeighbors(coordinates)
            # currentCost is the cost of the current point.  We minimize the difference
            # between the cost of the candidate point we travel to and this cost.
currentCost = convenience.getWeightedCost(nodeDict, coordinates)
minCost = float('inf')
            # There should be either six or eight pairs; we put the best option into nextPoint and the associated cost into minCost
for pair in connCoords:
# making sure that both coordinates in the pair are contained
# inside the nodeDict
if (pair[0] in nodeDict) and (pair[1] in nodeDict):
s_1 = nodeDict[pair[0]]
s_2 = nodeDict[pair[1]]
else:
continue
h_1 = self.map.getElevation(s_1.coordinates)
h_2 = self.map.getElevation(s_2.coordinates)
c_1 = s_1.cost
c_2 = s_2.cost
# First 3 parts deal with what happens when c_1 or c_2 are infinite
if (c_1 == float('inf')) and (c_2 != float('inf')):
prevCost = c_2
newCost = self._aStarCostFunction(coordinates, s_2.coordinates, optimize_vector)
if abs(prevCost + newCost - currentCost) < minCost:
minCost = abs(prevCost + newCost - currentCost)
nextPoint = s_2.coordinates
elif (c_2 == float('inf')) and (c_1 != float('inf')):
prevCost = c_1
newCost = self._aStarCostFunction(coordinates, s_1.coordinates, optimize_vector)
if abs(prevCost + newCost - currentCost) < minCost:
minCost = abs(prevCost + newCost - currentCost)
nextPoint = s_1.coordinates
elif (c_1 == float('inf')) and (c_2 == float('inf')):
                    continue  # neither neighbor is reachable, so this pair is not viable
else:
def f(y):
# This is the function to be minimized
prevCost = (1 - y) * c_1 + y * c_2
coord1, coord2 = pair
x1, y1 = coord1
x2, y2 = coord2
if x1 == x2:
p = (x1, y1 * (1 - y) + y2 * y)
else:
p = (x1 * (1 - y) + x2 * y, y2)
h = (1 - y) * h_1 + y * h_2
path_length = math.sqrt(
((p[0] - coordinates[0]) ** 2) + ((p[1] - coordinates[1]) ** 2)) * self.map.resolution
slope = math.degrees(math.atan((height - h) / path_length))
grav = self.map.getGravity()
result = prevCost - currentCost
                        # Guard each term below: inf * 0 evaluates to nan and breaks the minimization
if optimize_vector[0] != 0:
result += self.explorer.distance(path_length) * optimize_vector[0]
if optimize_vector[1] != 0:
result += self.explorer.time(path_length, slope) * optimize_vector[1]
if optimize_vector[2] != 0:
result += self.explorer.energyCost(path_length, slope, grav) * optimize_vector[2]
return result
                    # We evaluate numTestPoints samples of f and use the smallest as the seed for the minimization
step = 1.0 / (numTestPoints - 1)
testPoints = [step * i for i in range(numTestPoints)]
fPoints = [f(tp) for tp in testPoints]
startPoint = testPoints[np.argmin(fPoints)]
minimum = fmin_slsqp(f, startPoint, bounds=[(0, 1)], iprint=0)[0]
minResult = f(minimum)
if minResult < minCost:
minCost = minResult
nextPoint = ((1 - minimum) * pair[0][0] + minimum * pair[1][0],
(1 - minimum) * pair[0][1] + minimum * pair[1][1])
if nextPoint:
# if nextPoint exists, add it
path.append(nextPoint)
coordinates = nextPoint
else:
print "nextPoint doesn't exist!"
break
return path
def fieldDStarSearch(self, startCoords, endCoords, optimize_on, numTestPoints=11):
optimize_vector = self._vectorize(optimize_on)
startNode = FieldDStarNode(startCoords, float('inf'), float('inf'))
endNode = FieldDStarNode(endCoords, float('inf'), 0)
open_coords = []
nodeDict = {} # This dictionary maps coordinates to FieldDStarNode objects.
# Only contains nodes that have been travelled to/are relevant
nodeDict[startCoords] = startNode
nodeDict[endCoords] = endNode
heapq.heappush(open_coords, (self._fieldDStarGetKey(nodeDict, endCoords, endNode, optimize_vector), endCoords))
self._fieldDStarComputeShortestPath(nodeDict, startCoords, endCoords, open_coords, optimize_vector)
for key in nodeDict:
            print('{', key[0], ',', key[1], ',', nodeDict[key].cost, '},')
path = self._fieldDStarExtractPath(nodeDict, startCoords, endCoords, optimize_vector, numTestPoints)
# return self._toJSON(path, optimize_vector, [ActivityPoint(startCoords), ActivityPoint(endCoords)])
return path
def fieldDStarCompletePath(self, optimize_on, waypoints, returnType="JSON", fileName=None, numTestPoints=11):
optimize_vector = self._vectorize(optimize_on)
finalPath = []
costs = []
for i in range(len(waypoints) - 1):
segmentCost = 0
p1 = self.map.convertToRowCol(waypoints[i].coordinates)
p2 = self.map.convertToRowCol(waypoints[i + 1].coordinates)
partialPath = self.fieldDStarSearch(p1, p2, optimize_vector, numTestPoints)
path, expanded, cost = partialPath
finalPath += path # for now I'm not going to bother with deleting the duplicates
# that will occur at every activity point. I think it might end up being useful
segmentCost += cost
segmentCost += optimize_vector[1] * waypoints[i].duration
costs.append(segmentCost)
if returnType == "tuple":
return (finalPath, costs, sum(costs))
elif returnType == "JSON":
data = self._toJSON(finalPath, optimize_on, waypoints)
if fileName:
                with open(fileName, 'w') as outfile:
json.dump(data, outfile, indent=4)
return data
elif returnType == "csv":
sequence = self._toCSV(finalPath, optimize_on, waypoints)
            print(sequence)
if fileName:
with open(fileName, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in sequence:
writer.writerow(row)
return sequence
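# A minimal, self-contained sketch of the seeded 1-D minimization pattern used by
# _fieldDStarComputeCost and _fieldDStarExtractPath above: sample the objective on
# a coarse grid over [0, 1], then hand the best sample to SLSQP as the starting
# point.  This is an illustration of the technique only; the helper name and the
# toy cost function are not part of the planner.
import numpy as np
from scipy.optimize import fmin_slsqp


def _seeded_unit_interval_minimize(f, num_test_points=11):
    """Return (argmin, f(argmin)) of f over [0, 1], seeding SLSQP from a coarse grid."""
    step = 1.0 / (num_test_points - 1)
    test_points = [step * i for i in range(num_test_points)]
    seed = test_points[np.argmin([f(t) for t in test_points])]
    best = fmin_slsqp(f, seed, bounds=[(0, 1)], iprint=0)[0]
    return best, f(best)


if __name__ == '__main__':
    # toy edge-cost function with its minimum inside (0, 1)
    print(_seeded_unit_interval_minimize(lambda y: (y - 0.3) ** 2 + 1.0))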
|
|
#
# -*- coding: utf-8 -*-
"""Development related tasks to be run with 'invoke'"""
import os
import pathlib
import shutil
import invoke
TASK_ROOT = pathlib.Path(__file__).resolve().parent
TASK_ROOT_STR = str(TASK_ROOT)
# shared function
def rmrf(items, verbose=True):
"""Silently remove a list of directories or files"""
if isinstance(items, str):
items = [items]
for item in items:
if verbose:
print("Removing {}".format(item))
shutil.rmtree(item, ignore_errors=True)
# rmtree doesn't remove bare files
try:
os.remove(item)
except FileNotFoundError:
pass
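# Example usage (illustrative): rmrf accepts either a single path or a list of
# paths, e.g. rmrf('.coverage') or rmrf(['build', 'dist'], verbose=False).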
# create namespaces
namespace = invoke.Collection()
namespace_clean = invoke.Collection('clean')
namespace.add_collection(namespace_clean, 'clean')
#####
#
# pytest, pylint, and codecov
#
#####
@invoke.task
def pytest(context, junit=False, pty=True, append_cov=False):
"""Run tests and code coverage using pytest"""
ROOT_PATH = TASK_ROOT.parent.parent
with context.cd(str(ROOT_PATH)):
command_str = 'pytest --cov=cmd2_myplugin --cov-report=term --cov-report=html'
if append_cov:
command_str += ' --cov-append'
if junit:
command_str += ' --junitxml=junit/test-results.xml'
command_str += ' ' + str((TASK_ROOT / 'tests').relative_to(ROOT_PATH))
context.run(command_str, pty=pty)
namespace.add_task(pytest)
@invoke.task
def pytest_clean(context):
"""Remove pytest cache and code coverage files and directories"""
# pylint: disable=unused-argument
with context.cd(TASK_ROOT_STR):
dirs = ['.pytest_cache', '.cache', '.coverage']
rmrf(dirs)
namespace_clean.add_task(pytest_clean, 'pytest')
@invoke.task
def pylint(context):
"""Check code quality using pylint"""
context.run('pylint --rcfile=cmd2_myplugin/pylintrc cmd2_myplugin')
namespace.add_task(pylint)
@invoke.task
def pylint_tests(context):
"""Check code quality of test suite using pylint"""
context.run('pylint --rcfile=tests/pylintrc tests')
namespace.add_task(pylint_tests)
#####
#
# build and distribute
#
#####
BUILDDIR = 'build'
DISTDIR = 'dist'
@invoke.task
def build_clean(context):
"""Remove the build directory"""
# pylint: disable=unused-argument
rmrf(BUILDDIR)
namespace_clean.add_task(build_clean, 'build')
@invoke.task
def dist_clean(context):
"""Remove the dist directory"""
# pylint: disable=unused-argument
rmrf(DISTDIR)
namespace_clean.add_task(dist_clean, 'dist')
@invoke.task
def eggs_clean(context):
"""Remove egg directories"""
# pylint: disable=unused-argument
dirs = set()
dirs.add('.eggs')
for name in os.listdir(os.curdir):
if name.endswith('.egg-info'):
dirs.add(name)
if name.endswith('.egg'):
dirs.add(name)
rmrf(dirs)
namespace_clean.add_task(eggs_clean, 'eggs')
@invoke.task
def bytecode_clean(context):
"""Remove __pycache__ directories and *.pyc files"""
# pylint: disable=unused-argument
dirs = set()
for root, dirnames, files in os.walk(os.curdir):
if '__pycache__' in dirnames:
dirs.add(os.path.join(root, '__pycache__'))
for file in files:
if file.endswith(".pyc"):
dirs.add(os.path.join(root, file))
print("Removing __pycache__ directories and .pyc files")
rmrf(dirs, verbose=False)
namespace_clean.add_task(bytecode_clean, 'bytecode')
#
# make a dummy clean task which runs all the tasks in the clean namespace
clean_tasks = list(namespace_clean.tasks.values())
@invoke.task(pre=clean_tasks, default=True)
def clean_all(context):
"""Run all clean tasks"""
# pylint: disable=unused-argument
pass
namespace_clean.add_task(clean_all, 'all')
@invoke.task(pre=[clean_all])
def sdist(context):
"""Create a source distribution"""
context.run('python setup.py sdist')
namespace.add_task(sdist)
@invoke.task(pre=[clean_all])
def wheel(context):
"""Build a wheel distribution"""
context.run('python setup.py bdist_wheel')
namespace.add_task(wheel)
#
# these two tasks are commented out so you don't
# accidentally run them and upload this template to pypi
#
# @invoke.task(pre=[sdist, wheel])
# def pypi(context):
# """Build and upload a distribution to pypi"""
# context.run('twine upload dist/*')
# namespace.add_task(pypi)
# @invoke.task(pre=[sdist, wheel])
# def pypi_test(context):
# """Build and upload a distribution to https://test.pypi.org"""
# context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
# namespace.add_task(pypi_test)
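#
# Example invocations (illustrative, assuming invoke discovers this file as tasks.py):
#   $ invoke clean            # runs every task in the 'clean' namespace
#   $ invoke clean.bytecode   # removes __pycache__ directories and .pyc files
#   $ invoke pytest --junit   # runs the tests and writes junit/test-results.xml
#   $ invoke sdist            # cleans everything, then builds a source distribution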
|
|
from __future__ import print_function, division, absolute_import
import numpy as np
import operator
from collections import namedtuple
from numba import types, utils
from numba.typing.templates import (AttributeTemplate, AbstractTemplate, infer,
infer_global, infer_getattr, signature,
bound_function)
# Import-time side effect: array operations require the typing support for
# sequences defined in collections, e.g. array.shape[i]
from numba.typing import collections
from numba.errors import TypingError
Indexing = namedtuple("Indexing", ("index", "result", "advanced"))
def get_array_index_type(ary, idx):
"""
Returns None or a tuple-3 for the types of the input array, index, and
resulting type of ``array[index]``.
Note: This is shared logic for ndarray getitem and setitem.
"""
if not isinstance(ary, types.Buffer):
return
ndim = ary.ndim
left_indices = []
right_indices = []
ellipsis_met = False
advanced = False
has_integer = False
if not isinstance(idx, types.BaseTuple):
idx = [idx]
# Walk indices
for ty in idx:
if ty is types.ellipsis:
if ellipsis_met:
raise TypeError("only one ellipsis allowed in array index "
"(got %s)" % (idx,))
ellipsis_met = True
elif isinstance(ty, types.SliceType):
pass
elif isinstance(ty, types.Integer):
# Normalize integer index
ty = types.intp if ty.signed else types.uintp
# Integer indexing removes the given dimension
ndim -= 1
has_integer = True
elif (isinstance(ty, types.Array) and ty.ndim == 0
and isinstance(ty.dtype, types.Integer)):
# 0-d array used as integer index
ndim -= 1
has_integer = True
elif (isinstance(ty, types.Array)
and ty.ndim == 1
and isinstance(ty.dtype, (types.Integer, types.Boolean))):
if advanced or has_integer:
# We don't support the complicated combination of
# advanced indices (and integers are considered part
# of them by Numpy).
raise NotImplementedError("only one advanced index supported")
advanced = True
else:
raise TypeError("unsupported array index type %s in %s"
% (ty, idx))
(right_indices if ellipsis_met else left_indices).append(ty)
# Only Numpy arrays support advanced indexing
if advanced and not isinstance(ary, types.Array):
return
# Check indices and result dimensionality
all_indices = left_indices + right_indices
if ellipsis_met:
assert right_indices[0] is types.ellipsis
del right_indices[0]
n_indices = len(all_indices) - ellipsis_met
if n_indices > ary.ndim:
raise TypeError("cannot index %s with %d indices: %s"
% (ary, n_indices, idx))
if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:
# Full integer indexing => scalar result
# (note if ellipsis is present, a 0-d view is returned instead)
res = ary.dtype
elif advanced:
# Result is a copy
res = ary.copy(ndim=ndim, layout='C', readonly=False)
else:
# Result is a view
if ary.slice_is_copy:
# Avoid view semantics when the original type creates a copy
# when slicing.
return
# Infer layout
layout = ary.layout
def keeps_contiguity(ty, is_innermost):
# A slice can only keep an array contiguous if it is the
# innermost index and it is not strided
return (ty is types.ellipsis or isinstance(ty, types.Integer)
or (is_innermost and isinstance(ty, types.SliceType)
and not ty.has_step))
def check_contiguity(outer_indices):
"""
Whether indexing with the given indices (from outer to inner in
physical layout order) can keep an array contiguous.
"""
for ty in outer_indices[:-1]:
if not keeps_contiguity(ty, False):
return False
if outer_indices and not keeps_contiguity(outer_indices[-1], True):
return False
return True
if layout == 'C':
# Integer indexing on the left keeps the array C-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
left_indices = left_indices + right_indices
right_indices = []
if right_indices:
layout = 'A'
elif not check_contiguity(left_indices):
layout = 'A'
elif layout == 'F':
# Integer indexing on the right keeps the array F-contiguous
if n_indices == ary.ndim:
# If all indices are there, ellipsis's place is indifferent
right_indices = left_indices + right_indices
left_indices = []
if left_indices:
layout = 'A'
elif not check_contiguity(right_indices[::-1]):
layout = 'A'
if ndim == 0:
# Implicitly convert to a scalar if the output ndim==0
res = ary.dtype
else:
res = ary.copy(ndim=ndim, layout=layout)
# Re-wrap indices
if isinstance(idx, types.BaseTuple):
idx = types.BaseTuple.from_types(all_indices)
else:
idx, = all_indices
return Indexing(idx, res, advanced)
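# Worked examples of the typing above (illustrative), assuming `a` is a
# C-contiguous 2-d array and the slices below have no explicit step:
#   a[0, 1] -> scalar of a.dtype (full integer indexing)
#   a[0]    -> 1-d view, layout 'C' (integer index on the outer dimension)
#   a[0, :] -> 1-d view, layout 'C'
#   a[:, 0] -> 1-d view, layout 'A' (the slice is not the innermost index)
#   a[idx]  -> C-ordered copy when idx is a 1-d integer or boolean array
#              (advanced indexing)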
@infer_global(operator.getitem)
class GetItemBuffer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[ary, idx] = args
out = get_array_index_type(ary, idx)
if out is not None:
return signature(out.result, ary, out.index)
@infer_global(operator.setitem)
class SetItemBuffer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if not isinstance(ary, types.Buffer):
return
if not ary.mutable:
raise TypeError("Cannot modify value of type %s" %(ary,))
out = get_array_index_type(ary, idx)
if out is None:
return
idx = out.index
res = out.result
if isinstance(res, types.Array):
# Indexing produces an array
if isinstance(val, types.Array):
if not self.context.can_convert(val.dtype, res.dtype):
# DType conversion not possible
return
else:
res = val
elif isinstance(val, types.Sequence):
if (res.ndim == 1 and
self.context.can_convert(val.dtype, res.dtype)):
                    # Allow assignment of sequence to 1d array
res = val
else:
# NOTE: sequence-to-array broadcasting is unsupported
return
else:
# Allow scalar broadcasting
if self.context.can_convert(val, res.dtype):
res = res.dtype
else:
# Incompatible scalar type
return
elif not isinstance(val, types.Array):
# Single item assignment
if not self.context.can_convert(val, res):
# if the array dtype is not yet defined
if not res.is_precise():
# set the array type to use the dtype of value (RHS)
newary = ary.copy(dtype=val)
return signature(types.none, newary, idx, res)
else:
return
res = val
else:
return
return signature(types.none, ary, idx, res)
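# Illustrative consequences of the rules above (assuming `a` is a mutable 2-d
# float64 array): `a[0, 0] = 1` is typed as a single-item assignment, `a[0] = 2`
# broadcasts a scalar into the selected row, and assigning a sequence to `a[0]`
# is accepted only because the indexed result is 1-d and the element type
# converts; sequence-to-array broadcasting into higher-dimensional targets is
# rejected.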
def normalize_shape(shape):
if isinstance(shape, types.UniTuple):
if isinstance(shape.dtype, types.Integer):
dimtype = types.intp if shape.dtype.signed else types.uintp
return types.UniTuple(dimtype, len(shape))
elif isinstance(shape, types.Tuple) and shape.count == 0:
# Force (0 x intp) for consistency with other shapes
return types.UniTuple(types.intp, 0)
@infer_getattr
class ArrayAttribute(AttributeTemplate):
key = types.Array
def resolve_dtype(self, ary):
return types.DType(ary.dtype)
def resolve_itemsize(self, ary):
return types.intp
def resolve_shape(self, ary):
return types.UniTuple(types.intp, ary.ndim)
def resolve_strides(self, ary):
return types.UniTuple(types.intp, ary.ndim)
def resolve_ndim(self, ary):
return types.intp
def resolve_size(self, ary):
return types.intp
def resolve_flat(self, ary):
return types.NumpyFlatType(ary)
def resolve_ctypes(self, ary):
return types.ArrayCTypes(ary)
def resolve_flags(self, ary):
return types.ArrayFlags(ary)
def resolve_T(self, ary):
if ary.ndim <= 1:
retty = ary
else:
layout = {"C": "F", "F": "C"}.get(ary.layout, "A")
retty = ary.copy(layout=layout)
return retty
def resolve_real(self, ary):
return self._resolve_real_imag(ary, attr='real')
def resolve_imag(self, ary):
return self._resolve_real_imag(ary, attr='imag')
def _resolve_real_imag(self, ary, attr):
if ary.dtype in types.complex_domain:
return ary.copy(dtype=ary.dtype.underlying_float, layout='A')
elif ary.dtype in types.number_domain:
res = ary.copy(dtype=ary.dtype)
if attr == 'imag':
res = res.copy(readonly=True)
return res
else:
msg = "cannot access .{} of array of {}"
raise TypingError(msg.format(attr, ary.dtype))
@bound_function("array.transpose")
def resolve_transpose(self, ary, args, kws):
def sentry_shape_scalar(ty):
if ty in types.number_domain:
# Guard against non integer type
if not isinstance(ty, types.Integer):
raise TypeError("transpose() arg cannot be {0}".format(ty))
return True
else:
return False
assert not kws
if len(args) == 0:
return signature(self.resolve_T(ary))
if len(args) == 1:
shape, = args
if sentry_shape_scalar(shape):
assert ary.ndim == 1
return signature(ary, *args)
shape = normalize_shape(shape)
if shape is None:
return
assert ary.ndim == shape.count
return signature(self.resolve_T(ary), shape)
else:
if any(not sentry_shape_scalar(a) for a in args):
raise TypeError("transpose({0}) is not supported".format(
', '.join(args)))
assert ary.ndim == len(args)
return signature(self.resolve_T(ary), *args)
@bound_function("array.copy")
def resolve_copy(self, ary, args, kws):
assert not args
assert not kws
retty = ary.copy(layout="C", readonly=False)
return signature(retty)
@bound_function("array.item")
def resolve_item(self, ary, args, kws):
assert not kws
# We don't support explicit arguments as that's exactly equivalent
# to regular indexing. The no-argument form is interesting to
# allow some degree of genericity when writing functions.
if not args:
return signature(ary.dtype)
@bound_function("array.itemset")
def resolve_itemset(self, ary, args, kws):
assert not kws
# We don't support explicit arguments as that's exactly equivalent
# to regular indexing. The no-argument form is interesting to
# allow some degree of genericity when writing functions.
if len(args) == 1:
return signature(types.none, ary.dtype)
@bound_function("array.nonzero")
def resolve_nonzero(self, ary, args, kws):
assert not args
assert not kws
# 0-dim arrays return one result array
ndim = max(ary.ndim, 1)
retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)
return signature(retty)
@bound_function("array.reshape")
def resolve_reshape(self, ary, args, kws):
def sentry_shape_scalar(ty):
if ty in types.number_domain:
# Guard against non integer type
if not isinstance(ty, types.Integer):
raise TypeError("reshape() arg cannot be {0}".format(ty))
return True
else:
return False
assert not kws
if ary.layout not in 'CF':
# only work for contiguous array
raise TypeError("reshape() supports contiguous array only")
if len(args) == 1:
# single arg
shape, = args
if sentry_shape_scalar(shape):
ndim = 1
else:
shape = normalize_shape(shape)
if shape is None:
return
ndim = shape.count
retty = ary.copy(ndim=ndim)
return signature(retty, shape)
elif len(args) == 0:
# no arg
raise TypeError("reshape() take at least one arg")
else:
# vararg case
if any(not sentry_shape_scalar(a) for a in args):
raise TypeError("reshape({0}) is not supported".format(
', '.join(map(str, args))))
retty = ary.copy(ndim=len(args))
return signature(retty, *args)
@bound_function("array.sort")
def resolve_sort(self, ary, args, kws):
assert not args
assert not kws
if ary.ndim == 1:
return signature(types.none)
@bound_function("array.argsort")
def resolve_argsort(self, ary, args, kws):
assert not args
kwargs = dict(kws)
kind = kwargs.pop('kind', types.StringLiteral('quicksort'))
if kwargs:
msg = "Unsupported keywords: {!r}"
raise TypingError(msg.format([k for k in kwargs.keys()]))
if ary.ndim == 1:
def argsort_stub(kind='quicksort'):
pass
pysig = utils.pysignature(argsort_stub)
sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)
return sig
@bound_function("array.view")
def resolve_view(self, ary, args, kws):
from .npydecl import _parse_dtype
assert not kws
dtype, = args
dtype = _parse_dtype(dtype)
if dtype is None:
return
retty = ary.copy(dtype=dtype)
return signature(retty, *args)
@bound_function("array.astype")
def resolve_astype(self, ary, args, kws):
from .npydecl import _parse_dtype
assert not kws
dtype, = args
dtype = _parse_dtype(dtype)
if dtype is None:
return
if not self.context.can_convert(ary.dtype, dtype):
raise TypeError("astype(%s) not supported on %s: "
"cannot convert from %s to %s"
% (dtype, ary, ary.dtype, dtype))
layout = ary.layout if ary.layout in 'CF' else 'C'
# reset the write bit irrespective of whether the cast type is the same
# as the current dtype, this replicates numpy
retty = ary.copy(dtype=dtype, layout=layout, readonly=False)
return signature(retty, *args)
@bound_function("array.ravel")
def resolve_ravel(self, ary, args, kws):
# Only support no argument version (default order='C')
assert not kws
assert not args
return signature(ary.copy(ndim=1, layout='C'))
@bound_function("array.flatten")
def resolve_flatten(self, ary, args, kws):
# Only support no argument version (default order='C')
assert not kws
assert not args
return signature(ary.copy(ndim=1, layout='C'))
@bound_function("array.take")
def resolve_take(self, ary, args, kws):
assert not kws
argty, = args
if isinstance(argty, types.Integer):
sig = signature(ary.dtype, *args)
elif isinstance(argty, types.Array):
sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)
elif isinstance(argty, types.List): # 1d lists only
sig = signature(types.Array(ary.dtype, 1, 'C'), *args)
elif isinstance(argty, types.BaseTuple):
sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)
else:
raise TypeError("take(%s) not supported for %s" % argty)
return sig
def generic_resolve(self, ary, attr):
# Resolution of other attributes, for record arrays
if isinstance(ary.dtype, types.Record):
if attr in ary.dtype.fields:
return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')
@infer_getattr
class DTypeAttr(AttributeTemplate):
key = types.DType
def resolve_type(self, ary):
# Wrap the numeric type in NumberClass
return types.NumberClass(ary.dtype)
def resolve_kind(self, ary):
if isinstance(ary.key, types.scalars.Float):
val = 'f'
elif isinstance(ary.key, types.scalars.Integer):
val = 'i'
else:
return None # other types not supported yet
return types.StringLiteral(val)
@infer
class StaticGetItemArray(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
# Resolution of members for record and structured arrays
ary, idx = args
if (isinstance(ary, types.Array) and isinstance(idx, str) and
isinstance(ary.dtype, types.Record)):
if idx in ary.dtype.fields:
ret = ary.copy(dtype=ary.dtype.typeof(idx), layout='A')
return signature(ret, *args)
@infer_getattr
class RecordAttribute(AttributeTemplate):
key = types.Record
def generic_resolve(self, record, attr):
ret = record.typeof(attr)
assert ret
return ret
@infer
class StaticGetItemRecord(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
# Resolution of members for records
record, idx = args
if isinstance(record, types.Record) and isinstance(idx, str):
ret = record.typeof(idx)
assert ret
return signature(ret, *args)
@infer
class StaticSetItemRecord(AbstractTemplate):
key = "static_setitem"
def generic(self, args, kws):
# Resolution of members for record and structured arrays
record, idx, value = args
if isinstance(record, types.Record) and isinstance(idx, str):
expectedty = record.typeof(idx)
if self.context.can_convert(value, expectedty) is not None:
return signature(types.void, record, types.literal(idx), value)
@infer_getattr
class ArrayCTypesAttribute(AttributeTemplate):
key = types.ArrayCTypes
def resolve_data(self, ctinfo):
return types.uintp
@infer_getattr
class ArrayFlagsAttribute(AttributeTemplate):
key = types.ArrayFlags
def resolve_contiguous(self, ctflags):
return types.boolean
def resolve_c_contiguous(self, ctflags):
return types.boolean
def resolve_f_contiguous(self, ctflags):
return types.boolean
@infer_getattr
class NestedArrayAttribute(ArrayAttribute):
key = types.NestedArray
def _expand_integer(ty):
"""
If *ty* is an integer, expand it to a machine int (like Numpy).
"""
if isinstance(ty, types.Integer):
if ty.signed:
return max(types.intp, ty)
else:
return max(types.uintp, ty)
elif isinstance(ty, types.Boolean):
return types.intp
else:
return ty
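# For example, on a typical 64-bit build: int8/int32 expand to intp, uint8 to
# uintp, and Boolean to intp, while floating-point and complex types are
# returned unchanged.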
def generic_homog(self, args, kws):
assert not args
assert not kws
return signature(self.this.dtype, recvr=self.this)
def generic_expand(self, args, kws):
assert not args
assert not kws
return signature(_expand_integer(self.this.dtype), recvr=self.this)
def sum_expand(self, args, kws):
"""
sum can be called with or without an axis parameter.
"""
pysig = None
if kws:
def sum_stub(axis):
pass
pysig = utils.pysignature(sum_stub)
# rewrite args
args = list(args) + [kws['axis']]
kws = None
args_len = len(args)
assert args_len <= 1
if args_len == 0:
# No axis parameter so the return type of the summation is a scalar
# of the type of the array.
out = signature(_expand_integer(self.this.dtype), *args,
recvr=self.this)
else:
# There is an axis parameter
if self.this.ndim == 1:
# 1d reduces to a scalar
return_type = self.this.dtype
else:
# the return type of this summation is an array of dimension one
# less than the input array.
return_type = types.Array(dtype=_expand_integer(self.this.dtype),
ndim=self.this.ndim-1, layout='C')
out = signature(return_type, *args, recvr=self.this)
return out.replace(pysig=pysig)
def generic_expand_cumulative(self, args, kws):
assert not args
assert not kws
assert isinstance(self.this, types.Array)
return_type = types.Array(dtype=_expand_integer(self.this.dtype),
ndim=1, layout='C')
return signature(return_type, recvr=self.this)
def generic_hetero_real(self, args, kws):
assert not args
assert not kws
if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
return signature(types.float64, recvr=self.this)
return signature(self.this.dtype, recvr=self.this)
def generic_hetero_always_real(self, args, kws):
assert not args
assert not kws
if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
return signature(types.float64, recvr=self.this)
if isinstance(self.this.dtype, types.Complex):
return signature(self.this.dtype.underlying_float, recvr=self.this)
return signature(self.this.dtype, recvr=self.this)
def generic_index(self, args, kws):
assert not args
assert not kws
return signature(types.intp, recvr=self.this)
def install_array_method(name, generic):
my_attr = {"key": "array." + name, "generic": generic}
temp_class = type("Array_" + name, (AbstractTemplate,), my_attr)
def array_attribute_attachment(self, ary):
return types.BoundFunction(temp_class, ary)
setattr(ArrayAttribute, "resolve_" + name, array_attribute_attachment)
# Functions that return the same type as the array
for fname in ["min", "max"]:
install_array_method(fname, generic_homog)
# Functions that return a machine-width type, to avoid overflows
install_array_method("prod", generic_expand)
install_array_method("sum", sum_expand)
# Functions that return a machine-width type, to avoid overflows
for fname in ["cumsum", "cumprod"]:
install_array_method(fname, generic_expand_cumulative)
# Functions that require integer arrays get promoted to float64 return
for fname in ["mean"]:
    install_array_method(fname, generic_hetero_real)
# var and std by definition return in real space and int arrays
# get promoted to float64 return
for fname in ["var", "std"]:
    install_array_method(fname, generic_hetero_always_real)
# Functions that return an index (intp)
install_array_method("argmin", generic_index)
install_array_method("argmax", generic_index)
@infer_global(operator.eq)
class CmpOpEqArray(AbstractTemplate):
#key = operator.eq
def generic(self, args, kws):
assert not kws
[va, vb] = args
if isinstance(va, types.Array) and va == vb:
return signature(va.copy(dtype=types.boolean), va, vb)
|
|
import logging
import os
import ibmsecurity
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
uri = "/wga/apiac/resource/instance"
requires_modules = ["wga"]
requires_version = "9.0.7"
def get_all(isamAppliance, instance_name, check_mode=False, force=False):
"""
Retrieving the list of all files in the API Access Control documentation root
"""
instance_exist, warnings = _check_instance_exist(isamAppliance, instance_name)
if force is True or instance_exist is True:
return isamAppliance.invoke_get(
"Retrieving the list of all files in the API Access Control documentation root ",
"{0}/{1}/documentation/".format(uri, instance_name),
requires_modules=requires_modules, requires_version=requires_version)
def get(isamAppliance, instance_name, file_name, check_mode=False, force=False):
"""
    Retrieving a specific file from the API Access Control documentation root
"""
instance_exist, warnings = _check_instance_exist(isamAppliance, instance_name)
if force is True or instance_exist is True:
return isamAppliance.invoke_get(
"Retrieving the list of all files in the API Access Control documentation root ",
"{0}/{1}/documentation/{2}".format(uri, instance_name, file_name),
requires_modules=requires_modules, requires_version=requires_version)
def add(isamAppliance, instance_name, type, file_name=None, dir_name=None, contents=None,
check_mode=False, force=False):
"""
Creating a file or directory in the API Access Control documentation
"""
exist, warnings = _check_exist(isamAppliance, instance_name, file_name)
if force is True or exist is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"type": type
}
            if file_name is not None:
                json_data['file_name'] = file_name
            if dir_name is not None:
                json_data['dir_name'] = dir_name
            if contents is not None:
                json_data['contents'] = contents
return isamAppliance.invoke_post(
"Creating a file or directory in the API Access Control documentation ",
"{0}/{1}/documentation".format(uri, instance_name),
json_data,
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def update(isamAppliance, instance_name, file_name, contents, type='file', check_mode=False, force=False):
"""
Updating a file in the API Access Control documentation root
"""
same_contents, warnings = _check_contents(isamAppliance, instance_name, file_name, contents)
if force is True or same_contents is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"type": type,
"contents": contents
}
return isamAppliance.invoke_put(
"Updating a file in the API Access Control documentation root ",
"{0}/{1}/documentation/{2}".format(uri, instance_name, file_name),
json_data,
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def set(isamAppliance, instance_name, file_name, contents, type='file', check_mode=False, force=False):
exist, warnings = _check_exist(isamAppliance, instance_name, file_name)
if exist is True:
return update(isamAppliance=isamAppliance, instance_name=instance_name, file_name=file_name, contents=contents,
type=type, check_mode=check_mode, force=force)
else:
return add(isamAppliance=isamAppliance, instance_name=instance_name, type=type, file_name=file_name,
contents=contents,
check_mode=check_mode, force=force)
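# Minimal usage sketch for set(): create the file if it is missing, otherwise
# update it when the stored contents differ. The connection objects below
# follow ibmsecurity's usual pattern but are assumptions here, not something
# this module provides:
#
#     from ibmsecurity.appliance.isamappliance import ISAMAppliance
#     from ibmsecurity.user.applianceuser import ApplianceUser
#
#     u = ApplianceUser(username="admin@local", password="secret")
#     isam = ISAMAppliance(hostname="isam.example.com", user=u)
#     ret = set(isam, instance_name="rp1", file_name="openapi.json",
#               contents='{"openapi": "3.0.0"}')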
def rename_directory(isamAppliance, instance_name, file_name, new_name, type='directory', check_mode=False,
force=False):
"""
Renaming a directory in the API Access Control documentation root
"""
exists, warnings = _check_exist(isamAppliance, instance_name, file_name)
if force is True or exists is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"type": type,
"new_name": new_name
}
return isamAppliance.invoke_put(
"Renaming a directory in the API Access Control documentation root",
"{0}/{1}/documentation/{2}".format(uri, instance_name, file_name),
json_data,
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def rename_file(isamAppliance, instance_name, file_name, new_name, type='file', check_mode=False, force=False):
"""
Renaming a file in the API Access Control documentation root
"""
exists, warnings = _check_exist(isamAppliance, instance_name, file_name)
if force is True or exists is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"type": type,
"new_name": new_name
}
return isamAppliance.invoke_put(
"Renaming a file in the API Access Control documentation root",
"{0}/{1}/documentation/{2}".format(uri, instance_name, file_name),
json_data,
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def delete(isamAppliance, instance_name, file_name, check_mode=False, force=False):
"""
    Deleting a file or directory from the API Access Control documentation root
"""
exists, warnings = _check_exist(isamAppliance, instance_name, file_name)
if force is True or exists is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Deleting a file or directory from the API Access Control",
"{0}/{1}/documentation/{2}".format(uri, instance_name, file_name),
requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def export_file(isamAppliance, instance_name, file_name, file_path, check_mode=False, force=False):
"""
Exporting a file in the API Access Control documentation root
"""
if os.path.exists(file_path) is True:
warn_str = "File {0} already exists".format(file_path)
warnings = [warn_str]
return isamAppliance.create_return_object(warnings=warnings)
exists, warnings = _check_exist(isamAppliance, instance_name, file_name)
if force is True or exists is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_get_file(
"Exporting a file in the API Access Control documentation root",
"{0}/{1}/documentation/{2}?export=true".format(uri, instance_name, file_name),
file_path,
requires_modules=requires_modules,
requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def import_file(isamAppliance, instance_name, file_path, file_name="", check_mode=False, force=False):
"""
Importing a file to the API Access Control documentation root
file_path: location of the file to be uploaded. for example: /home/user/file1.json
file_name: name of the directory path and filename in API Documentation Root. for example: dir/subdir or dir/file1
"""
if os.path.exists(file_path) is False:
warn_str = "File {0} already exists".format(file_path)
warnings = [warn_str]
return isamAppliance.create_return_object(warnings=warnings)
same_contents, warnings = _check_contents(isamAppliance=isamAppliance, instance_name=instance_name,
file_name=file_name, file=file_path)
if force is True or same_contents is False:
return isamAppliance.invoke_post_files("Importing a file to the API Access Control documentation root",
"{0}/{1}/documentation/{2}?uiCalled=true".format(uri, instance_name,
file_name),
[
{
'file_formfield': 'file',
'filename': file_path,
'mimetype': 'application/octet-stream'
}
],
{
'type': 'file',
'force': True
},
requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
def _check_instance_exist(isamAppliance, instance_name):
ret_obj = ibmsecurity.isam.web.api_access_control.resources.get_all_instances(isamAppliance)
for obj in ret_obj['data']:
if obj['name'] == instance_name:
return True, ret_obj['warnings']
return False, ret_obj['warnings']
def _check_exist(isamAppliance, instance_name, file_name):
try:
ret_obj = get(isamAppliance, instance_name, file_name)
if ret_obj['data'] != []:
return True, ret_obj['warnings']
else:
return False, ret_obj['warnings']
except Exception as e:
warnings = ["Exception: {0}".format(e)]
return False, warnings
def _check_contents(isamAppliance, instance_name, file_name, contents=None, file=None):
try:
ret_obj = get(isamAppliance, instance_name, file_name)
        if contents is not None:
if ret_obj['data']['contents'] == contents:
return True, ret_obj['warnings']
else:
return False, ret_obj['warnings']
        elif file is not None:
with open(file, 'rt') as myfile:
new_contents = myfile.read()
if ret_obj['data']['contents'] == new_contents:
return True, ret_obj['warnings']
else:
return False, ret_obj['warnings']
else:
return False, ret_obj['warnings']
except Exception as e:
warnings = ["Exception occurred: {0}.".format(e)]
return True, warnings
def compare(isamAppliance1, isamAppliance2, instance1_name, instance2_name=None):
"""
Compare documentation root between two appliances
"""
if instance2_name is None or instance2_name == '':
instance2_name = instance1_name
ret_obj1 = get_all(isamAppliance1, instance1_name)
ret_obj2 = get_all(isamAppliance2, instance2_name)
return tools.json_compare(ret_obj1, ret_obj2)
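# Hedged sketch of compare(): diff the documentation root of an instance
# across two appliances. The appliance objects are assumed to be created as
# in the set() sketch above; the shape of the returned diff comes from
# tools.json_compare and is not spelled out here:
#
#     diff = compare(isam_primary, isam_secondary, instance1_name="rp1")
#     print(diff)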
|
|
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup NFS driver.
"""
import bz2
import ddt
import filecmp
import hashlib
import os
import shutil
import stat
import tempfile
import threading
import zlib
from eventlet import tpool
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_config import cfg
import six
from cinder.backup.drivers import nfs
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
CONF = cfg.CONF
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_HOST = 'fake_host'
FAKE_EXPORT_PATH = 'fake/export/path'
FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH)
FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
FAKE_EXPORT_PATH)
FAKE_BACKUP_ID = fake.BACKUP_ID
FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2]
FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4]
FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:]
UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1,
FAKE_BACKUP_ID_PART2,
FAKE_BACKUP_ID)
FAKE_EGID = 1234
@ddt.ddt
class BackupNFSShareTestCase(test.TestCase):
def setUp(self):
super(BackupNFSShareTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.mock_object(nfs, 'LOG')
def test_check_configuration_no_backup_share(self):
self.override_config('backup_share', None)
self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
driver = nfs.NFSBackupDriver(self.ctxt)
self.assertRaises(exception.InvalidConfigurationValue,
driver.check_for_setup_error)
@mock.patch('os.getegid', return_value=FAKE_EGID)
@mock.patch('cinder.utils.get_file_gid')
@mock.patch('cinder.utils.get_file_mode')
@ddt.data((FAKE_EGID, 0),
(FAKE_EGID, stat.S_IWGRP),
(6666, 0),
(6666, stat.S_IWGRP))
@ddt.unpack
def test_init_backup_repo_path(self,
file_gid,
file_mode,
mock_get_file_mode,
mock_get_file_gid,
mock_getegid):
self.override_config('backup_share', FAKE_BACKUP_SHARE)
self.override_config('backup_mount_point_base',
FAKE_BACKUP_MOUNT_POINT_BASE)
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=FAKE_BACKUP_PATH)
self.mock_object(nfs.NFSBackupDriver, 'check_for_setup_error')
self.mock_object(remotefs_brick, 'RemoteFsClient',
return_value=mock_remotefsclient)
with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):
driver = nfs.NFSBackupDriver(self.ctxt)
mock_get_file_gid.return_value = file_gid
mock_get_file_mode.return_value = file_mode
mock_execute = self.mock_object(driver, '_execute')
path = driver._init_backup_repo_path()
self.assertEqual(FAKE_BACKUP_PATH, path)
mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
mock_remotefsclient.get_mount_point.assert_called_once_with(
FAKE_BACKUP_SHARE)
mock_execute_calls = []
if file_gid != FAKE_EGID:
mock_execute_calls.append(
mock.call('chgrp',
'-R',
FAKE_EGID,
path,
root_helper=driver._root_helper,
run_as_root=True))
if not (file_mode & stat.S_IWGRP):
mock_execute_calls.append(
mock.call('chmod',
'-R',
'g+w',
path,
root_helper=driver._root_helper,
run_as_root=True))
mock_execute.assert_has_calls(mock_execute_calls, any_order=True)
self.assertEqual(len(mock_execute_calls), mock_execute.call_count)
def fake_md5(arg):
class result(object):
def hexdigest(self):
return 'fake-md5-sum'
ret = result()
return ret
class BackupNFSSwiftBasedTestCase(test.TestCase):
"""Test Cases for based on Swift tempest backup tests."""
_DEFAULT_VOLUME_ID = fake.VOLUME_ID
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container='test-container',
backup_id=fake.BACKUP_ID,
parent_id=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
backup = {'id': backup_id,
'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
}
return db.backup_create(self.ctxt, backup)['id']
def _write_effective_compression_file(self, data_size):
"""Ensure file contents can be effectively compressed."""
self.volume_file.seek(0)
self.volume_file.write(bytes([65] * data_size))
self.volume_file.seek(0)
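    # Why a run of identical bytes is written above: it compresses far below
    # its original size, so the drivers' "effective compression" path is
    # exercised. A quick standalone illustration (not used by the tests):
    #
    #     import zlib
    #     data = bytes([65] * 1024)
    #     assert len(zlib.compress(data)) < len(data)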
def _store_thread(self, *args, **kwargs):
self.thread_dict['thread'] = threading.current_thread()
return self.thread_original_method(*args, **kwargs)
def setUp(self):
super(BackupNFSSwiftBasedTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.mock_object(hashlib, 'md5', fake_md5)
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
self.override_config('backup_share', FAKE_BACKUP_SHARE)
self.override_config('backup_mount_point_base',
FAKE_BACKUP_MOUNT_POINT_BASE)
self.override_config('backup_file_size', 52428800)
self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
return_value=self.temp_dir)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
self.size_volume_file = 0
for _i in range(0, 32):
self.volume_file.write(os.urandom(1024))
self.size_volume_file += 1024
# Use dictionary to share data between threads
self.thread_dict = {}
def test_backup_uncompressed(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = nfs.NFSBackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = nfs.NFSBackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container=None,
backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)
def test_backup_cancel(self):
"""Test the backup abort mechanism when backup is force deleted."""
count = set()
def my_refresh():
# This refresh method will abort the backup after 1 chunk
count.add(len(count) + 1)
if len(count) == 2:
backup.destroy()
original_refresh()
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container=None,
backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
original_refresh = backup.refresh
# We cannot mock refresh method in backup object directly because
# mock will raise AttributeError on context manager exit.
with mock.patch('cinder.objects.base.CinderPersistentObject.refresh',
side_effect=my_refresh), \
mock.patch.object(service, 'delete_object',
side_effect=service.delete_object) as delete:
# Driver shouldn't raise the NotFound exception
service.backup(backup, self.volume_file)
# Ensure we called the delete_backup method when abort is detected
self.assertEqual(1, delete.call_count)
@mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.'
'update_container_name',
return_value='testcontainer1')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_notification')
def test_backup_container_notify_1(self, _send_progress,
_send_progress_end,
_mock_update_container_name):
# This unit test writes data to disk. It should be
# updated to not do that.
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container='testcontainer1')
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
@mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.'
'update_container_name',
return_value='testcontainer2')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_notification')
def test_backup_container_notify_2(self, _send_progress,
_send_progress_end,
_mock_update_container_name):
# This unit test writes data to disk. It should be
# updated to not do that.
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container='testcontainer2')
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
@mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.'
'update_container_name',
return_value='testcontainer3')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_notification')
def test_backup_container_notify_3(self, _send_progress,
_send_progress_end,
_mock_update_container_name):
# This unit test writes data to disk. It should be
# updated to not do that.
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container='testcontainer3')
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_enable_progress_timer", True)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
def test_backup_custom_container(self):
volume_id = fake.VOLUME_ID
container_name = 'fake99'
self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
self.mock_object(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(32 * 1024 / content1['chunk_size'],
len(content1['sha256s']))
def test_backup_cmp_shafiles(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
self.mock_object(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
def test_backup_delta_two_objects_change(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
self.mock_object(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
self.flags(backup_file_size=(8 * 1024))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
        # Create incremental backup with changes to contents
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
# Verify that two shas are changed at index 16 and 20
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_delta_two_blocks_in_object_change(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
self.mock_object(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
self.flags(backup_file_size=(8 * 1024))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
        # Create incremental backup with changes to contents
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_backup_metadata_fail(self):
"""Test of when an exception occurs in backup().
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process of an
exception handler.
"""
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.mock_object(nfs.NFSBackupDriver, '_backup_metadata',
fake_backup_metadata)
# We expect that an exception be notified directly.
self.assertRaises(exception.BackupDriverException,
service.backup,
backup, self.volume_file)
def test_backup_backup_metadata_fail2(self):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete_backup().
"""
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.mock_object(nfs.NFSBackupDriver, '_backup_metadata',
fake_backup_metadata)
def fake_delete(self, backup):
raise exception.BackupOperationError()
# Raise a pseudo exception.BackupOperationError.
self.mock_object(nfs.NFSBackupDriver, 'delete_backup', fake_delete)
# We expect that the second exception is notified.
self.assertRaises(exception.BackupOperationError,
service.backup,
backup, self.volume_file)
def test_restore_uncompressed(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
self.flags(backup_sha_block_size_bytes=32)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_restore_bz2(self):
self.thread_original_method = bz2.decompress
volume_id = fake.VOLUME_ID
self.mock_object(bz2, 'decompress', side_effect=self._store_thread)
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
file_size = 1024 * 3
self.flags(backup_file_size=file_size)
self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt)
self._write_effective_compression_file(file_size)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
self.assertNotEqual(threading.current_thread(),
self.thread_dict['thread'])
def test_restore_zlib(self):
self.thread_original_method = zlib.decompress
self.mock_object(zlib, 'decompress', side_effect=self._store_thread)
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
file_size = 1024 * 3
self.flags(backup_file_size=file_size)
self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt)
self._write_effective_compression_file(file_size)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
self.assertNotEqual(threading.current_thread(),
self.thread_dict['thread'])
def test_restore_delta(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
self.mock_object(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
self.flags(backup_file_size=(1024 * 8))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
        # Create incremental backup with changes to contents
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file, True)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.restore(backup, volume_id,
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_delete(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
service = nfs.NFSBackupDriver(self.ctxt)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.delete_backup(backup)
def test_get_compressor(self):
service = nfs.NFSBackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
self.assertEqual(compressor, zlib)
self.assertIsInstance(compressor, tpool.Proxy)
compressor = service._get_compressor('bz2')
self.assertEqual(compressor, bz2)
self.assertIsInstance(compressor, tpool.Proxy)
self.assertRaises(ValueError, service._get_compressor, 'fake')
def create_buffer(self, size):
# Set up buffer of zeroed bytes
fake_data = bytearray(size)
if six.PY2:
            # On Python 2, zlib.compress() accepts buffer, but not bytearray
# NOTE(jsbryant): Pep8 fails on py3 based installations as buffer()
# was removed. 'noqa' used here to avoid that failure.
fake_data = buffer(fake_data) # noqa
return fake_data
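    # The buffer()/bytearray() split above only matters on Python 2, where
    # zlib.compress() rejects a bytearray. On Python 3 a bytearray is a valid
    # bytes-like argument, e.g. (illustrative only):
    #
    #     import zlib
    #     zlib.compress(bytearray(128))   # fine on Python 3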
def test_prepare_output_data_effective_compression(self):
"""Test compression works on a native thread."""
self.thread_original_method = zlib.compress
self.mock_object(zlib, 'compress', side_effect=self._store_thread)
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
result = service._prepare_output_data(fake_data)
self.assertEqual('zlib', result[0])
self.assertGreater(len(fake_data), len(result[1]))
self.assertNotEqual(threading.current_thread(),
self.thread_dict['thread'])
def test_prepare_output_data_no_compresssion(self):
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
result = service._prepare_output_data(fake_data)
self.assertEqual('none', result[0])
self.assertEqual(fake_data, result[1])
def test_prepare_output_data_ineffective_compression(self):
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
# Pre-compress so that compression in the driver will be ineffective.
already_compressed_data = service.compressor.compress(fake_data)
result = service._prepare_output_data(already_compressed_data)
self.assertEqual('none', result[0])
self.assertEqual(already_compressed_data, result[1])
|
|
"""
Module for exporting and importing flopy model attributes
"""
import numpy as np
from ..utils import Util2d, Util3d, Transient2d, MfList
def write_gridlines_shapefile(filename, sr):
"""
Write a polyline shapefile of the grid lines - a lightweight alternative
to polygons.
Parameters
----------
filename : string
name of the shapefile to write
sr : spatial reference
Returns
-------
None
"""
try:
import shapefile
except Exception as e:
raise Exception("io.to_shapefile(): error " +
"importing shapefile - try pip install pyshp")
wr = shapefile.Writer(shapeType=shapefile.POLYLINE)
wr.field("number", "N", 20, 0)
for i, line in enumerate(sr.get_grid_lines()):
wr.poly([line])
wr.record(i)
wr.save(filename)
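# Minimal usage sketch (assumes a flopy version, like the one this module
# targets, where models expose an `sr` spatial reference; the model below is
# purely illustrative):
#
#     import flopy
#     m = flopy.modflow.Modflow()
#     dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10)
#     write_gridlines_shapefile("gridlines.shp", m.sr)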
def write_grid_shapefile(filename, sr, array_dict, nan_val=-1.0e9):
"""
    Write a grid shapefile with array_dict attributes.
Parameters
----------
filename : string
name of the shapefile to write
sr : spatial reference instance
spatial reference object for model grid
array_dict : dict
Dictionary of name and 2D array pairs. Additional 2D arrays to add as
attributes to the grid shapefile.
Returns
-------
None
"""
try:
import shapefile
except Exception as e:
raise Exception("io.to_shapefile(): error " +
"importing shapefile - try pip install pyshp")
wr = shapefile.Writer(shapeType=shapefile.POLYGON)
wr.field("row", "N", 10, 0)
wr.field("column", "N", 10, 0)
arrays = []
names = list(array_dict.keys())
names.sort()
# for name,array in array_dict.items():
for name in names:
array = array_dict[name]
if array.ndim == 3:
assert array.shape[0] == 1
array = array[0, :, :]
assert array.shape == (sr.nrow, sr.ncol)
array[np.where(np.isnan(array))] = nan_val
        if array.dtype in [np.int, np.int32, np.int64]:
wr.field(name, "N", 20, 0)
else:
wr.field(name, "N", 20, 12)
arrays.append(array)
for i in range(sr.nrow):
for j in range(sr.ncol):
pts = sr.get_vertices(i, j)
wr.poly(parts=[pts])
rec = [i + 1, j + 1]
for array in arrays:
rec.append(array[i, j])
wr.record(*rec)
wr.save(filename)
def model_attributes_to_shapefile(filename, ml, package_names=None, array_dict=None, **kwargs):
"""
Wrapper function for writing a shapefile of model data. If package_names is
not None, then search through the requested packages looking for arrays that
can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
ml : flopy.mbase
model instance
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
array_dict : dict of {name:2D array} pairs
Additional 2D arrays to add as attributes to the shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> flopy.utils.model_attributes_to_shapefile('model.shp', m)
"""
if array_dict is None:
array_dict = {}
if package_names is not None:
if not isinstance(package_names, list):
package_names = [package_names]
else:
package_names = [pak.name[0] for pak in ml.packagelist]
for pname in package_names:
pak = ml.get_package(pname)
if pak is not None:
attrs = dir(pak)
if 'sr' in attrs:
attrs.remove('sr')
if 'start_datetime' in attrs:
attrs.remove('start_datetime')
for attr in attrs:
a = pak.__getattribute__(attr)
if isinstance(a, Util2d) and a.shape == (ml.nrow, ml.ncol):
name = a.name.lower()
array_dict[name] = a.array
elif isinstance(a, Util3d):
for i, u2d in enumerate(a):
# name = u2d.name.lower().replace(' ', '_')
name = shape_attr_name(u2d.name)
name += '_{:03d}'.format(i + 1)
array_dict[name] = u2d.array
elif isinstance(a, Transient2d):
kpers = list(a.transient_2ds.keys())
kpers.sort()
for kper in kpers:
u2d = a.transient_2ds[kper]
# name = u2d.name.lower() + "_{0:03d}".format(kper + 1)
name = shape_attr_name(u2d.name)
name = "{}_{:03d}".format(name, kper + 1)
array_dict[name] = u2d.array
elif isinstance(a, MfList):
kpers = a.data.keys()
for kper in kpers:
arrays = a.to_array(kper)
for name, array in arrays.items():
for k in range(array.shape[0]):
# aname = name + "{0:03d}{1:02d}".format(kper, k)
name = shape_attr_name(name, length=4)
aname = "{}{:03d}{:03d}".format(name, k + 1, kper + 1)
array_dict[aname] = array[k]
elif isinstance(a, list):
for v in a:
if isinstance(v, Util3d):
for i, u2d in enumerate(v):
# name = u2d.name.lower().replace(' ', '_')
name = shape_attr_name(u2d.name)
name += '_{:03d}'.format(i + 1)
array_dict[name] = u2d.array
# write data arrays to a shapefile
write_grid_shapefile(filename, ml.sr, array_dict)
def shape_attr_name(name, length=6, keep_layer=False):
"""
    Function to format an array name to a maximum of 10 characters to
conform with ESRI shapefile maximum attribute name length
Parameters
----------
name : string
data array name
length : int
maximum length of string to return. Value passed to function is
overridden and set to 10 if keep_layer=True. (default is 6)
keep_layer : bool
Boolean that determines if layer number in name should be retained.
(default is False)
Returns
-------
String
Examples
--------
>>> import flopy
>>> name = flopy.utils.shape_attr_name('averylongstring')
    >>> name
    'averyl'
"""
# replace spaces with "_"
n = name.lower().replace(' ', '_')
# exclude "_layer_X" portion of string
if keep_layer:
length = 10
n = n.replace('_layer', '_')
else:
try:
idx = n.index('_layer')
n = n[:idx]
        except ValueError:
pass
if len(n) > length:
n = n[:length]
return n
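# A few quick self-checks of the naming rules documented above; a minimal
# sketch that only runs when this file is executed directly. The expected
# values follow from the function as written above.
if __name__ == "__main__":
    assert shape_attr_name('averylongstring') == 'averyl'
    assert shape_attr_name('hk_layer_3') == 'hk'
    assert shape_attr_name('recharge rate') == 'rechar'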
|
|
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance, too-many-arguments
"""Data structures for music service items
The basis for this implementation is this page in the Sonos API
documentation: http://musicpartners.sonos.com/node/83
A note about naming. The Sonos API uses camel case with starting lower
case. These names have been adapted to match general Python class
naming conventions.
MediaMetadata:
Track
Stream
Show
Other
MediaCollection:
Artist
Album
Genre
Playlist
Search
Program
Favorites
Favorite
Collection
Container
AlbumList
TrackList
StreamList
ArtistTrackList
Other
NOTE: "Other" is allowed under both.
Class overview:
+----------------+ +----------------+ +---------------+
|MetadataDictBase+-->+MusicServiceItem+-->+MediaCollection|
+-----+----------+ +--------+-------+ +---------------+
| |
| | +------------------+
| +---->+ MediaMetadata |
| | |
| | +-------------+ |
+------------------------------>+TrackMetadata| |
| | +-------------+ |
| | |
| | +--------------+ |
+------------------------------>+StreamMetadata| |
| +--------------+ |
| |
+------------------+
"""
from urllib.parse import quote as quote_url
import logging
from collections import OrderedDict
from ..data_structures import DidlResource, DidlItem, SearchResult
from ..utils import camel_to_underscore
_LOG = logging.getLogger(__name__)
_LOG.addHandler(logging.NullHandler())
# For now we generate classes dynamically. This is shorter, but
# provides no custom documentation for all the different types.
CLASSES = {}
def get_class(class_key):
"""Form a music service data structure class from the class key
Args:
class_key (str): A concatenation of the base class (e.g. MediaMetadata)
and the class name
Returns:
class: Subclass of MusicServiceItem
"""
if class_key not in CLASSES:
for basecls in (MediaMetadata, MediaCollection):
if class_key.startswith(basecls.__name__):
# So MediaMetadataTrack turns into MSTrack
class_name = "MS" + class_key.replace(basecls.__name__, "")
CLASSES[class_key] = type(class_name, (basecls,), {})
_LOG.debug("Class %s created", CLASSES[class_key])
return CLASSES[class_key]
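# Illustration of the caching behaviour above (shown as a comment to keep the
# module free of import-time side effects):
#
#     track_cls = get_class("MediaMetadataTrack")
#     track_cls.__name__                            # 'MSTrack'
#     issubclass(track_cls, MediaMetadata)          # True
#     get_class("MediaMetadataTrack") is track_cls  # True, served from CLASSES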
def parse_response(service, response, search_type):
"""Parse the response to a music service query and return a SearchResult
Args:
service (MusicService): The music service that produced the response
response (OrderedDict): The response from the soap client call
search_type (str): A string that indicates the search type that the
response is from
Returns:
SearchResult: A SearchResult object
"""
_LOG.debug(
'Parse response "%s" from service "%s" of type "%s"',
response,
service,
search_type,
)
items = []
# The result to be parsed is in either searchResult or getMetadataResult
if "searchResult" in response:
response = response["searchResult"]
elif "getMetadataResult" in response:
response = response["getMetadataResult"]
else:
raise ValueError(
'"response" should contain either the key '
'"searchResult" or "getMetadataResult"'
)
# Form the search metadata
search_metadata = {
"number_returned": response["count"],
"total_matches": None,
"search_type": search_type,
"update_id": None,
}
for result_type in ("mediaCollection", "mediaMetadata"):
# Upper case the first letter (used for the class_key)
result_type_proper = result_type[0].upper() + result_type[1:]
raw_items = response.get(result_type, [])
# If there is only 1 result, it is not put in an array
if isinstance(raw_items, OrderedDict):
raw_items = [raw_items]
for raw_item in raw_items:
# Form the class_key, which is a unique string for this type,
# formed by concatenating the result type with the item type. Turns
# into e.g: MediaMetadataTrack
class_key = result_type_proper + raw_item["itemType"].title()
cls = get_class(class_key)
items.append(cls.from_music_service(service, raw_item))
return SearchResult(items, **search_metadata)
def form_uri(item_id, service, is_track):
"""Form and return a music service item uri
Args:
item_id (str): The item id
service (MusicService): The music service that the item originates from
is_track (bool): Whether the item_id is from a track or not
Returns:
str: The music service item uri
"""
if is_track:
uri = service.sonos_uri_from_id(item_id)
else:
uri = "x-rincon-cpcontainer:" + item_id
return uri
# Type Helper
BOOL_STRS = {"true", "false"}
def bool_str(string):
"""Returns a boolean from a string imput of 'true' or 'false'"""
if string not in BOOL_STRS:
raise ValueError('Invalid boolean string: "{}"'.format(string))
return string == "true"
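# bool_str is deliberately strict about its input, for example:
#
#     bool_str("true")    # True
#     bool_str("false")   # False
#     bool_str("True")    # raises ValueError (case matters)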
# Music Service item base classes
class MetadataDictBase:
"""Class used to parse metadata from kwargs"""
# The following two fields should be overwritten in subclasses
# _valid_fields is a set of valid fields
_valid_fields = {}
    # _types is a dict of fields with non-string types and their conversion
# callables
_types = {}
def __init__(self, metadata_dict):
"""Initialize local variables"""
_LOG.debug("MetadataDictBase.__init__ with: %s", metadata_dict)
for key in metadata_dict:
# Check for invalid fields
if key not in self._valid_fields:
message = '%s instantiated with invalid field "%s" and value: "%s"'
# Really wanted to raise exceptions here, but as it
# turns out I have already encountered invalid fields
# from music services.
_LOG.debug(message, self.__class__, key, metadata_dict[key])
# Convert names and create metadata dict
self.metadata = {}
for key, value in metadata_dict.items():
if key in self._types:
                conversion_callable = self._types[key]
                value = conversion_callable(value)
self.metadata[camel_to_underscore(key)] = value
def __getattr__(self, key):
"""Return item from metadata in case of unknown attribute"""
try:
return self.metadata[key]
except KeyError as error:
message = 'Class {} has no attribute "{}"'
raise AttributeError(
message.format(self.__class__.__name__, key)
) from error
class MusicServiceItem(MetadataDictBase):
"""A base class for all music service items"""
# See comment in MetadataDictBase for explanation of these two attributes
_valid_fields = {}
_types = {}
def __init__(
self,
item_id,
desc, # pylint: disable=too-many-arguments
resources,
uri,
metadata_dict,
music_service=None,
):
"""Init music service item
Args:
item_id (str): This is the Didl compatible id NOT the music item id
            desc (str): A DIDL descriptor, default ``'RINCON_AssociatedZPUDN'``
resources (list): List of DidlResource
uri (str): The uri for the location of the item
            metadata_dict (dict): Mapping of metadata
music_service (MusicService): The MusicService instance the item
originates from
"""
_LOG.debug(
"%s.__init__ with item_id=%s, desc=%s, resources=%s, "
"uri=%s, metadata_dict=..., music_service=%s",
self.__class__.__name__,
item_id,
desc,
resources,
uri,
music_service,
)
super().__init__(metadata_dict)
self.item_id = item_id
self.desc = desc
self.resources = resources
self.uri = uri
self.music_service = music_service
@classmethod
def from_music_service(cls, music_service, content_dict):
"""Return an element instantiated from the information that a music
service has (alternative constructor)
Args:
music_service (MusicService): The music service that content_dict
originated from
content_dict (OrderedDict): The data to instantiate the music
service item from
Returns:
MusicServiceItem: A MusicServiceItem instance
"""
# Form the item_id
quoted_id = quote_url(content_dict["id"].encode("utf-8"))
        # The hex prefix remains a mystery for now
item_id = "0fffffff{}".format(quoted_id)
# Form the uri
is_track = cls == get_class("MediaMetadataTrack")
uri = form_uri(item_id, music_service, is_track)
# Form resources and get desc
resources = [DidlResource(uri=uri, protocol_info="DUMMY")]
desc = music_service.desc
return cls(
item_id, desc, resources, uri, content_dict, music_service=music_service
)
def __str__(self):
"""Return custom string representation"""
title = self.metadata.get("title")
str_ = '<{} title="{}">'
return str_.format(self.__class__.__name__, title)
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: The (XML) Element representation of
this object
"""
# We piggy back on the implementation in DidlItem
didl_item = DidlItem(
title="DUMMY",
# This is ignored. Sonos gets the title from the item_id
parent_id="DUMMY", # Ditto
item_id=self.item_id,
desc=self.desc,
resources=self.resources,
)
return didl_item.to_element(include_namespaces=include_namespaces)
class TrackMetadata(MetadataDictBase):
"""Track metadata class"""
# _valid_fields is a set of valid fields
_valid_fields = {
"artistId",
"artist",
"composerId",
"composer",
"albumId",
"album",
"albumArtURI",
"albumArtistId",
"albumArtist",
"genreId",
"genre",
"duration",
"canPlay",
"canSkip",
"canAddToFavorites",
"rating",
"trackNumber",
"isFavorite",
}
# _types is a dict of fields with non-string types and their
    # conversion callables
_types = {
"duration": int,
"canPlay": bool_str,
"canSkip": bool_str,
"canAddToFavorites": bool_str,
"rating": int,
"trackNumber": int,
"isFavorite": bool_str,
}
class StreamMetadata(MetadataDictBase):
"""Stream metadata class"""
# _valid_fields is a set of valid fields
_valid_fields = {
"currentHost",
"currentShowId",
"currentShow",
"secondsRemaining",
"secondsToNextShow",
"bitrate",
"logo",
"hasOutOfBandMetadata",
"description",
"isEphemeral",
}
# _types is a dict of fields with non-string types and their
    # conversion callables
_types = {
"secondsRemaining": int,
"secondsToNextShow": int,
"bitrate": int,
"hasOutOfBandMetadata": bool_str,
"isEphemeral": bool_str,
}
class MediaMetadata(MusicServiceItem):
"""Base class for all media metadata items"""
# _valid_fields is a set of valid fields
_valid_fields = {
"id",
"title",
"mimeType",
"itemType",
"displayType",
"summary",
"trackMetadata",
"streamMetadata",
"dynamic",
}
# _types is a dict of fields with non-string types and their
    # conversion callables
_types = {
"trackMetadata": TrackMetadata,
"streamMetadata": StreamMetadata,
# We ignore types on the dynamic field
# 'dynamic': ???,
}
class MediaCollection(MusicServiceItem):
"""Base class for all mediaCollection items"""
# _valid_fields is a set of valid fields
_valid_fields = {
"id",
"title",
"itemType",
"displayType",
"summary",
"artistId",
"artist",
"albumArtURI",
"canPlay",
"canEnumerate",
"canAddToFavorites",
"containsFavorite",
"canScroll",
"canSkip",
"isFavorite",
}
# _types is a dict of fields with non-string types and their
    # conversion callables
_types = {
"canPlay": bool_str,
"canEnumerate": bool_str,
"canAddToFavorites": bool_str,
"containsFavorite": bool_str,
"canScroll": bool_str,
"canSkip": bool_str,
"isFavorite": bool_str,
}
|
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.utils import excutils
import paramiko
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
LOG = logging.getLogger(__name__)
class BrcdFCSanLookupService(FCSanLookupService):
"""The SAN lookup service that talks to Brocade switches.
Version History:
1.0.0 - Initial version
"""
VERSION = "1.0.0"
def __init__(self, **kwargs):
"""Initializing the client."""
super(BrcdFCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
self.create_configuration()
self.client = self.create_ssh_client(**kwargs)
def create_configuration(self):
"""Configuration specific to SAN context values."""
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
if len(fabric_names) > 0:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
def create_ssh_client(self, **kwargs):
ssh_client = paramiko.SSHClient()
known_hosts_file = kwargs.get('known_hosts_file', None)
if known_hosts_file is None:
ssh_client.load_system_host_keys()
else:
ssh_client.load_host_keys(known_hosts_file)
missing_key_policy = kwargs.get('missing_key_policy', None)
if missing_key_policy is None:
missing_key_policy = paramiko.WarningPolicy()
ssh_client.set_missing_host_key_policy(missing_key_policy)
return ssh_client
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
"""Provides the initiator/target map for available SAN contexts.
Looks up the nameserver of each configured FC SAN to find logged-in devices
and returns a map of initiator and target port WWNs for each fabric.
:param initiator_wwn_list: List of initiator port WWN
:param target_wwn_list: List of target port WWN
:returns: dict -- device WWN map in the following format
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
:raises: Exception when the connection to the fabric fails
"""
device_map = {}
formatted_target_list = []
formatted_initiator_list = []
fabric_map = {}
fabric_names = self.configuration.fc_fabric_names
fabrics = None
if not fabric_names:
raise exception.InvalidParameterValue(
err=_("Missing Fibre Channel SAN configuration "
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(self.get_formatted_wwn(t))
for i in initiator_wwn_list:
formatted_initiator_list.append(self.get_formatted_wwn(i))
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_address')
fabric_user = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_password')
fabric_port = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_port')
# Get name server data from fabric and find the targets
# logged in
nsinfo = ''
try:
LOG.debug("Getting name server data for "
"fabric %s", fabric_ip)
self.client.connect(
fabric_ip, fabric_port, fabric_user, fabric_pwd)
nsinfo = self.get_nameserver_info()
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting name server info from"
" fabric %s") % fabric_ip)
except Exception as e:
msg = _("SSH connection failed "
"for %(fabric)s with error: %(err)s"
) % {'fabric': fabric_ip, 'err': e}
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
self.client.close()
LOG.debug("Lookup service:nsinfo-%s", nsinfo)
LOG.debug("Lookup service:initiator list from "
"caller-%s", formatted_initiator_list)
LOG.debug("Lookup service:target list from "
"caller-%s", formatted_target_list)
visible_targets = filter(lambda x: x in formatted_target_list,
nsinfo)
visible_initiators = filter(lambda x: x in
formatted_initiator_list, nsinfo)
if visible_targets:
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
if visible_initiators:
# getting rid of the : before returning ~sk
for idx, elem in enumerate(visible_initiators):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug("No initiators are in the nameserver "
"for SAN %s", fabric_name)
fabric_map = {
'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[fabric_name] = fabric_map
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port WWN list (local
and remote) for the given switch fabric
"""
cli_output = None
nsinfo_list = []
try:
cli_output = self._get_switch_data(ZoneConstant.NS_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nsshow info for fabric"))
if cli_output:
nsinfo_list = self._parse_ns_output(cli_output)
try:
cli_output = self._get_switch_data(ZoneConstant.NS_CAM_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nscamshow"))
if cli_output:
nsinfo_list.extend(self._parse_ns_output(cli_output))
LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
return nsinfo_list
def _get_switch_data(self, cmd):
stdin, stdout, stderr = None, None, None
utils.check_ssh_injection([cmd])
try:
stdin, stdout, stderr = self.client.exec_command(cmd)
switch_data = stdout.readlines()
except paramiko.SSHException as e:
msg = (_("SSH Command failed with error '%(err)s' "
"'%(command)s'") % {'err': e,
'command': cmd})
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
if (stdin):
stdin.flush()
stdin.close()
if (stdout):
stdout.close()
if (stderr):
stderr.close()
return switch_data
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns list of device port wwn from ns info
"""
nsinfo_list = []
for line in switch_data:
if not(" NL " in line or " N " in line):
continue
linesplit = line.split(';')
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
nsinfo_list.append(node_port_wwn)
else:
msg = _("Malformed nameserver string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return nsinfo_list
def get_formatted_wwn(self, wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower()
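# Illustrative sketch (not part of the original module): the formatting rule
# implemented by get_formatted_wwn above, shown on a standalone value. A bare
# 16-character WWN gets ':' inserted after every two characters and is
# lower-cased; any other length is only lower-cased.
def _format_wwn_example(wwn_str='200000051E55A100'):
    if len(wwn_str) != 16:
        return wwn_str.lower()
    # '200000051E55A100' -> '20:00:00:05:1e:55:a1:00'
    return ':'.join(wwn_str[i:i + 2]
                    for i in range(0, len(wwn_str), 2)).lower()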
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from sqlalchemy.orm import exc
from sqlalchemy.sql import exists
from neutron.common import constants
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ChanceScheduler(object):
"""Allocate a L3 agent for a router in a random way.
More sophisticated scheduler (similar to filter scheduler in nova?)
can be introduced later.
"""
def auto_schedule_routers(self, plugin, context, host, router_ids):
"""Schedule non-hosted routers to L3 Agent running on host.
If router_ids is given, each router in router_ids is scheduled
if it is not scheduled yet. Otherwise all unscheduled routers
are scheduled.
Don't schedule the routers which are hosted already
by active l3 agents.
"""
with context.session.begin(subtransactions=True):
# query if we have valid l3 agent on the host
query = context.session.query(agents_db.Agent)
query = query.filter(agents_db.Agent.agent_type ==
constants.AGENT_TYPE_L3,
agents_db.Agent.host == host,
agents_db.Agent.admin_state_up == True)
try:
l3_agent = query.one()
except (exc.MultipleResultsFound, exc.NoResultFound):
LOG.debug(_('No enabled L3 agent on host %s'),
host)
return False
if agents_db.AgentDbMixin.is_agent_down(
l3_agent.heartbeat_timestamp):
LOG.warn(_('L3 agent %s is not active'), l3_agent.id)
# check if each of the specified routers is hosted
if router_ids:
unscheduled_router_ids = []
for router_id in router_ids:
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router_id], admin_state_up=True)
if l3_agents:
LOG.debug(_('Router %(router_id)s has already been'
' hosted by L3 agent %(agent_id)s'),
{'router_id': router_id,
'agent_id': l3_agents[0]['id']})
else:
unscheduled_router_ids.append(router_id)
if not unscheduled_router_ids:
# all (specified) routers are already scheduled
return False
else:
# get all routers that are not hosted
#TODO(gongysh) consider the disabled agent's router
stmt = ~exists().where(
l3_db.Router.id ==
l3_agentschedulers_db.RouterL3AgentBinding.router_id)
unscheduled_router_ids = [router_id_[0] for router_id_ in
context.session.query(
l3_db.Router.id).filter(stmt)]
if not unscheduled_router_ids:
LOG.debug(_('No non-hosted routers'))
return False
# check if the configuration of l3 agent is compatible
# with the router
routers = plugin.get_routers(
context, filters={'id': unscheduled_router_ids})
to_removed_ids = []
for router in routers:
candidates = plugin.get_l3_agent_candidates(router, [l3_agent])
if not candidates:
to_removed_ids.append(router['id'])
router_ids = set([r['id'] for r in routers]) - set(to_removed_ids)
if not router_ids:
LOG.warn(_('No routers compatible with L3 agent configuration'
' on host %s'), host)
return False
# binding
for router_id in router_ids:
binding = l3_agentschedulers_db.RouterL3AgentBinding()
binding.l3_agent = l3_agent
binding.router_id = router_id
binding.default = True
context.session.add(binding)
return True
def schedule(self, plugin, context, router_id):
"""Schedule the router to an active L3 agent if there
is no enabled L3 agent hosting it.
"""
with context.session.begin(subtransactions=True):
# A router is allowed to be hosted by just one enabled
# L3 agent; "active" is only a timing concern, since a
# non-active L3 agent can return to active at any time.
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router_id], admin_state_up=True)
if l3_agents:
LOG.debug(_('Router %(router_id)s has already been hosted'
' by L3 agent %(agent_id)s'),
{'router_id': router_id,
'agent_id': l3_agents[0]['id']})
return
sync_router = plugin.get_router(context, router_id)
active_l3_agents = plugin.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warn(_('No active L3 agents'))
return
candidates = plugin.get_l3_agent_candidates(sync_router,
active_l3_agents)
if not candidates:
LOG.warn(_('No L3 agents can host the router %s'),
sync_router['id'])
return
chosen_agent = random.choice(candidates)
binding = l3_agentschedulers_db.RouterL3AgentBinding()
binding.l3_agent = chosen_agent
binding.router_id = sync_router['id']
context.session.add(binding)
LOG.debug(_('Router %(router_id)s is scheduled to '
'L3 agent %(agent_id)s'),
{'router_id': sync_router['id'],
'agent_id': chosen_agent['id']})
return chosen_agent
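# Illustrative sketch (not part of the original module): the selection step
# used by ChanceScheduler.schedule above, shown as a standalone helper --
# pick one candidate L3 agent uniformly at random, or None when there are
# no candidates.
def _pick_random_l3_agent(candidates):
    if not candidates:
        return None
    return random.choice(candidates)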
class LeastUtilizedScheduler(object):
"""Allocate a L3 agent for a new router that is the least utilized
in terms of router count
"""
def auto_schedule_routers(self, plugin, context, host, router_ids):
"""Schedule non-hosted routers to L3 Agent running on host.
If router_ids is given, each router in router_ids is scheduled
if it is not scheduled yet. Otherwise all unscheduled routers
are scheduled.
Don't schedule the routers which are hosted already
by active l3 agents.
"""
with context.session.begin(subtransactions=True):
# query if we have valid l3 agent on the host
query = context.session.query(agents_db.Agent)
query = query.filter(agents_db.Agent.agent_type ==
constants.AGENT_TYPE_L3,
agents_db.Agent.admin_state_up == True)
agents = []
for agent in query.all():
router_query = context.session.query(l3_agentschedulers_db.RouterL3AgentBinding)
router_count = router_query.filter(l3_agentschedulers_db.RouterL3AgentBinding.l3_agent_id == agent['id']).count()
agent['router_count'] = router_count
agents.append(agent)
LOG.debug(_('Router Scheduler found router hosted count='
'%(router_count)s for agent_id=%(agent_id)s'),
{'router_count': agent['router_count'],
'agent_id': agent['id']})
if not agents:
LOG.debug(_('No enabled L3 agents available to schedule to'))
return False
ordered_agents = sorted(agents, key=lambda k: k['router_count'])
l3_agent = None
for chosen_agent in ordered_agents:
if agents_db.AgentDbMixin.is_agent_down(chosen_agent.heartbeat_timestamp):
LOG.debug(_('Router Skipping Preferred agent_id=%s which is down'), chosen_agent['id'])
continue
else:
l3_agent = chosen_agent
# make sure the agent with the least hosted routers is chosen
break
LOG.debug(_('Router Scheduler choosing agent_id=%s'), l3_agent['id'])
if agents_db.AgentDbMixin.is_agent_down(
l3_agent.heartbeat_timestamp):
LOG.warn(_('L3 agent %s is not active'), l3_agent.id)
# check if each of the specified routers is hosted
if router_ids:
if not isinstance(router_ids, (list, tuple)):
router_ids = [router_ids]
unscheduled_router_ids = []
for router_id in router_ids:
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router_id], admin_state_up=True)
if l3_agents:
LOG.debug(_('Router %(router_id)s has already been'
' hosted by L3 agent %(agent_id)s'),
{'router_id': router_id,
'agent_id': l3_agents[0]['id']})
else:
unscheduled_router_ids.append(router_id)
LOG.info(_("Router is appending router_id=%r to unscheduled_router_ids"), router_id)
if not unscheduled_router_ids:
# all (specified) routers are already scheduled
return False
else:
# get all routers that are not hosted
#TODO(gongysh) consider the disabled agent's router
stmt = ~exists().where(
l3_db.Router.id ==
l3_agentschedulers_db.RouterL3AgentBinding.router_id)
unscheduled_router_ids = [router_id_[0] for router_id_ in
context.session.query(
l3_db.Router.id).filter(stmt)]
if not unscheduled_router_ids:
LOG.debug(_('No non-hosted routers'))
return False
else:
LOG.info(_("Router RouterL3AgentBinding returned unscheduled_router_ids=%r"), unscheduled_router_ids)
# check if the configuration of l3 agent is compatible
# with the router
routers = plugin.get_routers(
context, filters={'id': unscheduled_router_ids})
to_removed_ids = []
for router in routers:
candidates = plugin.get_l3_agent_candidates(router, [l3_agent])
if not candidates:
to_removed_ids.append(router['id'])
router_ids = set(unscheduled_router_ids) - set(to_removed_ids)
if not router_ids:
LOG.warn(_('No routers compatible with L3 agent configuration'
' on host %s'), host)
return False
# binding
for router_id in router_ids:
LOG.info(_("Router iterating over router_ids=%r and got router_id=%s" % (router_ids, router_id)))
binding = l3_agentschedulers_db.RouterL3AgentBinding()
binding.l3_agent = l3_agent
binding.router_id = router_id
binding.default = True
context.session.add(binding)
return True
def schedule(self, plugin, context, router_id):
"""Schedule the router to the least utilized active L3 agent
if there is no enabled L3 agent hosting it.
"""
with context.session.begin(subtransactions=True):
# A router is allowed to be hosted by just one enabled
# L3 agent; "active" is only a timing concern, since a
# non-active L3 agent can return to active at any time.
# get router object from router_id
sync_router = plugin.get_router(context, router_id)
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [sync_router['id']], admin_state_up=True)
if l3_agents:
LOG.debug(_('Router %(router_id)s has already been hosted'
' by L3 agent %(agent_id)s'),
{'router_id': sync_router['id'],
'agent_id': l3_agents[0]['id']})
return
active_l3_agents = plugin.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warn(_('No active L3 agents'))
return
candidates = plugin.get_l3_agent_candidates(sync_router,
active_l3_agents)
if not candidates:
LOG.warn(_('No L3 agents can host the router %s'),
sync_router['id'])
return
for candidate in candidates:
routers = plugin.list_routers_on_l3_agent(context,
candidate['id'])
LOG.debug(_('Router Scheduler found router hosted count='
'%(router_count)s for agent_id=%(agent_id)s'),
{'router_count': len(routers['routers']),
'agent_id': candidate['id']})
candidate['router_count'] = len(routers['routers'])
ordered_agents = sorted(candidates, key=lambda k: k['router_count'])
chosen_agent = None
for l3_agent in ordered_agents:
if agents_db.AgentDbMixin.is_agent_down(l3_agent.heartbeat_timestamp):
LOG.debug(_('Router Skipping Preferred agent_id=%s which is down'), l3_agent['id'])
continue
else:
chosen_agent = l3_agent
# make sure the agent with the least hosted routers is chosen
break
LOG.debug(_('Router Scheduler choosing agent=%r'), chosen_agent)
binding = l3_agentschedulers_db.RouterL3AgentBinding()
binding.l3_agent = chosen_agent
binding.router_id = sync_router['id']
context.session.add(binding)
LOG.debug(_('Router %(router_id)s is scheduled to '
'L3 agent %(agent_id)s'),
{'router_id': sync_router['id'],
'agent_id': chosen_agent['id']})
return chosen_agent
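# Illustrative sketch (not part of the original module): the agent selection
# step used by LeastUtilizedScheduler above, extracted as a standalone
# helper. It assumes each candidate supports item access for 'router_count'
# and exposes a heartbeat_timestamp attribute, as the agent objects handled
# by the scheduler code above do.
def _pick_least_utilized_alive_agent(candidates):
    """Return the live agent hosting the fewest routers, or None."""
    for agent in sorted(candidates, key=lambda a: a['router_count']):
        if agents_db.AgentDbMixin.is_agent_down(agent.heartbeat_timestamp):
            # Skip agents with a stale heartbeat; they may come back later.
            continue
        return agent
    return None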
|
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state
# Minimum Covariance Determinant
# Implementation of the algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
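# Illustrative usage sketch (not part of the original module): a single
# c_step search on synthetic data with a few planted outliers. Wrapped in a
# helper so nothing runs at import time; the sizes are arbitrary and the
# 5-tuple unpacking mirrors the return value of _c_step below.
def _c_step_example(seed=42):
    rng = check_random_state(seed)
    X = rng.randn(40, 2)
    X[:5] += 10.  # plant five gross outliers
    location, covariance, det, support, dist = c_step(
        X, n_support=30, random_state=rng)
    # `support` flags the 30 observations whose scatter matrix had the
    # smallest determinant found; the planted outliers should be excluded.
    return location, covariance, support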
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
See Also
---------
`c_step` function
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
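# Illustrative usage sketch (not part of the original module): running
# select_candidates with purely random initial supports. Wrapped in a helper
# so nothing runs at import time; shapes and trial counts are arbitrary.
def _select_candidates_example(seed=1):
    rng = check_random_state(seed)
    X = rng.randn(80, 2)
    X[:8] += 7.  # plant a few outliers
    locations, covariances, supports, dists = select_candidates(
        X, n_support=50, n_trials=10, select=2, n_iter=5, random_state=rng)
    # The two best (location, covariance) couples out of ten random starts.
    return locations, covariances, supports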
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets of the data
before pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
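# Illustrative usage sketch (not part of the original module): calling
# fast_mcd on a small contaminated sample. Wrapped in a helper so nothing
# runs at import time; the data shape and contamination level are arbitrary.
def _fast_mcd_example(seed=0):
    rng = check_random_state(seed)
    X = rng.randn(100, 3)
    X[:10] += 8.  # contaminate 10% of the observations
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # `support` marks the observations used for the raw robust estimates;
    # the shifted points should mostly be left out of it.
    return location, covariance, support, dist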
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful when working with data whose mean is approximately (but not
exactly) zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
`raw_location_` : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
`raw_covariance_` : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
`raw_support_` : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
`location_` : array-like, shape (n_features,)
Estimated robust location
`covariance_` : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`support_` : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
`dist_` : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
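# Illustrative usage sketch (not part of the original module): fitting
# MinCovDet on contaminated data and reading back the re-weighted estimates.
# Kept in a helper so it does not run at import time.
def _min_cov_det_example(seed=0):
    rng = check_random_state(seed)
    X = rng.randn(200, 2)
    X[:20] += 6.  # contaminate 10% of the observations
    mcd = MinCovDet(random_state=rng).fit(X)
    # raw_* attributes hold the FastMCD output; location_ and covariance_
    # hold the corrected, re-weighted estimates, and dist_ the Mahalanobis
    # distances of the training observations.
    return mcd.location_, mcd.covariance_, mcd.support_, mcd.dist_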
|
|
from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, Person, Qualification, Reader, Room, TaggedItem,
Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
"""
Test that we can clear the behavior by calling prefetch_related()
"""
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""
Test we can follow a m2m and another m2m
"""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Test that objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[six.text_type(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
Test we can follow an m2m relation after a relation like ForeignKey
that doesn't have many objects
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
Test that we can follow a m2m relation after going through
the select_related reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
with self.assertRaises(AttributeError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
with self.assertRaises(ValueError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(self.author1.name), 1)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(self.author1.name), 1)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertEqual(sql.count(str(self.author1.id)), 1, sql)
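# Illustrative sketch (not part of the original suite): a Prefetch object
# with a filtered queryset resolves in one extra query and attaches only the
# matching related objects. Meant to be called from a test case using the
# fixtures created in PrefetchRelatedTests.setUpTestData above.
def _filtered_prefetch_sketch():
    books = list(Book.objects.prefetch_related(
        Prefetch('authors', queryset=Author.objects.filter(name='Charlotte'))))
    # Only 'Poems' and 'Jane Eyre' have Charlotte attached; the other books
    # get an empty, already-cached author list.
    return {book.title: [a.name for a in book.authors.all()] for book in books}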
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list containing a list of the objects in the
obj_iter. Then for each object in the obj_iter, the path will be
recursively travelled and the found objects are added to the return value.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
with self.assertRaises(ValueError):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
with self.assertRaises(AttributeError):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_apply_rel_filters_deprecation_shim(self):
# Simulate a missing `_apply_rel_filters` method.
del Person.houses.related_manager_cls._apply_rel_filters
        # Also remove `get_queryset` as it relies on `_apply_rel_filters`.
del Person.houses.related_manager_cls.get_queryset
try:
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1'))
))
finally:
# Deleting `related_manager_cls` will force the creation of a new
# class since it's a `cached_property`.
del Person.houses.related_manager_cls
msg = (
'The `django.db.models.fields.related_descriptors.ManyRelatedManager` class '
'must implement a `_apply_rel_filters()` method that accepts a `QuerySet` as '
'its single argument and returns an appropriately filtered version of it.'
)
        self.assertEqual(len(warns), 2)  # Once per person.
        self.assertEqual(str(warns[0].message), msg)
        self.assertEqual(str(warns[1].message), msg)
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
Test that we can traverse a 'content_object' with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[six.text_type(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[six.text_type(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[six.text_type(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[six.text_type(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[six.text_type(i_like) for i_like in author.favorite_authors.all()],
[six.text_type(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([six.text_type(self.author2)], [six.text_type(self.author3)]),
([six.text_type(self.author3)], [six.text_type(self.author1)]),
([six.text_type(self.author1)], [six.text_type(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() correctly prefetches objects because it does not use
        .iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Check that prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', force_text(queryset.query))
class Ticket25546Tests(TestCase):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
Before, prefetch queries were for 'addresses', 'first_time_authors', and
'first_time_authors__addresses'. The last query is the duplicate.
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
def test_prefetch(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertListEqual(list(book1.first_time_authors.all()), [self.author11, self.author12])
self.assertListEqual(list(book2.first_time_authors.all()), [self.author21])
self.assertListEqual(list(book1.first_time_authors.all()[0].addresses.all()), [self.author1_address1])
self.assertListEqual(list(book1.first_time_authors.all()[1].addresses.all()), [])
self.assertListEqual(list(book2.first_time_authors.all()[0].addresses.all()), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_prefetch_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertListEqual(book1.first_authors, [self.author11, self.author12])
self.assertListEqual(book2.first_authors, [self.author21])
self.assertListEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertListEqual(book1.first_authors[1].happy_place, [])
self.assertListEqual(book2.first_authors[0].happy_place, [self.author2_address1])
|
|
import time
import numpy as np
from numpy import sin, cos, pi, exp, sqrt, abs
from scipy.optimize import rosen
class SimpleQuadratic:
def fun(self, x):
return np.dot(x, x)
def der(self, x):
return 2. * x
def hess(self, x):
return 2. * np.eye(x.size)
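# Illustrative usage sketch (not part of the original benchmark file; the
# helper name below is hypothetical): a fun/der/hess triple like
# SimpleQuadratic is assumed to be consumed by scipy.optimize.minimize with a
# Hessian-aware method such as Newton-CG.
def _demo_minimize_simple_quadratic():
    from scipy.optimize import minimize
    problem = SimpleQuadratic()
    x0 = np.ones(4)
    # Newton-CG uses the analytic gradient (jac) and Hessian (hess).
    res = minimize(problem.fun, x0, jac=problem.der, hess=problem.hess,
                   method='Newton-CG')
    return res.x  # expected to be close to the zero vector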
class AsymmetricQuadratic:
def fun(self, x):
return np.dot(x, x) + x[0]
def der(self, x):
d = 2. * x
d[0] += 1
return d
def hess(self, x):
return 2. * np.eye(x.size)
class SlowRosen:
def fun(self, x):
time.sleep(40e-6)
return rosen(x)
class LJ:
"""
    The Lennard-Jones potential: a mathematically simple model that
    approximates the interaction between a pair of neutral atoms or molecules.
https://en.wikipedia.org/wiki/Lennard-Jones_potential
E = sum_ij V(r_ij)
where r_ij is the cartesian distance between atom i and atom j, and the
pair potential has the form
    V(r) = 4 * eps * ((sigma / r)**12 - (sigma / r)**6)
Notes
-----
the double loop over many atoms makes this *very* slow in Python. If it
were in a compiled language it would be much faster.
"""
def __init__(self, eps=1.0, sig=1.0):
self.sig = sig
self.eps = eps
def vij(self, r):
return 4. * self.eps * ((self.sig / r)**12 - (self.sig / r)**6)
def dvij(self, r):
p7 = 6. / self.sig * (self.sig / r)**7
p13 = -12. / self.sig * (self.sig / r)**13
return 4. * self.eps * (p7 + p13)
def fun(self, coords):
natoms = coords.size // 3
coords = np.reshape(coords, [natoms, 3])
energy = 0.
for i in range(natoms):
for j in range(i + 1, natoms):
dr = coords[j, :] - coords[i, :]
r = np.linalg.norm(dr)
energy += self.vij(r)
return energy
def der(self, coords):
natoms = coords.size // 3
coords = np.reshape(coords, [natoms, 3])
energy = 0.
grad = np.zeros([natoms, 3])
for i in range(natoms):
for j in range(i + 1, natoms):
dr = coords[j, :] - coords[i, :]
r = np.linalg.norm(dr)
energy += self.vij(r)
g = self.dvij(r)
grad[i, :] += -g * dr/r
grad[j, :] += g * dr/r
grad = grad.reshape([natoms * 3])
return grad
def get_random_configuration(self):
rnd = np.random.uniform(-1, 1, [3 * self.natoms])
return rnd * float(self.natoms)**(1. / 3)
class LJ38(LJ):
natoms = 38
target_E = -173.928427
class LJ30(LJ):
natoms = 30
target_E = -128.286571
class LJ20(LJ):
natoms = 20
target_E = -77.177043
class LJ13(LJ):
natoms = 13
target_E = -44.326801
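# Illustrative usage sketch (not part of the original benchmark file; the
# helper name below is hypothetical): evaluate the LJ13 cluster energy and its
# analytic gradient at a random starting configuration.
def _demo_lj13_energy_and_gradient():
    lj = LJ13()
    coords = lj.get_random_configuration()
    energy = lj.fun(coords)
    gradient = lj.der(coords)  # flat array of length 3 * natoms
    return energy, gradient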
class Booth:
target_E = 0.
solution = np.array([1., 3.])
xmin = np.array([-10., -10.])
xmax = np.array([10., 10.])
def fun(self, coords):
x, y = coords
return (x + 2. * y - 7.)**2 + (2. * x + y - 5.)**2
def der(self, coords):
x, y = coords
dfdx = 2. * (x + 2. * y - 7.) + 4. * (2. * x + y - 5.)
dfdy = 4. * (x + 2. * y - 7.) + 2. * (2. * x + y - 5.)
return np.array([dfdx, dfdy])
class Beale:
target_E = 0.
solution = np.array([3., 0.5])
xmin = np.array([-4.5, -4.5])
xmax = np.array([4.5, 4.5])
def fun(self, coords):
x, y = coords
p1 = (1.5 - x + x * y)**2
p2 = (2.25 - x + x * y**2)**2
p3 = (2.625 - x + x * y**3)**2
return p1 + p2 + p3
def der(self, coords):
x, y = coords
dfdx = (2. * (1.5 - x + x * y) * (-1. + y) +
2. * (2.25 - x + x * y**2) * (-1. + y**2) +
2. * (2.625 - x + x * y**3) * (-1. + y**3))
dfdy = (2. * (1.5 - x + x * y) * (x) +
2. * (2.25 - x + x * y**2) * (2. * y * x) +
2. * (2.625 - x + x * y**3) * (3. * x * y**2))
return np.array([dfdx, dfdy])
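# Illustrative sketch (not part of the original benchmark file; the helper
# name and test point are hypothetical): the analytic der() of Beale can be
# validated against a finite-difference estimate with scipy.optimize.check_grad.
def _demo_check_beale_gradient():
    from scipy.optimize import check_grad
    problem = Beale()
    x0 = np.array([2.0, 0.4])
    # check_grad returns the 2-norm of the difference between the analytic
    # gradient and a finite-difference approximation; it should be small.
    return check_grad(problem.fun, problem.der, x0)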
"""
Global Test functions for minimizers.
HolderTable, Ackley, and Levi have many competing local minima and are suited
for global minimizers such as basinhopping or differential_evolution.
(https://en.wikipedia.org/wiki/Test_functions_for_optimization)
See also https://mpra.ub.uni-muenchen.de/2718/1/MPRA_paper_2718.pdf
"""
class HolderTable:
target_E = -19.2085
solution = [8.05502, 9.66459]
xmin = np.array([-10, -10])
xmax = np.array([10, 10])
stepsize = 2.
temperature = 2.
def fun(self, x):
return - abs(sin(x[0]) * cos(x[1]) * exp(abs(1. - sqrt(x[0]**2 +
x[1]**2) / pi)))
def dabs(self, x):
"""derivative of absolute value"""
if x < 0:
return -1.
elif x > 0:
return 1.
else:
return 0.
    # der() is commented out below because it causes FloatingPointError in
    # basinhopping.
# def der(self, x):
# R = sqrt(x[0]**2 + x[1]**2)
# g = 1. - R / pi
# f = sin(x[0]) * cos(x[1]) * exp(abs(g))
# E = -abs(f)
#
# dRdx = x[0] / R
# dgdx = - dRdx / pi
# dfdx = cos(x[0]) * cos(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdx
# dEdx = - self.dabs(f) * dfdx
#
# dRdy = x[1] / R
# dgdy = - dRdy / pi
# dfdy = -sin(x[0]) * sin(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdy
# dEdy = - self.dabs(f) * dfdy
# return np.array([dEdx, dEdy])
class Ackley:
    # Note: this function is not smooth at the origin; the gradient will
    # never converge in the minimizer.
target_E = 0.
solution = [0., 0.]
xmin = np.array([-5, -5])
xmax = np.array([5, 5])
def fun(self, x):
E = (-20. * exp(-0.2 * sqrt(0.5 * (x[0]**2 + x[1]**2))) + 20. + np.e -
exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1]))))
return E
def der(self, x):
R = sqrt(x[0]**2 + x[1]**2)
term1 = -20. * exp(-0.2 * R)
term2 = -exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1])))
deriv1 = term1 * (-0.2 * 0.5 / R)
dfdx = 2. * deriv1 * x[0] - term2 * pi * sin(2. * pi * x[0])
dfdy = 2. * deriv1 * x[1] - term2 * pi * sin(2. * pi * x[1])
return np.array([dfdx, dfdy])
class Levi:
target_E = 0.
solution = [1., 1.]
xmin = np.array([-10, -10])
xmax = np.array([10, 10])
def fun(self, x):
E = (sin(3. * pi * x[0])**2 + (x[0] - 1.)**2 *
(1. + sin(3 * pi * x[1])**2) +
(x[1] - 1.)**2 * (1. + sin(2 * pi * x[1])**2))
return E
def der(self, x):
dfdx = (2. * 3. * pi *
cos(3. * pi * x[0]) * sin(3. * pi * x[0]) +
2. * (x[0] - 1.) * (1. + sin(3 * pi * x[1])**2))
dfdy = ((x[0] - 1.)**2 * 2. * 3. * pi * cos(3. * pi * x[1]) * sin(3. *
pi * x[1]) + 2. * (x[1] - 1.) *
(1. + sin(2 * pi * x[1])**2) + (x[1] - 1.)**2 *
2. * 2. * pi * cos(2. * pi * x[1]) * sin(2. * pi * x[1]))
return np.array([dfdx, dfdy])
class EggHolder:
target_E = -959.6407
solution = [512, 404.2319]
xmin = np.array([-512., -512])
xmax = np.array([512., 512])
def fun(self, x):
a = -(x[1] + 47) * np.sin(np.sqrt(abs(x[1] + x[0]/2. + 47)))
b = -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47))))
return a + b
class CrossInTray:
target_E = -2.06261
solution = [1.34941, -1.34941]
xmin = np.array([-10., -10])
xmax = np.array([10., 10])
def fun(self, x):
arg = abs(100 - sqrt(x[0]**2 + x[1]**2)/pi)
val = np.power(abs(sin(x[0]) * sin(x[1]) * exp(arg)) + 1., 0.1)
return -0.0001 * val
class Schaffer2:
target_E = 0
solution = [0., 0.]
xmin = np.array([-100., -100])
xmax = np.array([100., 100])
def fun(self, x):
num = np.power(np.sin(x[0]**2 - x[1]**2), 2) - 0.5
den = np.power(1 + 0.001 * (x[0]**2 + x[1]**2), 2)
return 0.5 + num / den
class Schaffer4:
target_E = 0.292579
solution = [0, 1.253131828927371]
xmin = np.array([-100., -100])
xmax = np.array([100., 100])
def fun(self, x):
num = cos(sin(abs(x[0]**2 - x[1]**2)))**2 - 0.5
den = (1+0.001*(x[0]**2 + x[1]**2))**2
return 0.5 + num / den
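# Illustrative sketch (not part of the original benchmark file; the helper
# name is hypothetical): the xmin/xmax attributes define box bounds that can
# be handed to a global minimizer such as scipy.optimize.differential_evolution,
# as suggested in the module docstring above.
def _demo_global_minimize_holder_table():
    from scipy.optimize import differential_evolution
    problem = HolderTable()
    bounds = list(zip(problem.xmin, problem.xmax))
    res = differential_evolution(problem.fun, bounds, seed=0)
    return res.fun  # expected to be close to problem.target_E (~ -19.2085)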
|
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves.urllib import parse as urllib
import webob
from jacket.api.storage.storage import common
from jacket.api.storage.storage.v2 import snapshots
from jacket import context
from jacket import db
from jacket.storage import exception
from jacket.objects import storage
from jacket.storage import test
from jacket.tests.storage.unit.api import fakes
from jacket.tests.storage.unit.api.v2 import stubs
from jacket.tests.storage.unit import fake_snapshot
from jacket.tests.storage.unit import fake_volume
from jacket.tests.storage.unit import utils
from jacket.storage import volume
CONF = cfg.CONF
UUID = '00000000-0000-0000-0000-000000000001'
INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'updated_at': None,
'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b',
'project_id': '7ffe17a15c724e2aa79fc839540aec15',
'display_name': 'Default name',
'display_description': 'Default description',
'deleted': None,
'volume': {'availability_zone': 'test_zone'}
}
def stub_snapshot_delete(self, context, snapshot):
if snapshot['id'] != UUID:
raise exception.SnapshotNotFound(snapshot['id'])
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != UUID:
raise exception.SnapshotNotFound(snapshot_id)
param = _get_default_snapshot_param()
return param
def stub_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
class SnapshotApiTest(test.TestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
self.controller = snapshots.SnapshotsController()
self.stubs.Set(storage, 'snapshot_get_all_by_project',
stubs.stub_snapshot_get_all_by_project)
self.stubs.Set(storage, 'snapshot_get_all',
stubs.stub_snapshot_get_all)
self.ctx = context.RequestContext('admin', 'fakeproject', True)
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_name_and_description')
def test_snapshot_create(self, mock_validate):
volume = utils.create_volume(self.ctx)
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": False,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
resp_dict = self.controller.create(req, body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name, resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
self.assertTrue(mock_validate.called)
self.assertIn('updated_at', resp_dict['snapshot'])
storage.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_force(self):
volume = utils.create_volume(self.ctx, status='in-use')
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
snapshot = {
"volume_id": volume.id,
"force": True,
"name": snapshot_name,
"description": snapshot_description
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
resp_dict = self.controller.create(req, body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot_name,
resp_dict['snapshot']['name'])
self.assertEqual(snapshot_description,
resp_dict['snapshot']['description'])
self.assertIn('updated_at', resp_dict['snapshot'])
snapshot = {
"volume_id": volume.id,
"force": "**&&^^%%$$##@@",
"name": "Snapshot Test Name",
"description": "Snapshot Test Desc"
}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.assertRaises(exception.InvalidParameterValue,
self.controller.create,
req,
body)
storage.volume_destroy(self.ctx, volume.id)
def test_snapshot_create_without_volume_id(self):
snapshot_name = 'Snapshot Test Name'
snapshot_description = 'Snapshot Test Desc'
body = {
"snapshot": {
"force": True,
"name": snapshot_name,
"description": snapshot_description
}
}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@mock.patch.object(volume.api.API, "update_snapshot",
side_effect=stubs.stub_snapshot_update)
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
@mock.patch('storage.storage.Volume.get_by_id')
@mock.patch('storage.storage.Snapshot.get_by_id')
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_name_and_description')
def test_snapshot_update(
self, mock_validate, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get, update_snapshot):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
expected = {
'snapshot': {
'id': UUID,
'volume_id': '1',
'status': u'available',
'size': 100,
'created_at': None,
'updated_at': None,
'name': u'Updated Test Name',
'description': u'Default description',
'metadata': {},
}
}
self.assertEqual(expected, res_dict)
self.assertTrue(mock_validate.called)
self.assertEqual(2, len(self.notifier.notifications))
def test_snapshot_update_missing_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, UUID, body)
def test_snapshot_update_invalid_body(self):
body = {'name': 'missing top level snapshot key'}
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, UUID, body)
def test_snapshot_update_not_found(self):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
updates = {
"name": "Updated Test Name",
}
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
'not-the-uuid', body)
@mock.patch.object(volume.api.API, "delete_snapshot",
side_effect=stubs.stub_snapshot_update)
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
@mock.patch('storage.storage.Volume.get_by_id')
@mock.patch('storage.storage.Snapshot.get_by_id')
def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get, delete_snapshot):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshot_id = UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
resp = self.controller.delete(req, snapshot_id)
self.assertEqual(202, resp.status_int)
def test_snapshot_delete_invalid_id(self):
self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, snapshot_id)
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
@mock.patch('storage.storage.Volume.get_by_id')
@mock.patch('storage.storage.Snapshot.get_by_id')
def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata'],
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID)
resp_dict = self.controller.show(req, UUID)
self.assertIn('snapshot', resp_dict)
self.assertEqual(UUID, resp_dict['snapshot']['id'])
self.assertIn('updated_at', resp_dict['snapshot'])
def test_snapshot_show_invalid_id(self):
snapshot_id = INVALID_UUID
req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, snapshot_id)
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
@mock.patch('storage.storage.Volume.get_by_id')
@mock.patch('storage.storage.Snapshot.get_by_id')
@mock.patch('storage.volume.api.API.get_all_snapshots')
def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id,
volume_get_by_id, snapshot_metadata_get):
snapshot = {
'id': UUID,
'volume_id': 1,
'status': 'available',
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
'expected_attrs': ['metadata']
}
ctx = context.RequestContext('admin', 'fake', True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
snapshots = storage.SnapshotList(storage=[snapshot_obj])
get_all_snapshots.return_value = snapshots
req = fakes.HTTPRequest.blank('/v2/snapshots/detail')
resp_dict = self.controller.detail(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
self.assertIn('updated_at', resp_snapshots[0])
resp_snapshot = resp_snapshots.pop()
self.assertEqual(UUID, resp_snapshot['id'])
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_limited_to_project(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_list_snapshots_with_limit_and_offset(self,
snapshot_metadata_get):
def list_snapshots_with_limit_and_offset(snaps, is_admin):
            req = fakes.HTTPRequest.blank('/v2/fake/snapshots?limit=1&offset=1',
                                          use_admin_context=is_admin)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
self.assertEqual(snaps[1].id, res['snapshots'][0]['id'])
self.assertIn('updated_at', res['snapshots'][0])
# Test that we get an empty list with an offset greater than the
# number of items
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=3')
self.assertEqual({'snapshots': []}, self.controller.index(req))
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(3)
# admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=True)
# non-admin case
list_snapshots_with_limit_and_offset(snaps, is_admin=False)
@mock.patch.object(storage, 'snapshot_get_all_by_project')
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
    def test_list_snapshots_with_wrong_limit_and_offset(self,
mock_metadata_get,
mock_snapshot_get_all):
"""Test list with negative and non numeric limit and offset."""
mock_snapshot_get_all.return_value = []
# Negative limit
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric limit
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Negative offset
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Non numeric offset
req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
# Test that we get an exception HTTPBadRequest(400) with an offset
# greater than the maximum offset value.
url = '/v2/snapshots?limit=1&offset=323245324356534235'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def _assert_list_next(self, expected_query=None, project='fakeproject',
**kwargs):
"""Check a page of snapshots list."""
# Since we are accessing v2 api directly we don't need to specify
# v2 in the request path, if we did, we'd get /v2/v2 links back
request_path = '/v2/%s/snapshots' % project
expected_path = request_path
# Construct the query if there are kwargs
if kwargs:
request_str = request_path + '?' + urllib.urlencode(kwargs)
else:
request_str = request_path
# Make the request
req = fakes.HTTPRequest.blank(request_str)
res = self.controller.index(req)
# We only expect to have a next link if there is an actual expected
# query.
if expected_query:
# We must have the links
self.assertIn('snapshots_links', res)
links = res['snapshots_links']
# Must be a list of links, even if we only get 1 back
            self.assertIsInstance(links, list)
next_link = links[0]
# rel entry must be next
self.assertIn('rel', next_link)
self.assertIn('next', next_link['rel'])
# href entry must have the right path
self.assertIn('href', next_link)
href_parts = urllib.urlparse(next_link['href'])
self.assertEqual(expected_path, href_parts.path)
# And the query from the next link must match what we were
# expecting
params = urllib.parse_qs(href_parts.query)
self.assertDictEqual(expected_query, params)
# Make sure we don't have links if we were not expecting them
else:
self.assertNotIn('snapshots_links', res)
def _create_db_snapshots(self, num_snaps):
volume = utils.create_volume(self.ctx)
snaps = [utils.create_snapshot(self.ctx,
volume.id,
display_name='snap' + str(i))
for i in range(num_snaps)]
self.addCleanup(storage.volume_destroy, self.ctx, volume.id)
for snap in snaps:
self.addCleanup(storage.snapshot_destroy, self.ctx, snap.id)
snaps.reverse()
return volume, snaps
def test_list_snapshots_next_link_default_limit(self):
"""Test that snapshot list pagination is limited by osapi_max_limit."""
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(3)
        # NOTE(geguileo): Since storage.api.storage.common.limited has already
        # been imported, its max_limit argument has already been bound to the
        # default value of 1000, so simply changing the config to 2 here would
        # not be seen by that default. That's why we need to mock it and pass
        # in the current value. We still need to set the default value because
        # other sections of the code use it, for example _get_collection_links.
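        # For example (hypothetical sketch of the gotcha, not actual project
        # code): a signature such as
        #     def limited(items, max_limit=CONF.osapi_max_limit): ...
        # evaluates CONF.osapi_max_limit once, when the module is imported, so
        # a later CONF.set_default('osapi_max_limit', 2) never reaches that
        # default; the wrappers below therefore re-read CONF and pass the
        # current value in explicitly.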
CONF.set_default('osapi_max_limit', 2)
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param):
# The link from the first page should link to the second
self._assert_list_next({'marker': [snaps[1].id]})
# Second page should have no next link
self._assert_list_next(marker=snaps[1].id)
def test_list_snapshots_next_link_with_limit(self):
"""Test snapshot list pagination with specific limit."""
self.stubs.UnsetAll()
volume, snaps = self._create_db_snapshots(2)
# The link from the first page should link to the second
self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]},
limit=1)
        # Even though there are no more elements, we should still get a next
        # link, per the specification.
expected = {'limit': ['1'], 'marker': [snaps[1].id]}
self._assert_list_next(expected, limit=1, marker=snaps[0].id)
# When we go beyond the number of elements there should be no more
# next links
self._assert_list_next(limit=1, marker=snaps[1].id)
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(3, len(res['snapshots']))
@mock.patch.object(storage, 'snapshot_get_all')
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get,
snapshot_get_all):
def get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
if 'project_id' in filters and 'tenant1' in filters['project_id']:
return [stubs.stub_snapshot(1, tenant_id='tenant1')]
else:
return []
snapshot_get_all.side_effect = get_all
req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1'
'&project_id=tenant1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_all_tenants_non_admin_gets_all_tenants(self,
snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1')
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
@mock.patch('storage.storage.snapshot_metadata_get', return_value=dict())
def test_non_admin_get_by_project(self, snapshot_metadata_get):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
res = self.controller.index(req)
self.assertIn('snapshots', res)
self.assertEqual(1, len(res['snapshots']))
def _create_snapshot_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_no_body(self):
self._create_snapshot_bad_body(body=None)
def test_create_missing_snapshot(self):
body = {'foo': {'a': 'b'}}
self._create_snapshot_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'snapshot': 'string'}
self._create_snapshot_bad_body(body=body)
class SnapshotSerializerTest(test.TestCase):
def _verify_snapshot(self, snap, tree):
self.assertEqual('snapshot', tree.tag)
for attr in ('id', 'status', 'size', 'created_at',
'name', 'description', 'volume_id'):
self.assertEqual(str(snap[attr]), tree.get(attr))
def test_snapshot_show_create_serializer(self):
serializer = snapshots.SnapshotTemplate()
raw_snapshot = dict(
id='snap_id',
status='snap_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap_name',
description='snap_desc',
display_description='snap_desc',
volume_id='vol_id',
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
raw_snapshots = [
dict(
id='snap1_id',
status='snap1_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap1_name',
description='snap1_desc',
volume_id='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
created_at=timeutils.utcnow(),
name='snap2_name',
description='snap2_desc',
volume_id='vol2_id',
)
]
text = serializer.serialize(dict(snapshots=raw_snapshots))
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
self.assertEqual(len(raw_snapshots), len(tree))
for idx, child in enumerate(tree):
self._verify_snapshot(raw_snapshots[idx], child)
|
|
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gettext
# establish _ in global namespace
gettext.install('opsmgr', '/usr/share/locale')
import logging
from opsmgr.inventory import persistent_mgr, resource_mgr
from opsmgr.common.utils import entry_exit, push_message, load_plugin_by_namespace
from opsmgr.inventory.data_model import Rack
I_MANAGER_RACK_HOOK = "opsmgr.inventory.interfaces.IManagerRackHook"
@entry_exit(exclude_index=[], exclude_name=[])
def add_rack(label, data_center='', room='', row='', notes=''):
"""add rack to the list of racks in the configuration managed
Args:
label: label for rack
data_center: data center location (free form)
room: Room in the data center of the rack (free form)
row: Row in the room of the rack (free form)
notes: freeform notes associated with this rack to describe its use, mgmt, etc.
Returns:
RC: integer return code
Message: string with message associated with return code
"""
_method_ = 'rack_mgr.add_rack'
label = label.strip()
message = None
session = persistent_mgr.create_database_session()
# get existing rack info for next set of checks
racks_info = persistent_mgr.get_all_racks(session)
for rack in racks_info:
if rack.label == label:
message = _(
"The rack label (%s) conflicts with a rack label in the configuration file.") \
% label
return 101, message
rack_info = Rack()
rack_info.label = label
rack_info.room = room
rack_info.row = row
rack_info.data_center = data_center
rack_info.notes = notes
hooks = _load_inventory_rack_plugins()
hook_name = 'unknown' # keeps pylint happy
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.add_rack_pre_save(rack_info)
except Exception as e:
logging.exception(e)
message = _("Error in plugin (%s). Unable to add rack: Reason: %s") % (hook_name, e)
return 102, message
persistent_mgr.add_racks(session, [rack_info])
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.add_rack_post_save(rack_info)
except Exception as e:
logging.exception(e)
message = _("After rack was added. Error in plugin (%s): %s") % (hook_name, e)
logging.info("%s::add rack(%s) success", _method_, label)
if not message:
message = _("Added rack successfully.")
session.close()
return 0, message
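# A minimal usage sketch of add_rack() (the label and locations below are made
# up): rc is 0 on success, 101 when the label already exists, and 102 when a
# pre-save plugin fails; message carries the translated status text.
#
#     rc, message = add_rack('rack-42', data_center='dc1', room='r2', row='7')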
@entry_exit(exclude_index=[], exclude_name=[])
def list_racks(labels=None, isbriefly=False, rackids=None):
"""Get a list of racks based on the information present in that arguments.
Args:
        labels: specify racks as a list of labels
        isbriefly: when True, return only the brief set of columns (label)
        rackids: specify racks as a list of rack ids; the data returned is
            limited to those racks
Returns:
integer with return code
dictionary with results based on parameters
Dictionary entries:
message: any message returned for the processing of this method
column_tags: array of column tags
column_titles: array of column titles
racks: list of rack information packed in a dictionary structure
"""
all_tags = ['label', 'rackid', 'data-center', 'room', 'row', 'notes']
brief_tags = ['label']
result = {}
session = persistent_mgr.create_database_session()
# decide the set of data to return
if isbriefly:
tags = brief_tags
else:
tags = all_tags
# get rack based on labels and rack ids
if labels:
racks, _not_found_racks = persistent_mgr.get_racks_by_labels(session, labels)
elif rackids:
racks, _not_found_racks = persistent_mgr.get_racks_by_ids(session, rackids)
else:
racks = persistent_mgr.get_all_racks(session)
# check if labels returned anything if specified
if len(racks) == 0 and labels:
message = _("No racks labeled as \'%s\'") % labels
result['message'] = message
return 101, result
# already filtered by get_racks_info call
filtered_racks = racks
# add table column info
table_columns_titles = []
result['column_tags'] = tags
for tag in tags:
table_columns_titles.append(_get_racktag_text_id(tag))
result['column_titles'] = table_columns_titles
result_racks = []
for rack in filtered_racks:
rack_dict = rack.to_dict_obj()
rack_output = {}
for tag in tags:
tag_value = rack_dict.get(tag)
if tag_value is None:
tag_value = ''
rack_output[tag] = tag_value
# add final form of rack info to result
result_racks.append(rack_output)
result['racks'] = result_racks
message = ""
result['message'] = message
session.close()
return 0, result
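# A minimal sketch of the list_racks() result shape (keys come from the code
# above; the rack values are made up):
#
#     rc, result = list_racks(labels=['rack-42'])
#     # result == {'column_tags': [...], 'column_titles': [...],
#     #            'racks': [{'label': 'rack-42', ...}], 'message': ''}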
@entry_exit(exclude_index=[], exclude_name=[])
def remove_rack(labels=None, all_racks=False, rackids=None):
'''Remove racks based on information present in the arguments
If the option rackids is specified, then the option labels will be ignored
Args:
        labels: list of labels of racks to remove
        all_racks: indicates that all racks (except the manager rack (1)) should be removed
        rackids: list of rack ids to remove
Returns:
ret return code
message message if provided with return code
'''
_method_ = 'rack_mgr.remove_rack'
session = persistent_mgr.create_database_session()
if labels or rackids:
all_racks = False
# get the right set of racks based on input
if rackids is not None:
racks, not_found_rack_values = persistent_mgr.get_racks_by_ids(session, rackids)
elif labels is not None:
racks, not_found_rack_values = persistent_mgr.get_racks_by_labels(session, labels)
    elif all_racks:
        racks = persistent_mgr.get_all_racks(session)
        # nothing was looked up by label or id, so nothing can be "not found"
        not_found_rack_values = []
else:
message = \
"Error: remove_rack called without specifying to remove either a label, id or all"
return -1, message
devices = persistent_mgr.get_all_devices(session)
hooks = _load_inventory_rack_plugins()
hook_name = 'unknown' # keeps pylint happy
message = None
remove_racks = []
not_remove_racks = []
not_remove_racks_msgs = []
for rack in racks:
label = rack.label
rack_id = rack.rack_id
devices_in_rack = []
for device in devices:
if device.rack_id == rack_id:
devices_in_rack.append(device)
logging.warning("%s::found device (%s) still in rack.", _method_, device.label)
if len(devices_in_rack) > 0:
# don't allow removing if rack in use
not_remove_racks.append(rack)
not_remove_racks_msgs.append(
_("Rack (%s) has devices. Only racks without devices can be removed.") % label)
logging.warning(
"%s::rack (%s) has devices. only empty racks can be removed.", _method_, label)
continue
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.remove_rack_pre_save(rack)
except Exception as e:
logging.exception(e)
not_remove_racks.append(rack)
not_remove_racks_msgs.append(
_("Error in plugin (%s). Unable to remove resource: Reason: %s") % (hook_name, e))
continue
# ok to remove rack.
remove_racks.append(rack)
result_message = ""
ret = 0
if len(remove_racks) > 0:
persistent_mgr.delete_racks(session, remove_racks)
labels_message = resource_mgr.get_labels_message(remove_racks)
message = push_message(message, _("racks removed: %s") % labels_message)
# Call hook for remove_rack_post_save
for rack in remove_racks:
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.remove_rack_post_save(rack)
except Exception as e:
logging.exception(e)
message = push_message(message, _("After rack (%s) was removed. "
"Error in plugin (%s): %s") % (rack.label, hook_name, e))
if len(not_remove_racks) > 0:
labels_message = resource_mgr.get_labels_message(not_remove_racks)
message = push_message(message, _("racks not removed: %s") % labels_message)
for rack_msg in not_remove_racks_msgs:
message = push_message(message, rack_msg)
ret = 102
if len(not_found_rack_values) > 0:
labels_message = resource_mgr.get_labels_message(not_found_rack_values)
message = push_message(message, _("racks not found: %s") % labels_message)
ret = 101
message = push_message(message, result_message)
session.close()
return ret, message
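# A minimal usage sketch of remove_rack() (the rack id below is made up):
# racks that still contain devices are reported rather than removed.
#
#     ret, message = remove_rack(rackids=['3'])
#     # ret is 0 when everything was removed, 102 when some racks could not be
#     # removed (e.g. they still hold devices), 101 when some ids were not found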
@entry_exit(exclude_index=[], exclude_name=[])
def change_rack_properties(label=None, rackid=None, new_label=None, data_center=None,
room=None, row=None, notes=None):
''' Change the rack properties in the data store
Arguments:
label: label for the rack
rackid: rackid for the rack
new_label: new label to set for the rack
data_center: free form field to describe the data center
room: free form field for the room in the data center
row: free form field for the row in the room
notes: free form set of notes about the rack
Returns:
rc: int indicating the success (0) or failure of the method
message: nls enabled message to respond with on failure
'''
_method_ = 'rack_mgr.change_rack_properties'
logging.info("ENTRY %s", _method_)
message = None
session = persistent_mgr.create_database_session()
# check if no properties changed
properties = [new_label, data_center, room, row, notes]
if all(prop is None for prop in properties):
logging.info("EXIT %s Nothing to change", _method_)
return 0, ""
if rackid:
rack = persistent_mgr.get_rack_by_id(session, rackid)
else:
rack = persistent_mgr.get_rack_by_label(session, label)
rack_des = label if rackid is None else rackid
if not rack:
logging.error(
"%s::Failed to change rack properties, rack (%s) is not found.", _method_, rack_des)
message = _(
"Failed to change rack properties, rack (%s) is not found.") % (rack_des)
logging.info("EXIT %s rack not found", _method_)
return 101, message
if new_label is not None:
# trying to change rack label
if new_label != rack.label:
# have a different label need to check if already exists....
rack_new_label = persistent_mgr.get_rack_by_label(session, new_label)
if rack_new_label is not None:
# found the new label exists
error_message = _("Failed to change rack properties. The new rack label "
"%(new_label)s already exists.") % {"new_label": new_label}
return 102, error_message
# ok now to set the new label
rack.label = new_label
if data_center is not None:
rack.data_center = data_center
if room is not None:
rack.room = room
if row is not None:
rack.row = row
if notes is not None:
rack.notes = notes
hooks = _load_inventory_rack_plugins()
hook_name = 'unknown' # keeps pylint happy
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.change_rack_pre_save(rack)
except Exception as e:
logging.exception(e)
message = _("Error in plugin (%s). Unable to change rack: Reason: %s") % (hook_name, e)
return 102, message
persistent_mgr.update_rack(session)
try:
for hook_name, hook_plugin in hooks.items():
hook_plugin.change_rack_post_save(rack)
except Exception as e:
logging.exception(e)
message = _("After rack properties were changed, Error in plugin (%s): %s") % (hook_name, e)
logging.info("EXIT %s rack properties changed", _method_)
if not message:
message = _("Changed rack property successfully.")
session.close()
return 0, message
@entry_exit(exclude_index=[], exclude_name=[])
def get_rack_id_by_label(rack_label):
"""
Find the rack id for the rack label
Returns:
rack_id or None
"""
rack_id = None
session = persistent_mgr.create_database_session()
rack = persistent_mgr.get_rack_by_label(session, rack_label)
if rack:
rack_id = rack.rack_id
session.close()
return rack_id
def _get_racktag_text_id(tag_name):
racktag_id = {
'rackid': _('id'),
'label': _('label'),
'mgrRackId': _('manager rack'),
'role': _('role'),
'data-center': _('data center'),
'room': _('room'),
'row': _('row'),
'notes': _('notes'),
}
if tag_name in racktag_id:
return racktag_id[tag_name]
return tag_name
def _load_inventory_rack_plugins():
"""
Find the inventory rack plugins and return them as
    dictionary[name] = plugin class
"""
return load_plugin_by_namespace(I_MANAGER_RACK_HOOK)
|
|
import logging
import re
import random
import hashlib
from datetime import datetime, timedelta, tzinfo
from time import time, gmtime, strftime
import os.path
from os.path import dirname
from urlparse import urljoin
from django.conf import settings
from django.db import models
from django.db.models import signals, Q, Count, Max
from django.db.models.fields.files import FieldFile, ImageFieldFile
from django.core.mail import send_mail
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_unicode  # needed by JSONField.get_db_prep_save
from django.core.files.storage import FileSystemStorage
from django.core.files.base import ContentFile
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.template import Context, TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.serializers.json import DjangoJSONEncoder
try:
import django.utils.simplejson as json
except ImportError: # Django 1.5 no longer bundles simplejson
import json
# HACK: Django 1.2 is missing receiver and user_logged_in
try:
from django.dispatch import receiver
from django.contrib.auth.signals import user_logged_in
except ImportError, e:
receiver = False
user_logged_in = False
try:
from tower import ugettext_lazy as _
except ImportError, e:
from django.utils.translation import ugettext_lazy as _
try:
from funfactory.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from PIL import Image
except ImportError:
import Image
try:
import taggit
from taggit.managers import TaggableManager
from taggit.models import Tag, TaggedItem
except ImportError:
taggit = None
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
import badger
from .signals import (badge_will_be_awarded, badge_was_awarded,
nomination_will_be_approved, nomination_was_approved,
nomination_will_be_accepted, nomination_was_accepted,
nomination_will_be_rejected, nomination_was_rejected,
user_will_be_nominated, user_was_nominated)
OBI_VERSION = "0.5.0"
IMG_MAX_SIZE = getattr(settings, "BADGER_IMG_MAX_SIZE", (256, 256))
SITE_ISSUER = getattr(settings, 'BADGER_SITE_ISSUER', {
"origin": "http://mozilla.org",
"name": "Badger",
"org": "Mozilla",
"contact": "lorchard@mozilla.com"
})
# Set up a file system for badge uploads that can be kept separate from the
# rest of /media if necessary. Lots of hackery to ensure sensible defaults.
UPLOADS_ROOT = getattr(settings, 'BADGER_MEDIA_ROOT',
os.path.join(getattr(settings, 'MEDIA_ROOT', 'media/'), 'uploads'))
UPLOADS_URL = getattr(settings, 'BADGER_MEDIA_URL',
urljoin(getattr(settings, 'MEDIA_URL', '/media/'), 'uploads/'))
BADGE_UPLOADS_FS = FileSystemStorage(location=UPLOADS_ROOT,
base_url=UPLOADS_URL)
DEFAULT_BADGE_IMAGE = getattr(settings, 'BADGER_DEFAULT_BADGE_IMAGE',
"%s/fixtures/default-badge.png" % dirname(__file__))
DEFAULT_BADGE_IMAGE_URL = getattr(settings, 'BADGER_DEFAULT_BADGE_IMAGE_URL',
urljoin(getattr(settings, 'MEDIA_URL', '/media/'), 'img/default-badge.png'))
TIME_ZONE_OFFSET = getattr(settings, "TIME_ZONE_OFFSET", timedelta(0))
MK_UPLOAD_TMPL = '%(base)s/%(h1)s/%(h2)s/%(hash)s_%(field_fn)s_%(now)s_%(rand)04d.%(ext)s'
DEFAULT_HTTP_PROTOCOL = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
CLAIM_CODE_LENGTH = getattr(settings, "CLAIM_CODE_LENGTH", 6)
def _document_django_model(cls):
"""Adds meta fields to the docstring for better autodoccing"""
fields = cls._meta.fields
doc = cls.__doc__
if not doc.endswith('\n\n'):
doc = doc + '\n\n'
for f in fields:
doc = doc + ' :arg {0}:\n'.format(f.name)
cls.__doc__ = doc
return cls
def scale_image(img_upload, img_max_size):
"""Crop and scale an image file."""
try:
img = Image.open(img_upload)
except IOError:
return None
src_width, src_height = img.size
src_ratio = float(src_width) / float(src_height)
dst_width, dst_height = img_max_size
dst_ratio = float(dst_width) / float(dst_height)
if dst_ratio < src_ratio:
crop_height = src_height
crop_width = crop_height * dst_ratio
x_offset = int(float(src_width - crop_width) / 2)
y_offset = 0
else:
crop_width = src_width
crop_height = crop_width / dst_ratio
x_offset = 0
y_offset = int(float(src_height - crop_height) / 2)
img = img.crop((x_offset, y_offset,
x_offset + int(crop_width), y_offset + int(crop_height)))
img = img.resize((dst_width, dst_height), Image.ANTIALIAS)
if img.mode != "RGB":
img = img.convert("RGB")
new_img = StringIO()
img.save(new_img, "PNG")
img_data = new_img.getvalue()
return ContentFile(img_data)
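# A minimal usage sketch (the file name is made up): scale_image() returns a
# ContentFile of PNG data cropped and scaled to fit img_max_size, or None when
# PIL cannot read the upload.
#
#     scaled = scale_image(open('some-upload.jpg', 'rb'), IMG_MAX_SIZE)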
# Taken from http://stackoverflow.com/a/4019144
def slugify(txt):
"""A custom version of slugify that retains non-ascii characters. The
purpose of this function in the application is to make URLs more readable
in a browser, so there are some added heuristics to retain as much of the
title meaning as possible while excluding characters that are troublesome
to read in URLs. For example, question marks will be seen in the browser
    URL as %3F and are therefore unreadable. Although non-ascii characters will
also be hex-encoded in the raw URL, most browsers will display them as
human-readable glyphs in the address bar -- those should be kept in the
slug."""
# remove trailing whitespace
txt = txt.strip()
    # remove spaces before and after dashes
    # NOTE: re.UNICODE must be passed via the flags keyword; as a fourth
    # positional argument it would be interpreted as the count parameter.
    txt = re.sub(r'\s*-\s*', '-', txt, flags=re.UNICODE)
    # replace remaining spaces with dashes
    txt = re.sub(r'[\s/]', '-', txt, flags=re.UNICODE)
    # replace colons between numbers with dashes
    txt = re.sub(r'(\d):(\d)', r'\1-\2', txt, flags=re.UNICODE)
    # replace double quotes with single quotes
    txt = re.sub('"', "'", txt, flags=re.UNICODE)
    # remove some characters altogether
    txt = re.sub(r'[?,:!@#~`+=$%^&\\*()\[\]{}<>]', '', txt, flags=re.UNICODE)
return txt
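# A minimal sketch of the behaviour (the input is made up): spaces become
# dashes while troublesome characters such as ':' and '?' are stripped.
#
#     slugify(u'How to: Fly a Kite?')   # -> u'How-to-Fly-a-Kite'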
def get_permissions_for(self, user):
"""Mixin method to collect permissions for a model instance"""
pre = 'allows_'
pre_len = len(pre)
methods = (m for m in dir(self) if m.startswith(pre))
perms = dict(
(m[pre_len:], getattr(self, m)(user))
for m in methods
)
return perms
def mk_upload_to(field_fn, ext, tmpl=MK_UPLOAD_TMPL):
"""upload_to builder for file upload fields"""
def upload_to(instance, filename):
base, slug = instance.get_upload_meta()
slug_hash = (hashlib.md5(slug.encode('utf-8', 'ignore'))
.hexdigest())
return tmpl % dict(now=int(time()), rand=random.randint(0, 1000),
slug=slug[:50], base=base, field_fn=field_fn,
pk=instance.pk,
hash=slug_hash, h1=slug_hash[0], h2=slug_hash[1],
ext=ext)
return upload_to
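# A minimal sketch of the paths produced by the template above (the timestamp
# and random suffix are made up): a Badge with slug "foo" stores its image as
#
#     badge/a/c/acbd18db4cc2f85cedef654fccc4a4d8_image_1415726400_0042.png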
class JSONField(models.TextField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly
see: http://djangosnippets.org/snippets/1478/
"""
# Used so to_python() is called
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if not value:
return dict()
try:
if (isinstance(value, basestring) or
type(value) is unicode):
return json.loads(value)
except ValueError:
return dict()
return value
def get_db_prep_save(self, value, connection):
"""Convert our JSON object to a string before we save"""
if not value:
return '{}'
if isinstance(value, dict):
value = json.dumps(value, cls=DjangoJSONEncoder)
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
# Tell South that this field isn't all that special
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^badger.models.JSONField"])
except ImportError, e:
pass
class SearchManagerMixin(object):
"""Quick & dirty manager mixin for search"""
# See: http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
def _normalize_query(self, query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
        ''' Splits the query string into individual keywords, getting rid of
            unnecessary spaces and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
# See: http://www.julienphalip.com/blog/2008/08/16/adding-search-django-site-snap/
def _get_query(self, query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
query = None # Query to search for every search term
terms = self._normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
def search(self, query_string, sort='title'):
"""Quick and dirty keyword search on submissions"""
# TODO: Someday, replace this with something like Sphinx or another real search engine
strip_qs = query_string.strip()
if not strip_qs:
return self.all_sorted(sort).order_by('-modified')
else:
query = self._get_query(strip_qs, self.search_fields)
return self.all_sorted(sort).filter(query).order_by('-modified')
def all_sorted(self, sort=None):
"""Apply to .all() one of the sort orders supported for views"""
queryset = self.all()
if sort == 'title':
return queryset.order_by('title')
else:
return queryset.order_by('-created')
class BadgerException(Exception):
"""General Badger model exception"""
class BadgeException(BadgerException):
"""Badge model exception"""
class BadgeAwardNotAllowedException(BadgeException):
"""Attempt to award a badge not allowed."""
class BadgeAlreadyAwardedException(BadgeException):
"""Attempt to award a unique badge twice."""
class BadgeDeferredAwardManagementNotAllowedException(BadgeException):
"""Attempt to manage deferred awards not allowed."""
class BadgeManager(models.Manager, SearchManagerMixin):
"""Manager for Badge model objects"""
search_fields = ('title', 'slug', 'description', )
def allows_add_by(self, user):
if user.is_anonymous():
return False
if getattr(settings, "BADGER_ALLOW_ADD_BY_ANYONE", False):
return True
if user.has_perm('badger.add_badge'):
return True
return False
def allows_grant_by(self, user):
if user.is_anonymous():
return False
if user.has_perm('badger.grant_deferredaward'):
return True
return False
def top_tags(self, min_count=2, limit=20):
"""Assemble list of top-used tags"""
if not taggit:
return []
# TODO: There has got to be a better way to do this. I got lost in
# Django model bits, though.
# Gather list of tags sorted by use frequency
ct = ContentType.objects.get_for_model(Badge)
tag_counts = (TaggedItem.objects
.values('tag')
.annotate(count=Count('id'))
.filter(content_type=ct, count__gte=min_count)
.order_by('-count'))[:limit]
# Gather set of tag IDs from list
tag_ids = set(x['tag'] for x in tag_counts)
# Gather and map tag objects to IDs
tags_by_id = dict((x.pk, x)
for x in Tag.objects.filter(pk__in=tag_ids))
# Join tag objects up with counts
tags_with_counts = [
dict(count=x['count'], tag=tags_by_id[x['tag']])
for x in tag_counts]
return tags_with_counts
@_document_django_model
class Badge(models.Model):
"""Representation of a badge"""
objects = BadgeManager()
title = models.CharField(max_length=255, blank=False, unique=True,
help_text="Short, descriptive title")
slug = models.SlugField(blank=False, unique=True,
help_text="Very short name, for use in URLs and links")
description = models.TextField(blank=True,
help_text="Longer description of the badge and its criteria")
image = models.ImageField(blank=True, null=True,
storage=BADGE_UPLOADS_FS, upload_to=mk_upload_to('image','png'),
help_text="Upload an image to represent the badge")
prerequisites = models.ManyToManyField('self', symmetrical=False,
blank=True, null=True,
help_text="When all of the selected badges have been awarded, this "
"badge will be automatically awarded.")
# TODO: Rename? Eventually we'll want a globally-unique badge. That is, one
# unique award for one person for the whole site.
unique = models.BooleanField(default=True,
help_text="Should awards of this badge be limited to "
"one-per-person?")
nominations_accepted = models.BooleanField(default=True, blank=True,
help_text="Should this badge accept nominations from "
"other users?")
nominations_autoapproved = models.BooleanField(default=False, blank=True,
help_text="Should all nominations be automatically approved?")
if taggit:
tags = TaggableManager(blank=True)
creator = models.ForeignKey(User, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True, blank=False)
modified = models.DateTimeField(auto_now=True, blank=False)
class Meta:
unique_together = ('title', 'slug')
ordering = ['-modified', '-created']
permissions = (
("manage_deferredawards",
"Can manage deferred awards for this badge"),
)
get_permissions_for = get_permissions_for
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('badger.views.detail', args=(self.slug,))
def get_upload_meta(self):
return ("badge", self.slug)
def clean(self):
if self.image:
scaled_file = scale_image(self.image.file, IMG_MAX_SIZE)
if not scaled_file:
raise ValidationError(_('Cannot process image'))
self.image.file = scaled_file
def save(self, **kwargs):
"""Save the submission, updating slug and screenshot thumbnails"""
if not self.slug:
self.slug = slugify(self.title)
super(Badge, self).save(**kwargs)
if notification:
if self.creator:
notification.send([self.creator], 'badge_edited',
dict(badge=self,
protocol=DEFAULT_HTTP_PROTOCOL))
def delete(self, **kwargs):
"""Make sure deletes cascade to awards"""
self.award_set.all().delete()
super(Badge, self).delete(**kwargs)
def allows_detail_by(self, user):
# TODO: Need some logic here, someday.
return True
def allows_edit_by(self, user):
if user.is_anonymous():
return False
if user.has_perm('badger.change_badge'):
return True
if user == self.creator:
return True
return False
def allows_delete_by(self, user):
if user.is_anonymous():
return False
if user.has_perm('badger.change_badge'):
return True
if user == self.creator:
return True
return False
def allows_award_to(self, user):
"""Is award_to() allowed for this user?"""
if None == user:
return True
if user.is_anonymous():
return False
if user.is_staff or user.is_superuser:
return True
if user == self.creator:
return True
# TODO: List of delegates for whom awarding is allowed
return False
def allows_manage_deferred_awards_by(self, user):
"""Can this user manage deferred awards"""
if user.is_anonymous():
return False
if user.has_perm('badger.manage_deferredawards'):
return True
if user == self.creator:
return True
return False
def generate_deferred_awards(self, user, amount=10, reusable=False):
"""Generate a number of deferred awards with a claim group code"""
if not self.allows_manage_deferred_awards_by(user):
raise BadgeDeferredAwardManagementNotAllowedException()
return (DeferredAward.objects.generate(self, user, amount, reusable))
def get_claim_group(self, claim_group):
"""Get all the deferred awards for a claim group code"""
return DeferredAward.objects.filter(claim_group=claim_group)
def delete_claim_group(self, user, claim_group):
"""Delete all the deferred awards for a claim group code"""
if not self.allows_manage_deferred_awards_by(user):
raise BadgeDeferredAwardManagementNotAllowedException()
self.get_claim_group(claim_group).delete()
@property
def claim_groups(self):
"""Produce a list of claim group IDs available"""
return DeferredAward.objects.get_claim_groups(badge=self)
def award_to(self, awardee=None, email=None, awarder=None,
description='', raise_already_awarded=False):
"""Award this badge to the awardee on the awarder's behalf"""
# If no awarder given, assume this is on the badge creator's behalf.
if not awarder:
awarder = self.creator
if not self.allows_award_to(awarder):
raise BadgeAwardNotAllowedException()
# If we have an email, but no awardee, try looking up the user.
if email and not awardee:
qs = User.objects.filter(email=email)
if not qs:
# If there's no user for this email address, create a
# DeferredAward for future claiming.
if self.unique and DeferredAward.objects.filter(
badge=self, email=email).exists():
raise BadgeAlreadyAwardedException()
da = DeferredAward(badge=self, email=email)
da.save()
return da
# Otherwise, we'll use the most recently created user
awardee = qs.latest('date_joined')
if self.unique and self.is_awarded_to(awardee):
if raise_already_awarded:
raise BadgeAlreadyAwardedException()
else:
return Award.objects.filter(user=awardee, badge=self)[0]
return Award.objects.create(user=awardee, badge=self,
creator=awarder,
description=description)
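        # A minimal sketch of award_to() above (user and email are made up):
        # it creates an Award for an existing user, or a DeferredAward that
        # can be claimed later when only an email address is known.
        #
        #     badge.award_to(awardee=some_user)          # -> Award
        #     badge.award_to(email='new@example.com')    # -> DeferredAward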
def check_prerequisites(self, awardee, dep_badge, award):
"""Check the prerequisites for this badge. If they're all met, award
this badge to the user."""
if self.is_awarded_to(awardee):
# Not unique, but badge auto-award from prerequisites should only
# happen once.
return None
for badge in self.prerequisites.all():
if not badge.is_awarded_to(awardee):
# Bail on the first unmet prerequisites
return None
return self.award_to(awardee)
def is_awarded_to(self, user):
"""Has this badge been awarded to the user?"""
return Award.objects.filter(user=user, badge=self).count() > 0
def progress_for(self, user):
"""Get or create (but not save) a progress record for a user"""
try:
# Look for an existing progress record...
p = Progress.objects.get(user=user, badge=self)
except Progress.DoesNotExist:
# If none found, create a new one but don't save it yet.
p = Progress(user=user, badge=self)
return p
def allows_nominate_for(self, user):
"""Is nominate_for() allowed for this user?"""
if not self.nominations_accepted:
return False
if None == user:
return True
if user.is_anonymous():
return False
if user.is_staff or user.is_superuser:
return True
if user == self.creator:
return True
# TODO: Flag to enable / disable nominations from anyone
# TODO: List of delegates from whom nominations are accepted
return True
def nominate_for(self, nominee, nominator=None):
"""Nominate a nominee for this badge on the nominator's behalf"""
nomination = Nomination.objects.create(badge=self, creator=nominator,
nominee=nominee)
if notification:
if self.creator:
notification.send([self.creator], 'nomination_submitted',
dict(nomination=nomination,
protocol=DEFAULT_HTTP_PROTOCOL))
if self.nominations_autoapproved:
nomination.approve_by(self.creator)
return nomination
def is_nominated_for(self, user):
return Nomination.objects.filter(nominee=user, badge=self).count() > 0
def as_obi_serialization(self, request=None):
"""Produce an Open Badge Infrastructure serialization of this badge"""
if request:
base_url = request.build_absolute_uri('/')[:-1]
else:
base_url = 'http://%s' % (Site.objects.get_current().domain,)
# see: https://github.com/brianlovesdata/openbadges/wiki/Assertions
if not self.creator:
issuer = SITE_ISSUER
else:
issuer = {
# TODO: Get from user profile instead?
"origin": urljoin(base_url, self.creator.get_absolute_url()),
"name": self.creator.username,
"contact": self.creator.email
}
data = {
# The version of the spec/hub this manifest is compatible with. Use
# "0.5.0" for the beta.
"version": OBI_VERSION,
# TODO: truncate more intelligently
"name": self.title[:128],
# TODO: truncate more intelligently
"description": self.description[:128] or self.title[:128],
"criteria": urljoin(base_url, self.get_absolute_url()),
"issuer": issuer
}
image_url = self.image and self.image.url or DEFAULT_BADGE_IMAGE_URL
data['image'] = urljoin(base_url, image_url)
return data
class AwardManager(models.Manager):
def get_query_set(self):
return super(AwardManager, self).get_query_set().exclude(hidden=True)
@_document_django_model
class Award(models.Model):
"""Representation of a badge awarded to a user"""
admin_objects = models.Manager()
objects = AwardManager()
description = models.TextField(blank=True,
help_text="Explanation and evidence for the badge award")
badge = models.ForeignKey(Badge)
image = models.ImageField(blank=True, null=True,
storage=BADGE_UPLOADS_FS,
upload_to=mk_upload_to('image','png'))
claim_code = models.CharField(max_length=32, blank=True,
default='', unique=False, db_index=True,
help_text="Code used to claim this award")
user = models.ForeignKey(User, related_name="award_user")
creator = models.ForeignKey(User, related_name="award_creator",
blank=True, null=True)
hidden = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, blank=False)
modified = models.DateTimeField(auto_now=True, blank=False)
get_permissions_for = get_permissions_for
class Meta:
ordering = ['-modified', '-created']
def __unicode__(self):
by = self.creator and (u' by %s' % self.creator) or u''
return u'Award of %s to %s%s' % (self.badge, self.user, by)
@models.permalink
def get_absolute_url(self):
return ('badger.views.award_detail', (self.badge.slug, self.pk))
def get_upload_meta(self):
u = self.user.username
return ("award/%s/%s/%s" % (u[0], u[1], u), self.badge.slug)
def allows_detail_by(self, user):
# TODO: Need some logic here, someday.
return True
def allows_delete_by(self, user):
if user.is_anonymous():
return False
if user == self.user:
return True
if user == self.creator:
return True
if user.has_perm('badger.change_award'):
return True
return False
def save(self, *args, **kwargs):
# Signals and some bits of logic only happen on a new award.
is_new = not self.pk
if is_new:
# Bail if this is an attempt to double-award a unique badge
if self.badge.unique and self.badge.is_awarded_to(self.user):
raise BadgeAlreadyAwardedException()
# Only fire will-be-awarded signal on a new award.
badge_will_be_awarded.send(sender=self.__class__, award=self)
super(Award, self).save(*args, **kwargs)
# Called after super.save(), so we have some auto-gen fields
if badger.settings.BAKE_AWARD_IMAGES:
self.bake_obi_image()
if is_new:
# Only fire was-awarded signal on a new award.
badge_was_awarded.send(sender=self.__class__, award=self)
if notification:
if self.creator:
notification.send([self.badge.creator], 'badge_awarded',
dict(award=self,
protocol=DEFAULT_HTTP_PROTOCOL))
notification.send([self.user], 'award_received',
dict(award=self,
protocol=DEFAULT_HTTP_PROTOCOL))
# Since this badge was just awarded, check the prerequisites on all
# badges that count this as one.
for dep_badge in self.badge.badge_set.all():
dep_badge.check_prerequisites(self.user, self.badge, self)
# Reset any progress for this user & badge upon award.
Progress.objects.filter(user=self.user, badge=self.badge).delete()
def delete(self):
"""Make sure nominations get deleted along with awards"""
Nomination.objects.filter(award=self).delete()
super(Award, self).delete()
def as_obi_assertion(self, request=None):
badge_data = self.badge.as_obi_serialization(request)
if request:
base_url = request.build_absolute_uri('/')[:-1]
else:
base_url = 'http://%s' % (Site.objects.get_current().domain,)
# If this award has a creator (ie. not system-issued), tweak the issuer
# data to reflect award creator.
# TODO: Is this actually a good idea? Or should issuer be site-wide
if self.creator:
badge_data['issuer'] = {
# TODO: Get from user profile instead?
"origin": base_url,
"name": self.creator.username,
"contact": self.creator.email
}
# see: https://github.com/brianlovesdata/openbadges/wiki/Assertions
# TODO: This salt is stable, and the badge.pk is generally not
# disclosed anywhere, but is it obscured enough?
hash_salt = (hashlib.md5('%s-%s' % (self.badge.pk, self.pk))
.hexdigest())
recipient_text = '%s%s' % (self.user.email, hash_salt)
recipient_hash = ('sha256$%s' % hashlib.sha256(recipient_text)
.hexdigest())
assertion = {
"recipient": recipient_hash,
"salt": hash_salt,
"evidence": urljoin(base_url, self.get_absolute_url()),
# TODO: implement award expiration
# "expires": self.expires.strftime('%s'),
"issued_on": self.created.strftime('%s'),
"badge": badge_data
}
return assertion
def bake_obi_image(self, request=None):
"""Bake the OBI JSON badge award assertion into a copy of the original
badge's image, if one exists."""
if request:
base_url = request.build_absolute_uri('/')
else:
base_url = 'http://%s' % (Site.objects.get_current().domain,)
if self.badge.image:
# Make a duplicate of the badge image
self.badge.image.open()
img_copy_fh = StringIO(self.badge.image.file.read())
else:
# Make a copy of the default badge image
img_copy_fh = StringIO(open(DEFAULT_BADGE_IMAGE, 'rb').read())
try:
# Try processing the image copy, bail if the image is bad.
img = Image.open(img_copy_fh)
except IOError, e:
return False
# Here's where the baking gets done. JSON representation of the OBI
# assertion gets written into the "openbadges" metadata field
# see: http://blog.client9.com/2007/08/python-pil-and-png-metadata-take-2.html
# see: https://github.com/mozilla/openbadges/blob/development/lib/baker.js
# see: https://github.com/mozilla/openbadges/blob/development/controllers/baker.js
try:
from PIL import PngImagePlugin
except ImportError,e:
import PngImagePlugin
meta = PngImagePlugin.PngInfo()
# TODO: Will need this, if we stop doing hosted assertions
# assertion = self.as_obi_assertion(request)
# meta.add_text('openbadges', json.dumps(assertion))
hosted_assertion_url = '%s%s' % (
base_url, reverse('badger.award_detail_json',
args=(self.badge.slug, self.id)))
meta.add_text('openbadges', hosted_assertion_url)
# And, finally save out the baked image.
new_img = StringIO()
img.save(new_img, "PNG", pnginfo=meta)
img_data = new_img.getvalue()
name_before = self.image.name
self.image.save('', ContentFile(img_data), False)
if (self.image.storage.exists(name_before)):
self.image.storage.delete(name_before)
# Update the image field with the new image name
# NOTE: Can't do a full save(), because this gets called in save()
Award.objects.filter(pk=self.pk).update(image=self.image)
return True
@property
def nomination(self):
"""Find the nomination behind this award, if any."""
# TODO: This should really be a foreign key relation, someday.
try:
return Nomination.objects.get(award=self)
except:
return None
class ProgressManager(models.Manager):
pass
class Progress(models.Model):
"""Record tracking progress toward auto-award of a badge"""
badge = models.ForeignKey(Badge)
user = models.ForeignKey(User, related_name="progress_user")
percent = models.FloatField(default=0)
counter = models.FloatField(default=0, blank=True, null=True)
notes = JSONField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True, blank=False)
modified = models.DateTimeField(auto_now=True, blank=False)
class Meta:
unique_together = ('badge', 'user')
verbose_name_plural = "Progresses"
get_permissions_for = get_permissions_for
def __unicode__(self):
perc = self.percent and (' (%s%s)' % (self.percent, '%')) or ''
return u'Progress toward %s by %s%s' % (self.badge, self.user, perc)
def save(self, *args, **kwargs):
"""Save the progress record, with before and after signals"""
# Signals and some bits of logic only happen on a new award.
is_new = not self.pk
# Bail if this is an attempt to double-award a unique badge
if (is_new and self.badge.unique and
self.badge.is_awarded_to(self.user)):
raise BadgeAlreadyAwardedException()
super(Progress, self).save(*args, **kwargs)
        # If the percent is at or above 100, auto-award on save.
if self.percent >= 100:
self.badge.award_to(self.user)
def _quiet_save(self, raise_exception=False):
try:
self.save()
except BadgeAlreadyAwardedException, e:
if raise_exception:
raise e
def update_percent(self, current, total=None, raise_exception=False):
"""Update the percent completion value."""
if total is None:
value = current
else:
value = (float(current) / float(total)) * 100.0
self.percent = value
self._quiet_save(raise_exception)
def increment_by(self, amount, raise_exception=False):
# TODO: Do this with an UPDATE counter+amount in DB
self.counter += amount
self._quiet_save(raise_exception)
return self
def decrement_by(self, amount, raise_exception=False):
# TODO: Do this with an UPDATE counter-amount in DB
self.counter -= amount
self._quiet_save(raise_exception)
return self
class DeferredAwardManager(models.Manager):
def get_claim_groups(self, badge):
"""Build a list of all known claim group IDs for a badge"""
qs = (self.filter(badge=badge)
.values('claim_group').distinct().all()
.annotate(modified=Max('modified'), count=Count('id')))
return [x
for x in qs
if x['claim_group']]
def generate(self, badge, user=None, amount=10, reusable=False):
"""Generate a number of deferred awards for a badge"""
claim_group = '%s-%s' % (time(), random.randint(0, 10000))
for i in range(0, amount):
(DeferredAward(badge=badge, creator=user, reusable=reusable,
claim_group=claim_group).save())
return claim_group
def claim_by_email(self, awardee):
"""Claim all deferred awards that match the awardee's email"""
return self._claim_qs(awardee, self.filter(email=awardee.email))
def claim_by_code(self, awardee, code):
"""Claim a deferred award by code for the awardee"""
return self._claim_qs(awardee, self.filter(claim_code=code))
def _claim_qs(self, awardee, qs):
"""Claim all the deferred awards that match the queryset"""
for da in qs:
da.claim(awardee)
def make_random_code():
"""Generare a random code, using a set of alphanumeric characters that
attempts to avoid ambiguously similar shapes."""
s = '3479acefhjkmnprtuvwxy'
return ''.join([random.choice(s) for x in range(CLAIM_CODE_LENGTH)])
class DeferredAwardGrantNotAllowedException(BadgerException):
"""Attempt to grant a DeferredAward not allowed"""
@_document_django_model
class DeferredAward(models.Model):
"""Deferred award, can be converted into into a real award."""
objects = DeferredAwardManager()
badge = models.ForeignKey(Badge)
description = models.TextField(blank=True)
reusable = models.BooleanField(default=False)
email = models.EmailField(blank=True, null=True, db_index=True)
claim_code = models.CharField(max_length=32,
default=make_random_code, unique=True, db_index=True)
claim_group = models.CharField(max_length=32, blank=True, null=True,
db_index=True)
creator = models.ForeignKey(User, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True, blank=False)
modified = models.DateTimeField(auto_now=True, blank=False)
class Meta:
ordering = ['-modified', '-created']
permissions = (
("grant_deferredaward",
"Can grant deferred award to an email address"),
)
get_permissions_for = get_permissions_for
def allows_detail_by(self, user):
# TODO: Need some logic here, someday.
return True
def allows_claim_by(self, user):
if user.is_anonymous():
return False
# TODO: Need some logic here, someday.
# TODO: Could enforce that the user.email == self.email, but I want to
# allow for people with multiple email addresses. That is, I get an
# award claim invite sent to lorchard@mozilla.com, but I claim it while
# signed in as me@lmorchard.com. Warning displayed in the view.
return True
def allows_grant_by(self, user):
if user.is_anonymous():
return False
if user.has_perm('badger.grant_deferredaward'):
return True
if self.badge.allows_award_to(user):
return True
if user == self.creator:
return True
return False
def get_claim_url(self):
"""Get the URL to a page where this DeferredAward can be claimed."""
return reverse('badger.views.claim_deferred_award',
args=(self.claim_code,))
def save(self, **kwargs):
"""Save the DeferredAward, sending a claim email if it's new"""
is_new = not self.pk
has_existing_deferreds = False
if self.email:
has_existing_deferreds = DeferredAward.objects.filter(
email=self.email).exists()
super(DeferredAward, self).save(**kwargs)
if is_new and self.email and not has_existing_deferreds:
try:
# If this is new and there's an email, send an invite to claim.
context = Context(dict(
deferred_award=self,
badge=self.badge,
protocol=DEFAULT_HTTP_PROTOCOL,
current_site=Site.objects.get_current()
))
tmpl_name = 'badger/deferred_award_%s.txt'
subject = render_to_string(tmpl_name % 'subject', {}, context)
body = render_to_string(tmpl_name % 'body', {}, context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
[self.email], fail_silently=False)
except TemplateDoesNotExist, e:
pass
def claim(self, awardee):
"""Claim the deferred award for the given user"""
try:
award = self.badge.award_to(awardee=awardee, awarder=self.creator)
award.claim_code = self.claim_code
award.save()
except (BadgeAlreadyAwardedException,
BadgeAwardNotAllowedException), e:
# Just swallow up and ignore any issues in awarding.
award = None
if not self.reusable:
# Self-destruct, if not made reusable.
self.delete()
return award
def grant_to(self, email, granter):
"""Grant this deferred award to the given email"""
if not self.allows_grant_by(granter):
raise DeferredAwardGrantNotAllowedException()
if not self.reusable:
# If not reusable, reassign email and regenerate claim code.
self.email = email
self.claim_code = make_random_code()
self.save()
return self
else:
# If reusable, create a clone and leave this deferred award alone.
new_da = DeferredAward(badge=self.badge, email=email,
creator=granter, reusable=False)
new_da.save()
return new_da
class NominationException(BadgerException):
"""Nomination model exception"""
class NominationApproveNotAllowedException(NominationException):
"""Attempt to approve a nomination was disallowed"""
class NominationAcceptNotAllowedException(NominationException):
"""Attempt to accept a nomination was disallowed"""
class NominationRejectNotAllowedException(NominationException):
"""Attempt to reject a nomination was disallowed"""
class NominationManager(models.Manager):
pass
@_document_django_model
class Nomination(models.Model):
"""Representation of a user nominated by another user for a badge"""
objects = NominationManager()
badge = models.ForeignKey(Badge)
nominee = models.ForeignKey(User, related_name="nomination_nominee",
blank=False, null=False)
accepted = models.BooleanField(default=False)
creator = models.ForeignKey(User, related_name="nomination_creator",
blank=True, null=True)
approver = models.ForeignKey(User, related_name="nomination_approver",
blank=True, null=True)
rejected_by = models.ForeignKey(User, related_name="nomination_rejected_by",
blank=True, null=True)
rejected_reason = models.TextField(blank=True)
award = models.ForeignKey(Award, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, blank=False)
modified = models.DateTimeField(auto_now=True, blank=False)
get_permissions_for = get_permissions_for
def __unicode__(self):
return u'Nomination for %s to %s by %s' % (self.badge, self.nominee,
self.creator)
def get_absolute_url(self):
return reverse('badger.views.nomination_detail',
args=(self.badge.slug, self.id))
def save(self, *args, **kwargs):
# Signals and some bits of logic only happen on a new nomination.
is_new = not self.pk
# Bail if this is an attempt to double-award a unique badge
if (is_new and self.badge.unique and
self.badge.is_awarded_to(self.nominee)):
raise BadgeAlreadyAwardedException()
if is_new:
user_will_be_nominated.send(sender=self.__class__,
nomination=self)
if self.is_approved and self.is_accepted:
            self.award = self.badge.award_to(awardee=self.nominee,
                                             awarder=self.approver)
super(Nomination, self).save(*args, **kwargs)
if is_new:
user_was_nominated.send(sender=self.__class__,
nomination=self)
def allows_detail_by(self, user):
if (user.is_staff or
user.is_superuser or
user == self.badge.creator or
user == self.nominee or
user == self.creator ):
return True
# TODO: List of delegates empowered by badge creator to approve nominations
return False
@property
def is_approved(self):
"""Has this nomination been approved?"""
return self.approver is not None
def allows_approve_by(self, user):
if self.is_approved or self.is_rejected:
return False
if user.is_staff or user.is_superuser:
return True
if user == self.badge.creator:
return True
# TODO: List of delegates empowered by badge creator to approve nominations
return False
def approve_by(self, approver):
"""Approve this nomination.
Also awards, if already accepted."""
if not self.allows_approve_by(approver):
raise NominationApproveNotAllowedException()
self.approver = approver
nomination_will_be_approved.send(sender=self.__class__,
nomination=self)
self.save()
nomination_was_approved.send(sender=self.__class__,
nomination=self)
if notification:
if self.badge.creator:
notification.send([self.badge.creator], 'nomination_approved',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
if self.creator:
notification.send([self.creator], 'nomination_approved',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
notification.send([self.nominee], 'nomination_received',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
return self
@property
def is_accepted(self):
"""Has this nomination been accepted?"""
return self.accepted
def allows_accept(self, user):
if self.is_accepted or self.is_rejected:
return False
if user.is_staff or user.is_superuser:
return True
if user == self.nominee:
return True
return False
def accept(self, user):
"""Accept this nomination for the nominee.
Also awards, if already approved."""
if not self.allows_accept(user):
raise NominationAcceptNotAllowedException()
self.accepted = True
nomination_will_be_accepted.send(sender=self.__class__,
nomination=self)
self.save()
nomination_was_accepted.send(sender=self.__class__,
nomination=self)
if notification:
if self.badge.creator:
notification.send([self.badge.creator], 'nomination_accepted',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
if self.creator:
notification.send([self.creator], 'nomination_accepted',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
return self
@property
def is_rejected(self):
"""Has this nomination been rejected?"""
return self.rejected_by is not None
def allows_reject_by(self, user):
if self.is_approved or self.is_rejected:
return False
if user.is_staff or user.is_superuser:
return True
if user == self.nominee:
return True
if user == self.badge.creator:
return True
return False
def reject_by(self, user, reason=''):
if not self.allows_reject_by(user):
raise NominationRejectNotAllowedException()
self.rejected_by = user
self.rejected_reason = reason
nomination_will_be_rejected.send(sender=self.__class__,
nomination=self)
self.save()
nomination_was_rejected.send(sender=self.__class__,
nomination=self)
if notification:
if self.badge.creator:
notification.send([self.badge.creator], 'nomination_rejected',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
if self.creator:
notification.send([self.creator], 'nomination_rejected',
dict(nomination=self,
protocol=DEFAULT_HTTP_PROTOCOL))
return self
# HACK: Django 1.2 is missing receiver and user_logged_in
if receiver and user_logged_in:
@receiver(user_logged_in)
def claim_on_login(sender, request, user, **kwargs):
"""When a user logs in, claim any deferred awards by email"""
DeferredAward.objects.claim_by_email(user)
|
|
import re
from binascii import unhexlify
from collections import namedtuple
from itertools import starmap
from streamlink.compat import urljoin, urlparse
__all__ = ["load", "M3U8Parser"]
# EXT-X-BYTERANGE
ByteRange = namedtuple("ByteRange", "range offset")
# EXT-X-KEY
Key = namedtuple("Key", "method uri iv key_format key_format_versions")
# EXT-X-MAP
Map = namedtuple("Map", "uri byterange")
# EXT-X-MEDIA
Media = namedtuple("Media", "uri type group_id language name default "
"autoselect forced characteristics")
# EXT-X-START
Start = namedtuple("Start", "time_offset precise")
# EXT-X-STREAM-INF
StreamInfo = namedtuple("StreamInfo", "bandwidth program_id codecs resolution "
"audio video subtitles")
# EXT-X-I-FRAME-STREAM-INF
IFrameStreamInfo = namedtuple("IFrameStreamInfo", "bandwidth program_id "
"codecs resolution video")
Playlist = namedtuple("Playlist", "uri stream_info media is_iframe")
Resolution = namedtuple("Resolution", "width height")
Segment = namedtuple("Segment", "uri duration title key discontinuity "
"byterange date map")
ATTRIBUTE_REGEX = (r"([A-Z\-]+)=(\d+\.\d+|0x[0-9A-z]+|\d+x\d+|\d+|"
r"\"(.+?)\"|[0-9A-z\-]+)")
class M3U8(object):
def __init__(self):
self.is_endlist = False
self.is_master = False
self.allow_cache = None
self.discontinuity_sequence = None
self.iframes_only = None
self.media_sequence = None
self.playlist_type = None
self.target_duration = None
self.start = None
self.version = None
self.media = []
self.playlists = []
self.segments = []
class M3U8Parser(object):
def __init__(self, base_uri=None):
self.base_uri = base_uri
def create_stream_info(self, streaminf, cls=None):
program_id = streaminf.get("PROGRAM-ID")
if program_id:
program_id = int(program_id)
bandwidth = streaminf.get("BANDWIDTH")
if bandwidth:
bandwidth = float(bandwidth)
resolution = streaminf.get("RESOLUTION")
if resolution:
resolution = self.parse_resolution(resolution)
codecs = streaminf.get("CODECS")
if codecs:
codecs = codecs.split(",")
else:
codecs = []
if cls == IFrameStreamInfo:
return IFrameStreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("VIDEO"))
else:
return StreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("AUDIO"), streaminf.get("VIDEO"),
streaminf.get("SUBTITLES"))
def split_tag(self, line):
match = re.match("#(?P<tag>[\w-]+)(:(?P<value>.+))?", line)
if match:
return match.group("tag"), match.group("value").strip()
return None, None
def parse_attributes(self, value):
def map_attribute(key, value, quoted):
return (key, quoted or value)
attr = re.findall(ATTRIBUTE_REGEX, value)
return dict(starmap(map_attribute, attr))
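        # A minimal sketch of parse_attributes() above (the attribute list is
        # made up); quoted values are returned without their quotes:
        #
        #     self.parse_attributes('BANDWIDTH=1280000,CODECS="avc1.4d401f,mp4a.40.2"')
        #     # -> {'BANDWIDTH': '1280000', 'CODECS': 'avc1.4d401f,mp4a.40.2'}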
def parse_bool(self, value):
return value == "YES"
def parse_byterange(self, value):
match = re.match("(?P<range>\d+)(@(?P<offset>.+))?", value)
if match:
return ByteRange(int(match.group("range")),
int(match.group("offset") or 0))
def parse_extinf(self, value):
match = re.match("(?P<duration>\d+(\.\d+)?)(,(?P<title>.+))?", value)
if match:
return float(match.group("duration")), match.group("title")
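        # A minimal sketch of parse_extinf() above (values are made up):
        #
        #     self.parse_extinf("5.006,Segment One")   # -> (5.006, "Segment One")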
def parse_hex(self, value):
value = value[2:]
if len(value) % 2:
value = "0" + value
return unhexlify(value)
def parse_resolution(self, value):
match = re.match("(\d+)x(\d+)", value)
if match:
width, height = int(match.group(1)), int(match.group(2))
else:
width, height = 0, 0
return Resolution(width, height)
def parse_tag(self, line, transform=None):
tag, value = self.split_tag(line)
if transform:
value = transform(value)
return value
def parse_line(self, lineno, line):
if lineno == 0 and not line.startswith("#EXTM3U"):
raise ValueError("Missing #EXTM3U header")
if not line.startswith("#"):
if self.state.pop("expect_segment", None):
byterange = self.state.pop("byterange", None)
extinf = self.state.pop("extinf", (0, None))
date = self.state.pop("date", None)
map_ = self.state.get("map")
key = self.state.get("key")
segment = Segment(self.uri(line), extinf[0],
extinf[1], key,
self.state.pop("discontinuity", False),
byterange, date, map_)
self.m3u8.segments.append(segment)
elif self.state.pop("expect_playlist", None):
streaminf = self.state.pop("streaminf", {})
stream_info = self.create_stream_info(streaminf)
playlist = Playlist(self.uri(line), stream_info, [], False)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXTINF"):
self.state["expect_segment"] = True
self.state["extinf"] = self.parse_tag(line, self.parse_extinf)
elif line.startswith("#EXT-X-BYTERANGE"):
self.state["expect_segment"] = True
self.state["byterange"] = self.parse_tag(line, self.parse_byterange)
elif line.startswith("#EXT-X-TARGETDURATION"):
self.m3u8.target_duration = self.parse_tag(line, int)
elif line.startswith("#EXT-X-MEDIA-SEQUENCE"):
self.m3u8.media_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-KEY"):
attr = self.parse_tag(line, self.parse_attributes)
iv = attr.get("IV")
if iv: iv = self.parse_hex(iv)
self.state["key"] = Key(attr.get("METHOD"),
self.uri(attr.get("URI")),
iv, attr.get("KEYFORMAT"),
attr.get("KEYFORMATVERSIONS"))
elif line.startswith("#EXT-X-PROGRAM-DATE-TIME"):
self.state["date"] = self.parse_tag(line)
elif line.startswith("#EXT-X-ALLOW-CACHE"):
self.m3u8.allow_cache = self.parse_tag(line, self.parse_bool)
elif line.startswith("#EXT-X-STREAM-INF"):
self.state["streaminf"] = self.parse_tag(line, self.parse_attributes)
self.state["expect_playlist"] = True
elif line.startswith("#EXT-X-PLAYLIST-TYPE"):
self.m3u8.playlist_type = self.parse_tag(line)
elif line.startswith("#EXT-X-ENDLIST"):
self.m3u8.is_endlist = True
elif line.startswith("#EXT-X-MEDIA"):
attr = self.parse_tag(line, self.parse_attributes)
media = Media(self.uri(attr.get("URI")), attr.get("TYPE"),
attr.get("GROUP-ID"), attr.get("LANGUAGE"),
attr.get("NAME"),
self.parse_bool(attr.get("DEFAULT")),
self.parse_bool(attr.get("AUTOSELECT")),
self.parse_bool(attr.get("FORCED")),
attr.get("CHARACTERISTICS"))
self.m3u8.media.append(media)
elif line.startswith("#EXT-X-DISCONTINUITY"):
self.state["discontinuity"] = True
self.state["map"] = None
elif line.startswith("#EXT-X-DISCONTINUITY-SEQUENCE"):
self.m3u8.discontinuity_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-I-FRAMES-ONLY"):
self.m3u8.iframes_only = True
elif line.startswith("#EXT-X-MAP"):
attr = self.parse_tag(line, self.parse_attributes)
byterange = self.parse_byterange(attr.get("BYTERANGE", ""))
self.state["map"] = Map(attr.get("URI"), byterange)
elif line.startswith("#EXT-X-I-FRAME-STREAM-INF"):
attr = self.parse_tag(line, self.parse_attributes)
streaminf = self.state.pop("streaminf", attr)
stream_info = self.create_stream_info(streaminf, IFrameStreamInfo)
playlist = Playlist(self.uri(attr.get("URI")), stream_info, [], True)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXT-X-VERSION"):
self.m3u8.version = self.parse_tag(line, int)
elif line.startswith("#EXT-X-START"):
attr = self.parse_tag(line, self.parse_attributes)
start = Start(attr.get("TIME-OFFSET"),
self.parse_bool(attr.get("PRECISE", "NO")))
self.m3u8.start = start
def parse(self, data):
self.state = {}
self.m3u8 = M3U8()
for lineno, line in enumerate(filter(bool, data.splitlines())):
self.parse_line(lineno, line)
# Associate Media entries with each Playlist
for playlist in self.m3u8.playlists:
for media_type in ("audio", "video", "subtitles"):
group_id = getattr(playlist.stream_info, media_type, None)
if group_id:
for media in filter(lambda m: m.group_id == group_id,
self.m3u8.media):
playlist.media.append(media)
self.m3u8.is_master = not not self.m3u8.playlists
return self.m3u8
def uri(self, uri):
if uri and urlparse(uri).scheme:
return uri
elif self.base_uri and uri:
return urljoin(self.base_uri, uri)
else:
return uri
def load(data, base_uri=None, parser=M3U8Parser):
"""Attempts to parse a M3U8 playlist from a string of data.
    If specified, *base_uri* is the base URI that relative URIs will
    be joined with; otherwise relative URIs are left as-is.
    If specified, *parser* can be an M3U8Parser subclass to be used
    to parse the data.
"""
return parser(base_uri).parse(data)
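# A minimal usage sketch of load(), assuming the imports and namedtuples
# defined earlier in this module (re, namedtuple, urljoin, ...) are present.
# The playlist text and URIs below are illustrative only.
if __name__ == "__main__":
    EXAMPLE_PLAYLIST = "\n".join([
        "#EXTM3U",
        "#EXT-X-VERSION:3",
        "#EXT-X-TARGETDURATION:6",
        "#EXTINF:5.97,",
        "segment0.ts",
        "#EXT-X-ENDLIST",
    ])
    playlist = load(EXAMPLE_PLAYLIST, base_uri="http://example.com/stream/")
    # Relative segment URIs are joined with base_uri by M3U8Parser.uri().
    print("target duration: {0}".format(playlist.target_duration))
    print("segment URIs: {0}".format([s.uri for s in playlist.segments]))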
|
|
#!/usr/bin/env trial
import copy
import difflib
import gflags
import logging
import mock
import os
import sys
from ct.client import log_client
from ct.client.db import sqlite_connection as sqlitecon
from ct.client.db import sqlite_log_db
from ct.client import state
from ct.client import monitor
from ct.crypto import error
from ct.crypto import merkle
from ct.proto import client_pb2
from twisted.internet import defer
from twisted.trial import unittest
from twisted.web import iweb
from zope.interface import implements
FLAGS = gflags.FLAGS
# TODO(ekasper): make this setup common to all tests.
gflags.DEFINE_bool("verbose_tests", False, "Print test logs")
def dummy_compute_projected_sth(old_sth):
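    """Builds a Mock usable as the monitor's compute-projected-STH helper.
    The mock returns (sth, tree) when called and exposes them as .dummy_sth
    and .dummy_tree; it also sets old_sth.sha256_root_hash to the tree's root.
    """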
sth = client_pb2.SthResponse()
sth.timestamp = old_sth.timestamp
sth.tree_size = size = old_sth.tree_size
tree = merkle.CompactMerkleTree(
merkle.TreeHasher(), size, ["a"] * merkle.count_bits_set(size))
f = mock.Mock(return_value=(sth, tree))
f.dummy_sth = sth
f.dummy_tree = tree
old_sth.sha256_root_hash = tree.root_hash()
return f
# TODO(robpercival): This is a relatively complicated fake, and may hide subtle
# bugs in how the Monitor interacts with the real EntryProducer. Using the real
# EntryProducer with a FakeAgent, as async_log_client_test does, may be an
# improvement.
class FakeEntryProducer(object):
def __init__(self, start, end, batch_size=None, throw=None):
self._start = start
self._end = end
self._real_start = start
self._real_end = end
self.throw = throw
self.batch_size = batch_size if batch_size else end - start + 1
self.stop = False
@defer.deferredGenerator
def produce(self):
if self.throw:
raise self.throw
for i in range(self._start, self._end, self.batch_size):
entries = []
for j in range(i, min(i + self.batch_size, self._end)):
entry = client_pb2.EntryResponse()
entry.leaf_input = "leaf_input-%d" % j
entry.extra_data = "extra_data-%d" % j
entries.append(entry)
d = self.consumer.consume(entries)
wfd = defer.waitForDeferred(d)
yield wfd
wfd.getResult()
if self.stop:
break
if not self.stop:
self.done.callback(self._end - self._start + 1)
def startProducing(self, consumer):
self.stop = False
self._start = self._real_start
self._end = self._real_end
self.consumer = consumer
self.done = defer.Deferred()
d = self.produce()
d.addErrback(self.stopProducing)
return self.done
def change_range_after_start(self, start, end):
"""Changes query interval exactly when startProducing is ran.
EntryConsumer in Monitor uses Producer interval, so in one of the tests
we have to be able to change that interval when producing is started,
but after consumer is created."""
self._real_start = start
self._real_end = end
def stopProducing(self, failure=None):
self.stop = True
if failure:
self.done.errback(failure)
class FakeLogClient(object):
def __init__(self, sth, servername="log_server", batch_size=None,
get_entries_throw=None):
self.servername = servername
self.sth = sth
self.batch_size = batch_size
self.get_entries_throw = get_entries_throw
def get_sth(self):
d = defer.Deferred()
d.callback(self.sth)
return d
def get_entries(self, start, end):
return FakeEntryProducer(start, end, self.batch_size,
self.get_entries_throw)
def get_sth_consistency(self, old_tree, new_tree):
d = defer.Deferred()
d.callback([])
return d
class InMemoryStateKeeper(object):
def __init__(self, state=None):
self.state = state
def write(self, state):
self.state = state
def read(self, state_type):
if not self.state:
raise state.FileNotFoundError("Boom!")
return_state = state_type()
return_state.CopyFrom(self.state)
return return_state
class MonitorTest(unittest.TestCase):
_DEFAULT_STH = client_pb2.SthResponse()
_DEFAULT_STH.timestamp = 2000
_DEFAULT_STH.tree_size = 10
_DEFAULT_STH.tree_head_signature = "sig"
_DEFAULT_STH_compute_projected = dummy_compute_projected_sth(_DEFAULT_STH)
_NEW_STH = client_pb2.SthResponse()
_NEW_STH.timestamp = 3000
_NEW_STH.tree_size = _DEFAULT_STH.tree_size + 10
_NEW_STH.tree_head_signature = "sig2"
_NEW_STH_compute_projected = dummy_compute_projected_sth(_NEW_STH)
_DEFAULT_STATE = client_pb2.MonitorState()
_DEFAULT_STATE.verified_sth.CopyFrom(_DEFAULT_STH)
_DEFAULT_STH_compute_projected.dummy_tree.save(
_DEFAULT_STATE.unverified_tree)
_DEFAULT_STH_compute_projected.dummy_tree.save(
_DEFAULT_STATE.verified_tree)
def setUp(self):
if not FLAGS.verbose_tests:
logging.disable(logging.CRITICAL)
self.db = sqlite_log_db.SQLiteLogDB(
sqlitecon.SQLiteConnectionManager(":memory:", keepalive=True))
        # We can't simply use a DB in memory with keepalive True, because a
        # different thread is writing to the database, which results in an
        # sqlite exception.
self.cert_db = mock.MagicMock()
self.state_keeper = InMemoryStateKeeper(copy.deepcopy(self._DEFAULT_STATE))
self.verifier = mock.Mock()
self.hasher = merkle.TreeHasher()
# Make sure the DB knows about the default log server.
log = client_pb2.CtLogMetadata()
log.log_server = "log_server"
self.db.add_log(log)
def verify_state(self, expected_state):
if self.state_keeper.state != expected_state:
state_diff = difflib.unified_diff(
str(expected_state).splitlines(),
str(self.state_keeper.state).splitlines(),
fromfile="expected", tofile="actual", lineterm="", n=5)
raise unittest.FailTest("State is incorrect\n" +
"\n".join(state_diff))
def verify_tmp_data(self, start, end):
# TODO: we are no longer using the temp db
# all the callsites should be updated to test the main db instead
pass
def create_monitor(self, client, skip_scan_entry=True):
m = monitor.Monitor(client, self.verifier, self.hasher, self.db,
self.cert_db, 7, self.state_keeper)
if m:
m._scan_entries = mock.Mock()
return m
def check_db_state_after_successful_updates(self, number_of_updates):
audited_sths = list(self.db.scan_latest_sth_range("log_server"))
for index, audited_sth in enumerate(audited_sths):
if index % 2 != 0:
self.assertEqual(client_pb2.UNVERIFIED,
audited_sth.audit.status)
else:
self.assertEqual(client_pb2.VERIFIED,
audited_sth.audit.status)
self.assertEqual(len(audited_sths), number_of_updates * 2)
def test_update(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
def check_state(result):
# Check that we wrote the state...
expected_state = client_pb2.MonitorState()
expected_state.verified_sth.CopyFrom(self._NEW_STH)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.verified_tree)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.unverified_tree)
self.verify_state(expected_state)
self.verify_tmp_data(self._DEFAULT_STH.tree_size,
self._NEW_STH.tree_size-1)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._NEW_STH, audited_sth.sth)
d = m.update()
d.addCallback(self.assertTrue)
d.addCallback(lambda x: m._certdb_reporter_done_callback())
d.addCallback(check_state)
return d
def test_first_update(self):
client = FakeLogClient(self._DEFAULT_STH)
self.state_keeper.state = None
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._DEFAULT_STH_compute_projected
def check_state(result):
# Check that we wrote the state...
self.verify_state(self._DEFAULT_STATE)
self.verify_tmp_data(0, self._DEFAULT_STH.tree_size-1)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._DEFAULT_STH, audited_sth.sth)
d = m.update()
d.addCallback(self.assertTrue)
d.addCallback(lambda x: m._certdb_reporter_done_callback())
d.addCallback(check_state)
return d
def test_update_no_new_entries(self):
client = FakeLogClient(self._DEFAULT_STH)
m = self.create_monitor(client)
d = m.update()
d.addCallback(self.assertTrue)
def check_state(result):
# Check that we kept the state...
self.verify_state(self._DEFAULT_STATE)
# ...and wrote no entries.
self.check_db_state_after_successful_updates(0)
d.addCallback(check_state)
return d
def test_update_recovery(self):
client = FakeLogClient(self._NEW_STH)
# Setup initial state to be as though an update had failed part way
# through.
initial_state = copy.deepcopy(self._DEFAULT_STATE)
initial_state.pending_sth.CopyFrom(self._NEW_STH)
self._NEW_STH_compute_projected.dummy_tree.save(
initial_state.unverified_tree)
self.state_keeper.write(initial_state)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
d = m.update()
d.addCallback(self.assertTrue)
d.addCallback(lambda x: m._certdb_reporter_done_callback())
def check_state(result):
# Check that we wrote the state...
expected_state = copy.deepcopy(initial_state)
expected_state.ClearField("pending_sth")
expected_state.verified_sth.CopyFrom(self._NEW_STH)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.verified_tree)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.unverified_tree)
self.verify_state(expected_state)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._NEW_STH, audited_sth.sth)
d.addCallback(check_state)
return d
def test_update_rolls_back_unverified_tree_on_scan_error(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
m._scan_entries = mock.Mock(side_effect=ValueError("Boom!"))
def check_state(result):
# The changes to the unverified tree should have been discarded,
# so that entries are re-fetched and re-consumed next time.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
# The new STH should have been verified prior to the error.
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status, client_pb2.VERIFIED)
self.assertEqual(audited_sths[1].audit.status, client_pb2.UNVERIFIED)
return m.update().addCallback(self.assertFalse).addCallback(check_state)
def test_update_call_sequence(self):
        # Test that update calls update_sth and update_entries in sequence,
        # and bails on the first error, so we can test each of them separately.
        # Each of the callbacks below checks that the mocked functions were
        # called as expected and then runs the next step in the sequence of
        # updates.
def check_calls_sth_fails(result):
m._update_sth.assert_called_once_with()
m._update_entries.assert_called_once_with()
m._update_sth.reset_mock()
m._update_entries.reset_mock()
m._update_sth.return_value = copy.deepcopy(d_false)
return m.update().addCallback(self.assertFalse)
def check_calls_entries_fail(result):
m._update_sth.assert_called_once_with()
self.assertFalse(m._update_entries.called)
m._update_sth.reset_mock()
m._update_entries.reset_mock()
m._update_sth.return_value = copy.deepcopy(d_true)
m._update_entries.return_value = copy.deepcopy(d_false)
return m.update().addCallback(self.assertFalse)
def check_calls_assert_last_calls(result):
m._update_sth.assert_called_once_with()
m._update_entries.assert_called_once_with()
client = FakeLogClient(self._DEFAULT_STH)
m = self.create_monitor(client)
d_true = defer.Deferred()
d_true.callback(True)
d_false = defer.Deferred()
d_false.callback(False)
        # Check a regular, successful update first.
m._update_sth = mock.Mock(return_value=copy.deepcopy(d_true))
m._update_entries = mock.Mock(return_value=copy.deepcopy(d_true))
d = m.update().addCallback(self.assertTrue)
d.addCallback(check_calls_sth_fails)
d.addCallback(check_calls_entries_fail)
d.addCallback(check_calls_assert_last_calls)
return d
def test_update_sth(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
def check_state(result):
# Check that we updated the state.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status, client_pb2.VERIFIED)
self.assertEqual(audited_sths[1].audit.status, client_pb2.UNVERIFIED)
return m._update_sth().addCallback(self.assertTrue
).addCallback(check_state)
def test_update_sth_fails_for_invalid_sth(self):
client = FakeLogClient(self._NEW_STH)
self.verifier.verify_sth.side_effect = error.VerifyError("Boom!")
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
self.check_db_state_after_successful_updates(0)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_for_stale_sth(self):
sth = client_pb2.SthResponse()
sth.CopyFrom(self._DEFAULT_STH)
sth.tree_size -= 1
sth.timestamp -= 1
client = FakeLogClient(sth)
m = self.create_monitor(client)
d = defer.Deferred()
d.callback(True)
m._verify_consistency = mock.Mock(return_value=d)
def check_state(result):
self.assertTrue(m._verify_consistency.called)
args, _ = m._verify_consistency.call_args
self.assertTrue(args[0].timestamp < args[1].timestamp)
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_for_inconsistent_sth(self):
client = FakeLogClient(self._NEW_STH)
# The STH is in fact OK but fake failure.
self.verifier.verify_sth_consistency.side_effect = (
error.ConsistencyError("Boom!"))
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status,
client_pb2.VERIFY_ERROR)
self.assertEqual(audited_sths[1].audit.status,
client_pb2.UNVERIFIED)
for audited_sth in audited_sths:
self.assertEqual(self._DEFAULT_STH.sha256_root_hash,
audited_sth.sth.sha256_root_hash)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_on_client_error(self):
client = FakeLogClient(self._NEW_STH)
def get_sth():
return defer.maybeDeferred(mock.Mock(side_effect=log_client.HTTPError("Boom!")))
client.get_sth = get_sth
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
self.check_db_state_after_successful_updates(0)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_entries_fails_on_client_error(self):
client = FakeLogClient(self._NEW_STH,
get_entries_throw=log_client.HTTPError("Boom!"))
client.get_entries = mock.Mock(
return_value=client.get_entries(0, self._NEW_STH.tree_size - 2))
m = self.create_monitor(client)
# Get the new STH, then try (and fail) to update entries
d = m._update_sth().addCallback(self.assertTrue)
d.addCallback(lambda x: m._update_entries()).addCallback(self.assertFalse)
def check_state(result):
# Check that we wrote no entries.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
d.addCallback(check_state)
return d
def test_update_entries_fails_not_enough_entries(self):
client = FakeLogClient(self._NEW_STH)
faker_fake_entry_producer = FakeEntryProducer(0,
self._NEW_STH.tree_size)
faker_fake_entry_producer.change_range_after_start(0, 5)
client.get_entries = mock.Mock(
return_value=faker_fake_entry_producer)
m = self.create_monitor(client)
m._compute_projected_sth = self._NEW_STH_compute_projected
# Get the new STH first.
return m._update_sth().addCallback(self.assertTrue).addCallback(
lambda x: m._update_entries().addCallback(self.assertFalse))
def test_update_entries_fails_in_the_middle(self):
client = FakeLogClient(self._NEW_STH)
faker_fake_entry_producer = FakeEntryProducer(
self._DEFAULT_STH.tree_size,
self._NEW_STH.tree_size)
faker_fake_entry_producer.change_range_after_start(
self._DEFAULT_STH.tree_size, self._NEW_STH.tree_size - 5)
client.get_entries = mock.Mock(return_value=faker_fake_entry_producer)
m = self.create_monitor(client)
m._compute_projected_sth = self._NEW_STH_compute_projected
fake_fetch = mock.MagicMock()
def try_again_with_all_entries(_):
m._fetch_entries = fake_fetch
return m._update_entries()
# Get the new STH first.
return m._update_sth().addCallback(self.assertTrue).addCallback(
lambda _: m._update_entries().addCallback(self.assertFalse)
).addCallback(try_again_with_all_entries).addCallback(lambda _:
fake_fetch.assert_called_once_with(15, 19))
if __name__ == "__main__" or __name__ == "ct.client.monitor_test":
sys.argv = FLAGS(sys.argv)
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os
import os.path
import shutil
import subprocess
import tempfile
import urllib.parse
import functools
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.directives
import docutils.statemachine
import jinja2
import sphinx
import qiime2
loader = jinja2.PackageLoader('sphinx_extensions.command_block', 'templates')
jinja_env = jinja2.Environment(loader=loader)
class download_node(docutils.nodes.Element):
def __init__(self, id_, url, saveas, *args, **kwargs):
super().__init__(*args, **kwargs)
self.id = id_
self.url = url
self.saveas = saveas
def visit_download_node(self, node):
pass
def depart_download_node(self, node):
template = jinja_env.get_template('download.html')
rendered = template.render(node=node)
self.body.append(rendered)
def setup_working_dir(app):
app.command_block_working_dir = tempfile.TemporaryDirectory(
prefix='qiime2-docs-command-block-')
def teardown_working_dir(app, exception):
app.command_block_working_dir.cleanup()
OutputPath = collections.namedtuple('OutputPath', ['file', 'url'])
class CommandBlockDirective(docutils.parsers.rst.Directive):
has_content = True
option_spec = {
'no-exec': docutils.parsers.rst.directives.flag,
'url': docutils.parsers.rst.directives.unchanged_required,
'saveas': docutils.parsers.rst.directives.unchanged_required,
}
def run(self):
command_mode = True if self.name == 'command-block' else False
opts = self.options
download_opts = [k in opts for k in ['url', 'saveas']]
if command_mode:
self.assert_has_content()
if any(download_opts):
raise sphinx.errors.ExtensionError('command-block does not '
'support the following '
'options: `url`, `saveas`.')
commands = functools.reduce(self._parse_multiline_commands,
self.content, [])
nodes = [self._get_literal_block_node(self.content)]
else:
if self.content:
raise sphinx.errors.ExtensionError('Content block not '
'supported for the '
'download directive.')
if not all(download_opts):
raise sphinx.errors.ExtensionError('Missing options for the '
'download directive. '
'Please specify `url` and '
'`saveas`.')
commands = ['wget -O "%s" "%s"' % (opts['saveas'], opts['url'])]
id_ = self.state.document.settings.env.new_serialno('download')
nodes = [download_node(id_, opts['url'], opts['saveas'])]
env = self._get_env()
if not (env.config.command_block_no_exec or 'no-exec' in self.options):
working_dir = os.path.join(env.app.command_block_working_dir.name,
env.docname)
os.makedirs(working_dir, exist_ok=True)
self._execute_commands(commands, working_dir)
if command_mode:
artifacts, visualizations = self._get_output_paths(working_dir)
if artifacts or visualizations:
nodes.append(
self._get_output_links_node(artifacts, visualizations))
return nodes
def _get_env(self):
return self.state.document.settings.env
def _get_literal_block_node(self, commands):
content = '\n'.join(commands)
node = docutils.nodes.literal_block(content, content)
node['language'] = 'shell'
return node
def _execute_commands(self, commands, working_dir):
app = self._get_env().app
for command in commands:
command = command.strip()
if not command:
continue
try:
app.info("Running command: %s" % command)
comp_proc = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir,
shell=True,
universal_newlines=True)
except OSError as e:
raise sphinx.errors.ExtensionError("Unable to execute "
"command %r: %s" %
(command, e))
if comp_proc.returncode != 0:
msg = (
"Command %r exited with non-zero return code %d.\n\n"
"stdout:\n\n%s\n\n"
"stderr:\n\n%s" %
(command, comp_proc.returncode, comp_proc.stdout,
comp_proc.stderr)
)
raise sphinx.errors.ExtensionError(msg)
def _get_output_paths(self, working_dir):
env = self._get_env()
        # TODO: don't hardcode the build dir. Not sure how to get this value
        # from Sphinx programmatically.
root_build_dir = 'build/html'
doc_data_dir = os.path.join(root_build_dir, 'data', env.docname)
artifacts = []
visualizations = []
for dirpath, _, filenames in os.walk(working_dir):
for filename in filenames:
if filename.endswith('.qza') or filename.endswith('.qzv'):
src_filepath = os.path.join(dirpath, filename)
file_relpath = os.path.relpath(src_filepath,
start=working_dir)
dest_dir = os.path.join(doc_data_dir,
os.path.dirname(file_relpath))
os.makedirs(dest_dir, exist_ok=True)
dest_filepath = os.path.join(dest_dir, filename)
if os.path.exists(dest_filepath):
if (os.path.getmtime(dest_filepath) <
os.path.getmtime(src_filepath)):
msg = (
"Command overwrote path %r that was created "
"by a previous command in this file. Output "
"overwriting is not supported by the `%s` "
"directive." % (file_relpath, self.name)
)
raise sphinx.errors.ExtensionError(msg)
else:
shutil.copyfile(src_filepath, dest_filepath)
url_relpath = os.path.relpath(dest_filepath,
root_build_dir)
output_path = OutputPath(file=file_relpath,
url=url_relpath)
if filename.endswith('.qza'):
artifacts.append(output_path)
elif filename.endswith('.qzv'):
visualizations.append(output_path)
return artifacts, visualizations
def _get_output_links_node(self, artifacts, visualizations):
# TODO it may be worth making the output data links admonition its
# own type of admonition (e.g. similar to `qiime1-users` or
# `question` custom admonitions). Or maybe keeping it a general
# admonition and adding a custom `class` option is sufficient if
# we're mainly going for CSS styling. For now, a general admonition
# works.
node = docutils.nodes.admonition()
content = []
content.extend(self._get_output_links(artifacts, 'artifacts'))
content.extend(self._get_output_links(visualizations,
'visualizations'))
env = self._get_env()
content = docutils.statemachine.ViewList(content, env.docname)
self.state.nested_parse(content, 0, node)
return node
def _get_output_links(self, output_paths, name):
content = []
if output_paths:
# TODO it would be nice to not hardcode this.
url_prefix = 'https://docs.qiime2.org/%s/' % qiime2.__release__
# TODO it would be cool to format the artifacts/visualizations
# as tables instead of unordered lists, but will take a little
# work to format the RST tables correctly.
content.append('**Output %s:**' % name)
content.append('')
content.append('')
for output_path in output_paths:
download_url = url_prefix + output_path.url
content.append(
'* :file:`%s`: '
'`view <https://view.qiime2.org?src=%s>`__ | '
'`download <%s>`__' %
(output_path.file, urllib.parse.quote_plus(download_url),
download_url))
content.append('')
return content
def _parse_multiline_commands(self, previous, next):
result = previous.copy()
if result and result[-1].endswith('\\'):
result[-1] = result[-1][:-1]
result[-1] += next.strip()
else:
result.append(next.strip())
return result
def setup(app):
app.connect('builder-inited', setup_working_dir)
app.connect('build-finished', teardown_working_dir)
app.add_directive('command-block', CommandBlockDirective)
app.add_directive('download', CommandBlockDirective)
app.add_config_value('command_block_no_exec', False, 'html')
app.add_node(download_node, html=(visit_download_node,
depart_download_node))
return {'version': '0.0.1'}
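# A hypothetical usage sketch in a reStructuredText source page, assuming this
# module is registered as a Sphinx extension in conf.py. The command and URL
# below are illustrative only, not taken from the QIIME 2 docs:
#
#   .. command-block::
#
#      echo "Hello, QIIME 2" > hello.txt
#
#   .. download::
#      :url: https://example.com/data/table.qza
#      :saveas: table.qza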
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Converts a set of Objective-C headers commented using TomDoc to headers
documented using Doxygen or Appledoc.
"""
from __future__ import print_function
__author__ = 'Whirliwig'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = 'ant@dervishsoftware.com'
DEBUG = False
verbose = False
import sys
from optparse import OptionParser
from glob import glob
from os import path, makedirs
from collections import OrderedDict
from textwrap import dedent
import re
def debug_log(log_message):
if DEBUG:
print(log_message, file=sys.stderr)
# From http://code.activestate.com/recipes/410692/
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        yield self.match
        # A bare return ends the generator; raising StopIteration inside a
        # generator is an error on Python 3.7+ (PEP 479).
        return
def match(self, *args):
if self.fall or not args:
return True
elif self.value in args:
self.fall = True
return True
else:
return False
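# Usage sketch (this is the pattern ObjcHeaderParser.parse uses below):
#   for case in switch(self.state):
#       if case(OUTSIDE_COMMENT, INSIDE_COMMENT):
#           ...  # handle these states
#       elif case(BRIEF_DESCRIPTION):
#           ...  # and so on for the remaining states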
# States for inside class declaration
OUTSIDE_COMMENT = 0
INSIDE_COMMENT = 1
BRIEF_DESCRIPTION = 2
DETAILED_DESCRIPTION = 3
PARAM_DESCRIPTION = 4
EXAMPLES_SECTION = 5
RETURN_DESCRIPTION = 6
# Top-level states
OUTSIDE_CLASS_DECL = 0
INSIDE_CLASS_DECL = 1
class CommentBlock(object):
def __init__(self):
self.params = OrderedDict()
self.brief = ''
self.detail = ''
self.param_name = None
self.param_description = ''
self.return_description = ''
self.examples = ''
def has_brief(self):
return len(self.brief) > 0
def has_detail(self):
return len(self.detail) > 0
def has_return(self):
return len(self.return_description) > 0
def has_params(self):
return len(self.params) > 0
def has_examples(self):
return len(self.examples) > 0
def has_non_brief_content(self):
return self.has_detail() or self.has_params() \
or self.has_examples() or self.has_return()
def has_content(self):
return self.has_brief() or self.has_non_brief_content()
def set_current_param(self, name=None, desc=''):
if self.param_name:
self.params[self.param_name] = self.param_description
self.param_description = desc
self.param_name = name
class TranslateHeaderParser(object):
comment_line_regex = re.compile(r'^\s{0,3}(?:/?\*\*?|//[^/]?)(\s*)(.*)')
def __init__(self, file_handle, header_name=None):
self.input_file_handle = file_handle
self.header_name = header_name
def parse(self, output_file_handle, source_code_formatter):
if self.header_name and verbose:
print('Parsing {}'.format(self.header_name))
for line in self.input_file_handle:
line = line.rstrip('\n')
matches = TranslateHeaderParser.comment_line_regex.match(line)
if matches:
leading_spaces, content = matches.groups()
print("///{}{}".format(leading_spaces,content), file=output_file_handle)
else:
print(line, file=output_file_handle)
class ObjcHeaderParser(object):
comment_line_regex = re.compile(r'^(?:/\*\*?|///?)(\s*)(.*)')
interface_regex = \
re.compile(r'^\s*@interface\s+(\w+(?:\s+:)|\w+\s*\(\w+\))')
end_regex = re.compile(r'^\s*@end')
param_regex = re.compile(r'(\w+)\s+-\s+(.+)$')
return_regex = re.compile(r'[Rr]eturns\s+(.+)$')
examples_regex = re.compile(r'^\s*Example[s:]')
list_regex = re.compile(r'^[\-1-9\*]\.?\s')
def __init__(self, file_handle, header_name=None):
self.input_file_handle = file_handle
self.header_name = header_name
self.state = OUTSIDE_COMMENT
self.outer_state = OUTSIDE_CLASS_DECL
self.comment = CommentBlock()
def next_section(self, content):
return_matches = ObjcHeaderParser.return_regex.match(content)
new_state = None
if return_matches:
self.comment.set_current_param()
debug_log('>>>>Start of returns: {}'.format(content))
self.comment.return_description = return_matches.group(1)
new_state = RETURN_DESCRIPTION
else:
param_matches = ObjcHeaderParser.param_regex.match(content)
if param_matches:
self.comment.set_current_param(param_matches.group(1),
param_matches.group(2))
debug_log('>>>>Param: {} = {}'.format(self.comment.param_name,
self.comment.param_description))
new_state = PARAM_DESCRIPTION
else:
if ObjcHeaderParser.examples_regex.match(content):
self.comment.detail += '''
**Examples**
'''
self.comment.set_current_param()
debug_log('>>>>Start of examples: {}'.format(content))
new_state = EXAMPLES_SECTION
return new_state
def parse(self, output_file_handle, source_code_formatter):
if self.header_name and verbose:
print('Parsing {}'.format(self.header_name))
saved_comment = ''
for line in self.input_file_handle:
line = line.strip()
matches = ObjcHeaderParser.comment_line_regex.match(line)
if matches or len(line) == 0 and self.state != OUTSIDE_COMMENT:
if matches:
leading_spaces, content = matches.groups()
for case in switch(self.state):
if case(OUTSIDE_COMMENT, INSIDE_COMMENT):
if content:
new_state = self.next_section(content)
if not new_state:
debug_log('>>>>Brief: {}'.format(content))
self.state = BRIEF_DESCRIPTION
self.comment.brief = \
' '.join([self.comment.brief,
content])
else:
self.state = new_state
else:
self.state = INSIDE_COMMENT
elif case(BRIEF_DESCRIPTION):
if not content:
debug_log('<<<<End Brief')
self.state = DETAILED_DESCRIPTION
else:
self.comment.brief = \
' '.join([self.comment.brief, content])
elif case(DETAILED_DESCRIPTION):
if content:
new_state = self.next_section(content)
if not new_state:
debug_log('>>>>Detail: {}'.format(content))
if ObjcHeaderParser.list_regex.match(content):
self.comment.detail += '\n'
else:
self.comment.detail += ' '
self.comment.detail += content
else:
self.state = new_state
else:
self.comment.detail = \
'{}\n'.format(self.comment.detail)
elif case(EXAMPLES_SECTION):
if content:
new_state = self.next_section(content)
if not new_state:
debug_log('>>>>Examples: {}'.format(content))
self.comment.examples += '\n'
self.comment.examples += leading_spaces
self.comment.examples += content
else:
self.state = new_state
else:
self.comment.examples = \
'{}\n'.format(self.comment.examples)
elif case(PARAM_DESCRIPTION):
if content:
new_state = self.next_section(content)
if not new_state:
debug_log('>>>>Param: {}'.format(content))
self.comment.param_description = \
' '.join([self.comment.param_description,
content])
else:
self.state = new_state
else:
debug_log('<<<<End Param {}'.format(self.comment.param_name))
self.comment.set_current_param()
self.state = DETAILED_DESCRIPTION
elif case(RETURN_DESCRIPTION):
if content:
debug_log('>>>>Return: {}'.format(content))
self.comment.return_description = \
' '.join([self.comment.return_description,
content])
else:
self.state = DETAILED_DESCRIPTION
if self.state is not OUTSIDE_COMMENT:
saved_comment += line
saved_comment += '\n'
else:
# Not a comment line
if_matches = ObjcHeaderParser.interface_regex.match(line)
if if_matches:
self.outer_state = INSIDE_CLASS_DECL
if self.state == OUTSIDE_COMMENT:
output_file_handle.write(source_code_formatter.single_line_comment(
'Documentation for {}'.format(if_matches.group(1))))
if self.state != OUTSIDE_COMMENT:
debug_log('Leaving comment')
if self.outer_state == INSIDE_CLASS_DECL \
and self.comment.has_content():
# Process comment here
formatted_comment = \
source_code_formatter.format_source(self.comment)
if formatted_comment:
output_file_handle.write(formatted_comment)
elif self.comment.has_content():
# A comment outside a class declaration will be printed verbatim
output_file_handle.write(saved_comment)
self.comment = CommentBlock()
saved_comment = ''
self.state = OUTSIDE_COMMENT
if ObjcHeaderParser.end_regex.match(line) \
and self.outer_state == INSIDE_CLASS_DECL:
self.outer_state = OUTSIDE_CLASS_DECL
output_file_handle.write('{}\n'.format(line))
class SourceCodeFormatter(object):
def format_source(self, comment):
pass
class DoxygenSourceCodeFormatter(SourceCodeFormatter):
def format_source(self, comment):
output = None
if not comment.has_brief() and comment.has_return():
comment.brief = \
'Returns {}'.format(comment.return_description.split('.'
)[0])
if comment.has_brief():
output = '//! {}'.format(comment.brief.strip())
if comment.has_non_brief_content():
output += '''
/*!
'''
if comment.has_detail():
detail_sections = comment.detail.strip().split('\n')
for detail_section in detail_sections:
output += \
''' * {}
*
'''.format(detail_section.strip())
if comment.has_examples():
output += ' * \code\n'
output += '\n'.join([' * {}'.format(x) for x in
comment.examples.strip('\n').split('\n'
)])
output += '''
* \endcode
'''
if comment.has_params():
for (param_name, param_description) in \
comment.params.items():
output += \
''' * \param {} {}
*
'''.format(param_name,
param_description)
if comment.has_return():
output += \
' * \\return {}\n'.format(comment.return_description)
output += ' */'
output += '\n'
if DEBUG:
print(output)
return output
def single_line_comment(self, content):
return '//! {}\n'.format(content)
class AppledocSourceCodeFormatter(SourceCodeFormatter):
selector_regex = re.compile(r'(\[[\w :+\-]+\])')
class_regex = re.compile(r'(\s)(RAC\w+)\b')
def add_crossrefs(self, comment):
# comment = AppledocSourceCodeFormatter.selector_regex.sub(r' \1 ',comment)
# comment = AppledocSourceCodeFormatter.class_regex.sub(r'\1\2 ',comment)
return comment
def format_source(self, comment):
output = None
if not comment.has_brief() and comment.has_return():
comment.brief = \
'Returns {}'.format(comment.return_description.split('.'
)[0])
if comment.has_brief():
output = \
'/** {}'.format(self.add_crossrefs(comment.brief.strip()))
if not comment.has_non_brief_content():
output += ' */'
if comment.has_non_brief_content():
if not output:
output = '/**'
output += '''
*
'''
if comment.has_detail():
detail_sections = \
self.add_crossrefs(comment.detail.strip()).split('\n'
)
for detail_section in detail_sections:
output += \
''' * {}
*
'''.format(detail_section.strip())
if comment.has_examples():
output += '\n'.join([' *\t{}'.format(x) for x in
dedent(comment.examples.strip('\n')).split('\n'
)])
output += '\n'
if comment.has_params():
for (param_name, param_description) in \
comment.params.items():
output += \
''' * \param {} {}
*
'''.format(param_name,
self.add_crossrefs(param_description))
if comment.has_return():
output += \
' * \\return {}\n'.format(self.add_crossrefs(comment.return_description))
output += ' */'
output += '\n'
if DEBUG:
print(output)
return output
def single_line_comment(self, content):
return '/** {} */\n'.format(content)
class InputTranslator:
tomdoc = 0
simple = 1
class OutputGenerator:
appledoc = 0
doxygen = 1
def generate(input_dirs, output_dir, input_translator=InputTranslator.tomdoc, generator=OutputGenerator.appledoc, verbose=False):
use_stdin = False
use_stdout = False
if len(input_dirs) == 0:
use_stdin = True
if use_stdin and not output_dir or output_dir == '-':
use_stdout = True
if not use_stdin:
input_paths = [path.abspath(p) for p in input_dirs]
if len(input_paths) > 1:
common_prefix = path.commonprefix(input_paths)
else:
common_prefix = path.dirname(input_paths[0])
if output_dir:
output_dir = path.abspath(output_dir)
else:
output_dir = path.abspath('./formatted_headers')
if generator == OutputGenerator.appledoc:
source_code_formatter = AppledocSourceCodeFormatter()
else:
source_code_formatter = DoxygenSourceCodeFormatter()
if not use_stdout and not path.exists(output_dir):
makedirs(output_dir)
for header_path in input_paths:
if path.isdir(header_path):
files = glob(path.join(header_path, '*'))
else:
files = [header_path]
files = [f for f in files if path.isfile(f)
and path.splitext(f)[1] == '.h']
for header_file in files:
relative_path = path.relpath(header_file,common_prefix)
if not use_stdout:
output_file = path.join(output_dir, relative_path)
write_dir = path.dirname(output_file)
if not path.exists(write_dir):
makedirs(path.dirname(output_file))
with open(header_file, 'rU') as input_file_handle:
with open(output_file, 'w') as \
output_file_handle:
if verbose:
print('Converting {} --> {}'.format(header_file,
output_file))
if input_translator == InputTranslator.tomdoc:
header_parser = \
ObjcHeaderParser(input_file_handle,
path.basename(header_file))
else:
header_parser = \
TranslateHeaderParser(input_file_handle,
path.basename(header_file))
header_parser.parse(output_file_handle,
source_code_formatter)
else:
with open(header_file, 'rU') as input_file_handle:
header_parser = ObjcHeaderParser(input_file_handle,
path.basename(header_file))
header_parser.parse(sys.stdout,
source_code_formatter)
    else:
        source_code_formatter = (AppledocSourceCodeFormatter()
                                 if generator == OutputGenerator.appledoc
                                 else DoxygenSourceCodeFormatter())
        header_parser = ObjcHeaderParser(sys.stdin)
        header_parser.parse(sys.stdout, source_code_formatter)
def parse_args():
parser = \
OptionParser(usage='usage: %prog [options] filenames|directory'
, version='%prog 1.0')
parser.add_option( # optional because action defaults to "store"
'-o',
'--outputdir',
action='store',
dest='outputdir',
default=None,
help='The directory to put output files',
)
parser.add_option(
'-a',
'--appledoc',
action='store_true',
dest='appledoc',
default=False,
help='Generate Appledoc output',
)
parser.add_option(
'-d',
'--doxygen',
action='store_true',
dest='doxygen',
default=False,
help='Generate Doxygen output',
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='Turn on verbose output',
)
(options, args) = parser.parse_args()
output_dir = options.outputdir
if options.appledoc:
generator = OutputGenerator.appledoc
elif options.doxygen:
generator = OutputGenerator.doxygen
else:
print('Must specify --appledoc or --doxygen')
        parser.print_usage()
sys.exit(1)
verbose = options.verbose
return (args, output_dir, generator, verbose)
"""
--------------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------------------
"""
if __name__ == '__main__':
(input_dirs, output_dir, generator, verbose) = parse_args()
    generate(input_dirs, output_dir, generator=generator, verbose=verbose)
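# Hypothetical invocations (the script name and paths are illustrative;
# substitute the real filename and header locations):
#   python tomdoc_converter.py --appledoc -o formatted_headers ./Headers
#   python tomdoc_converter.py --doxygen SomeClass.h AnotherClass.h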
|
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Monitors Docker containers for CPU and memory usage, and
# prepares an HTML timeline based on said monitoring.
#
# Usage example:
# mon = monitor.ContainerMonitor("monitoring.txt")
# mon.start()
# # container1 is an object with attributes id, name, and logfile.
# mon.add(container1)
# mon.add(container2)
# mon.stop()
# timeline = monitor.Timeline("monitoring.txt",
#                             [container1, container2],
#                             re.compile(">>> "),
#                             "my-build-name")
# timeline.create("output.html")
import datetime
import json
import logging
import os
import shutil
import subprocess
import threading
import time
# Unit for reporting user/system CPU seconds in cpuacct.stat.
# See https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt and time(7).
USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
def total_memory():
"""Returns total RAM on system, in GB."""
return _memory()[0]
def used_memory():
"""Returns total used RAM on system, in GB."""
return _memory()[1]
def _memory():
"""Returns (total, used) memory on system, in GB.
Used is computed as total - available.
Calls "free" and parses output. Sample output for reference:
total used free shared buffers cache available
Mem: 126747197440 26363965440 56618553344 31678464 2091614208 41673064448 99384889344
Swap: 0 0 0
"""
free_lines = subprocess.check_output(["free", "-b", "-w"]).split('\n')
free_grid = [x.split() for x in free_lines]
# Identify columns for "total" and "available"
total_idx = free_grid[0].index("total")
available_idx = free_grid[0].index("available")
total = int(free_grid[1][1 + total_idx])
available = int(free_grid[1][1 + available_idx])
used = total - available
total_gb = total / (1024.0 * 1024.0 * 1024.0)
used_gb = used / (1024.0 * 1024.0 * 1024.0)
return (total_gb, used_gb)
def datetime_to_seconds_since_epoch(dt):
"""Converts a Python datetime to seconds since the epoch."""
return time.mktime(dt.timetuple())
def split_timestamp(line):
"""Parses timestamp at beginning of a line.
    Returns a tuple of seconds since the epoch and the rest
    of the line. Raises ValueError if the timestamp cannot be parsed.
    """
LENGTH = 26
FORMAT = "%Y-%m-%d %H:%M:%S.%f"
t = line[:LENGTH]
return (datetime_to_seconds_since_epoch(datetime.datetime.strptime(t, FORMAT)),
line[LENGTH + 1:])
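# For example (a sketch; the container id is illustrative and truncated):
#   split_timestamp("2018-02-02 09:01:37.143591 d8f64099 cpu user 2 system 5")
#   returns (<seconds since the epoch as a float>, "d8f64099 cpu user 2 system 5")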
class ContainerMonitor(object):
"""Monitors Docker containers.
Monitoring data is written to a file. An example is:
2018-02-02 09:01:37.143591 d8f640989524be3939a70557a7bf7c015ba62ea5a105a64c94472d4ebca93c50 cpu user 2 system 5
2018-02-02 09:01:37.143591 d8f640989524be3939a70557a7bf7c015ba62ea5a105a64c94472d4ebca93c50 memory cache 11481088 rss 4009984 rss_huge 0 mapped_file 8605696 dirty 24576 writeback 0 pgpgin 4406 pgpgout 624 pgfault 3739 pgmajfault 99 inactive_anon 0 active_anon 3891200 inactive_file 7614464 active_file 3747840 unevictable 0 hierarchical_memory_limit 9223372036854771712 total_cache 11481088 total_rss 4009984 total_rss_huge 0 total_mapped_file 8605696 total_dirty 24576 total_writeback 0 total_pgpgin 4406 total_pgpgout 624 total_pgfault 3739 total_pgmajfault 99 total_inactive_anon 0 total_active_anon 3891200 total_inactive_file 7614464 total_active_file 3747840 total_unevictable 0
That is, the format is:
<timestamp> <container> cpu user <usercpu> system <systemcpu>
<timestamp> <container> memory <contents of memory.stat without newlines>
<usercpu> and <systemcpu> are in the units of USER_HZ.
See https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt for documentation
on memory.stat; it's in the "memory" cgroup, often mounted at
/sys/fs/cgroup/memory/<cgroup>/memory.stat.
This format is parsed back by the Timeline class below and should
not be considered an API.
"""
def __init__(self, output_path, frequency_seconds=1):
"""frequency_seconds is how often metrics are gathered"""
self.containers = []
self.output_path = output_path
self.keep_monitoring = None
self.monitor_thread = None
self.frequency_seconds = frequency_seconds
def start(self):
self.keep_monitoring = True
self.monitor_thread = threading.Thread(target=self._monitor)
self.monitor_thread.setDaemon(True)
self.monitor_thread.start()
def stop(self):
self.keep_monitoring = False
self.monitor_thread.join()
def add(self, container):
"""Adds monitoring for container, which is an object with property 'id'."""
self.containers.append(container)
@staticmethod
def _metrics_from_stat_file(root, container, stat):
"""Returns metrics stat file contents.
root: a cgroups root (a path as a string)
container: an object with string attribute id
stat: a string filename
Returns contents of <root>/<container.id>/<stat>
with newlines replaced with spaces.
Returns None on errors.
"""
dirname = os.path.join(root, "docker", container.id)
if not os.path.isdir(dirname):
# Container may no longer exist.
return None
try:
statcontents = file(os.path.join(dirname, stat)).read()
return statcontents.replace("\n", " ").strip()
except IOError, e:
# Ignore errors; cgroup can disappear on us.
logging.warning("Ignoring exception reading cgroup. " +
"This can happen if container just exited. " + str(e))
return None
def _monitor(self):
"""Monitors CPU usage of containers.
        Output is stored in self.output_path.
Also, keeps track of minimum and maximum memory usage (for the machine).
"""
# Ubuntu systems typically mount cpuacct cgroup in /sys/fs/cgroup/cpu,cpuacct,
# but this can vary by OS distribution.
all_cgroups = subprocess.check_output(
"findmnt -n -o TARGET -t cgroup --source cgroup".split()
).split("\n")
cpuacct_root = [c for c in all_cgroups if "cpuacct" in c][0]
memory_root = [c for c in all_cgroups if "memory" in c][0]
logging.info("Using cgroups: cpuacct %s, memory %s", cpuacct_root, memory_root)
self.min_memory_usage_gb = None
self.max_memory_usage_gb = None
with file(self.output_path, "w") as output:
while self.keep_monitoring:
# Use a single timestamp for a given round of monitoring.
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
for c in self.containers:
cpu = self._metrics_from_stat_file(cpuacct_root, c, "cpuacct.stat")
memory = self._metrics_from_stat_file(memory_root, c, "memory.stat")
if cpu:
output.write("%s %s cpu %s\n" % (now, c.id, cpu))
if memory:
output.write("%s %s memory %s\n" % (now, c.id, memory))
output.flush()
# Machine-wide memory usage
m = used_memory()
if self.min_memory_usage_gb is None:
self.min_memory_usage_gb, self.max_memory_usage_gb = m, m
else:
self.min_memory_usage_gb = min(self.min_memory_usage_gb, m)
self.max_memory_usage_gb = max(self.max_memory_usage_gb, m)
time.sleep(self.frequency_seconds)
class Timeline(object):
"""Given metric and log data for containers, creates a timeline report.
This is a standalone HTML file with a timeline for the log files and CPU charts for
the containers. The HTML uses https://developers.google.com/chart/ for rendering
the charts, which happens in the browser.
"""
def __init__(self, monitor_file, containers, interesting_re, buildname):
self.monitor_file = monitor_file
self.containers = containers
self.interesting_re = interesting_re
self.buildname = buildname
def logfile_timeline(self, container):
"""Returns a list of (name, timestamp, line) tuples for interesting lines in
the container's logfile. container is expected to have name and logfile attributes.
"""
interesting_lines = [
line.strip()
for line in file(container.logfile)
if self.interesting_re.search(line)]
return [(container.name,) + split_timestamp(line) for line in interesting_lines]
def parse_metrics(self, f):
"""Parses timestamped metric lines.
Given metrics lines like:
2017-10-25 10:08:30.961510 87d5562a5fe0ea075ebb2efb0300d10d23bfa474645bb464d222976ed872df2a cpu user 33 system 15
Returns an iterable of (ts, container, user_cpu, system_cpu). It also updates
container.peak_total_rss and container.total_user_cpu and container.total_system_cpu.
"""
prev_by_container = {}
peak_rss_by_container = {}
for line in f:
ts, rest = split_timestamp(line.rstrip())
total_rss = None
try:
container, metric_type, rest2 = rest.split(" ", 2)
if metric_type == "cpu":
_, user_cpu_s, _, system_cpu_s = rest2.split(" ", 3)
elif metric_type == "memory":
memory_metrics = rest2.split(" ")
total_rss = int(memory_metrics[memory_metrics.index("total_rss") + 1 ])
except:
logging.warning("Skipping metric line: %s", line)
continue
if total_rss is not None:
peak_rss_by_container[container] = max(peak_rss_by_container.get(container, 0),
total_rss)
continue
prev_ts, prev_user, prev_system = prev_by_container.get(
container, (None, None, None))
user_cpu = int(user_cpu_s)
system_cpu = int(system_cpu_s)
if prev_ts is not None:
# Timestamps are seconds since the epoch and are floats.
dt = ts - prev_ts
assert type(dt) == float
if dt != 0:
yield ts, container, (user_cpu - prev_user)/dt/USER_HZ,\
(system_cpu - prev_system)/dt/USER_HZ
prev_by_container[container] = ts, user_cpu, system_cpu
# Now update container totals
for c in self.containers:
if c.id in prev_by_container:
_, u, s = prev_by_container[c.id]
c.total_user_cpu, c.total_system_cpu = u / USER_HZ, s / USER_HZ
if c.id in peak_rss_by_container:
c.peak_total_rss = peak_rss_by_container[c.id]
def create(self, output):
# Read logfiles
timelines = []
for c in self.containers:
if not os.path.exists(c.logfile):
logging.warning("Missing log file: %s", c.logfile)
continue
timelines.append(self.logfile_timeline(c))
# Convert timelines to JSON
min_ts = None
timeline_json = []
for timeline in timelines:
for current_line, next_line in zip(timeline, timeline[1:]):
name, ts_current, msg = current_line
_, ts_next, _ = next_line
timeline_json.append(
[name, msg, ts_current, ts_next]
)
if not timeline_json:
logging.warning("No timeline data; skipping timeline")
return
min_ts = min(x[2] for x in timeline_json)
for row in timeline_json:
row[2] = row[2] - min_ts
row[3] = row[3] - min_ts
# metrics_by_container: container -> [ ts, user, system ]
metrics_by_container = dict()
max_metric_ts = 0
container_by_id = dict()
for c in self.containers:
container_by_id[c.id] = c
for ts, container_id, user, system in self.parse_metrics(file(self.monitor_file)):
container = container_by_id.get(container_id)
if not container:
continue
if ts > max_metric_ts:
max_metric_ts = ts
if ts < min_ts:
# We ignore metrics that show up before the timeline's
# first messages. This largely avoids a bug in the
# Google Charts visualization code wherein one of the series seems
# to wrap around.
continue
metrics_by_container.setdefault(
container.name, []).append((ts - min_ts, user, system))
with file(output, "w") as o:
template_path = os.path.join(os.path.dirname(__file__), "timeline.html.template")
shutil.copyfileobj(file(template_path), o)
o.write("\n<script>\nvar data = \n")
json.dump(dict(buildname=self.buildname, timeline=timeline_json,
metrics=metrics_by_container, max_ts=(max_metric_ts - min_ts)), o, indent=2)
o.write("</script>")
o.close()
|
|
import os.path
from UserDict import DictMixin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.db.models.fields.files import ImageField, ImageFieldFile
from django.core.files.base import ContentFile
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.utils.html import escape
from django.conf import settings
from sorl.thumbnail.base import Thumbnail
from sorl.thumbnail.main import DjangoThumbnail, build_thumbnail_name
from sorl.thumbnail.utils import delete_thumbnails
REQUIRED_ARGS = ('size',)
ALL_ARGS = {
'size': 'requested_size',
'options': 'opts',
'quality': 'quality',
'basedir': 'basedir',
'subdir': 'subdir',
'prefix': 'prefix',
'extension': 'extension',
}
BASE_ARGS = {
'size': 'requested_size',
'options': 'opts',
'quality': 'quality',
}
TAG_HTML = '<img src="%(src)s" width="%(width)s" height="%(height)s" alt="" />'
class ThumbsDict(object, DictMixin):
def __init__(self, descriptor):
super(ThumbsDict, self).__init__()
self.descriptor = descriptor
def keys(self):
return self.descriptor.field.extra_thumbnails.keys()
class LazyThumbs(ThumbsDict):
def __init__(self, *args, **kwargs):
super(LazyThumbs, self).__init__(*args, **kwargs)
self.cached = {}
def __getitem__(self, key):
thumb = self.cached.get(key)
if not thumb:
args = self.descriptor.field.extra_thumbnails[key]
thumb = self.descriptor._build_thumbnail(args)
self.cached[key] = thumb
return thumb
def keys(self):
return self.descriptor.field.extra_thumbnails.keys()
class ThumbTags(ThumbsDict):
def __getitem__(self, key):
thumb = self.descriptor.extra_thumbnails[key]
return self.descriptor._build_thumbnail_tag(thumb)
class BaseThumbnailFieldFile(ImageFieldFile):
def _build_thumbnail(self, args):
# Build the DjangoThumbnail kwargs.
kwargs = {}
for k, v in args.items():
kwargs[ALL_ARGS[k]] = v
# Build the destination filename and return the thumbnail.
name_kwargs = {}
for key in ['size', 'options', 'quality', 'basedir', 'subdir',
'prefix', 'extension']:
name_kwargs[key] = args.get(key)
source = getattr(self.instance, self.field.name)
dest = build_thumbnail_name(source.name, **name_kwargs)
return DjangoThumbnail(source, relative_dest=dest, **kwargs)
def _build_thumbnail_tag(self, thumb):
opts = dict(src=escape(thumb), width=thumb.width(),
height=thumb.height())
return mark_safe(self.field.thumbnail_tag % opts)
def _get_extra_thumbnails(self):
if self.field.extra_thumbnails is None:
return None
if not hasattr(self, '_extra_thumbnails'):
self._extra_thumbnails = LazyThumbs(self)
return self._extra_thumbnails
extra_thumbnails = property(_get_extra_thumbnails)
def _get_extra_thumbnails_tag(self):
if self.field.extra_thumbnails is None:
return None
return ThumbTags(self)
extra_thumbnails_tag = property(_get_extra_thumbnails_tag)
def save(self, *args, **kwargs):
# Optionally generate the thumbnails after the image is saved.
super(BaseThumbnailFieldFile, self).save(*args, **kwargs)
if self.field.generate_on_save:
self.generate_thumbnails()
def delete(self, *args, **kwargs):
# Delete any thumbnails too (and not just ones defined here in case
# the {% thumbnail %} tag was used or the thumbnail sizes changed).
relative_source_path = getattr(self.instance, self.field.name).name
delete_thumbnails(relative_source_path)
super(BaseThumbnailFieldFile, self).delete(*args, **kwargs)
def generate_thumbnails(self):
# Getting the thumbs generates them.
if self.extra_thumbnails:
self.extra_thumbnails.values()
class ImageWithThumbnailsFieldFile(BaseThumbnailFieldFile):
def _get_thumbnail(self):
return self._build_thumbnail(self.field.thumbnail)
thumbnail = property(_get_thumbnail)
def _get_thumbnail_tag(self):
return self._build_thumbnail_tag(self.thumbnail)
thumbnail_tag = property(_get_thumbnail_tag)
def generate_thumbnails(self, *args, **kwargs):
self.thumbnail.generate()
Super = super(ImageWithThumbnailsFieldFile, self)
return Super.generate_thumbnails(*args, **kwargs)
class ThumbnailFieldFile(BaseThumbnailFieldFile):
def save(self, name, content, *args, **kwargs):
new_content = StringIO()
# Build the Thumbnail kwargs.
thumbnail_kwargs = {}
for k, argk in BASE_ARGS.items():
if not k in self.field.thumbnail:
continue
thumbnail_kwargs[argk] = self.field.thumbnail[k]
Thumbnail(source=content, dest=new_content, **thumbnail_kwargs)
new_content = ContentFile(new_content.read())
super(ThumbnailFieldFile, self).save(name, new_content, *args,
**kwargs)
def _get_thumbnail_tag(self):
opts = dict(src=escape(self.url), width=self.width,
height=self.height)
return mark_safe(self.field.thumbnail_tag % opts)
thumbnail_tag = property(_get_thumbnail_tag)
class BaseThumbnailField(ImageField):
def __init__(self, *args, **kwargs):
# The new arguments for this field aren't explicitly defined so that
# users can still use normal ImageField positional arguments.
self.extra_thumbnails = kwargs.pop('extra_thumbnails', None)
self.thumbnail_tag = kwargs.pop('thumbnail_tag', TAG_HTML)
self.generate_on_save = kwargs.pop('generate_on_save', False)
super(BaseThumbnailField, self).__init__(*args, **kwargs)
_verify_thumbnail_attrs(self.thumbnail)
if self.extra_thumbnails:
for extra, attrs in self.extra_thumbnails.items():
                name = "%r of 'extra_thumbnails'" % extra
_verify_thumbnail_attrs(attrs, name)
def south_field_triple(self):
"""
Return a suitable description of this field for South.
"""
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.files.ImageField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
class ImageWithThumbnailsField(BaseThumbnailField):
"""
photo = ImageWithThumbnailsField(
upload_to='uploads',
thumbnail={'size': (80, 80), 'options': ('crop', 'upscale'),
'extension': 'png'},
extra_thumbnails={
'admin': {'size': (70, 50), 'options': ('sharpen',)},
}
)
"""
attr_class = ImageWithThumbnailsFieldFile
def __init__(self, *args, **kwargs):
self.thumbnail = kwargs.pop('thumbnail', None)
super(ImageWithThumbnailsField, self).__init__(*args, **kwargs)
class ThumbnailField(BaseThumbnailField):
"""
avatar = ThumbnailField(
upload_to='uploads',
size=(200, 200),
options=('crop',),
extra_thumbnails={
            'admin': {'size': (70, 50), 'options': ('crop', 'sharpen')},
}
)
"""
attr_class = ThumbnailFieldFile
def __init__(self, *args, **kwargs):
self.thumbnail = {}
for attr in ALL_ARGS:
if attr in kwargs:
self.thumbnail[attr] = kwargs.pop(attr)
super(ThumbnailField, self).__init__(*args, **kwargs)
def _verify_thumbnail_attrs(attrs, name="'thumbnail'"):
for arg in REQUIRED_ARGS:
if arg not in attrs:
raise TypeError('Required attr %r missing in %s arg' % (arg, name))
for attr in attrs:
if attr not in ALL_ARGS:
            raise TypeError('Invalid attr %r found in %s arg' % (attr, name))
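# Hedged usage sketch (illustrative only): the Profile model and its 'photo'
# field below are hypothetical and assume a configured Django project; the
# helper merely shows how the attributes exposed by the field files above
# (thumbnail_tag, extra_thumbnails, extra_thumbnails_tag) are typically used.
#
#     class Profile(models.Model):
#         photo = ImageWithThumbnailsField(
#             upload_to='profiles',
#             thumbnail={'size': (120, 120), 'options': ('crop',)},
#             extra_thumbnails={'admin': {'size': (70, 50)}},
#             generate_on_save=True)
def _example_thumbnail_usage(profile):
    """Return the main and 'admin' thumbnail tags for a model instance."""
    main_tag = profile.photo.thumbnail_tag                  # safe <img .../> HTML
    admin_thumb = profile.photo.extra_thumbnails['admin']   # lazily built DjangoThumbnail
    admin_tag = profile.photo.extra_thumbnails_tag['admin']
    return main_tag, admin_thumb, admin_tag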
import glob
import numpy as np
from string import digits
from scipy.interpolate import pchip, Akima1DInterpolator
from openmdao.main.api import Component, Assembly
from openmdao.lib.datatypes.api import VarTree, Float, Array, Bool, Str, List, Int
from fusedwind.turbine.geometry_vt import BladeSurfaceVT, BladePlanformVT, Curve, AirfoilShape
from fusedwind.turbine.geometry import RedistributedBladePlanform, SplineComponentBase, FFDSplineComponentBase
from fusedwind.turbine.structure_vt import BladeStructureVT3D, CrossSectionStructureVT, BeamStructureVT
from fusedwind.turbine.rotoraero_vt import LoadVectorCaseList
from fusedwind.interface import base, implement_base
from fusedwind.lib.geom_tools import curvature
@base
class BladeStructureReaderBase(Component):
st3d = VarTree(BladeStructureVT3D(), iotype='out',
desc='Vartree containing discrete definition of blade structure')
@base
class BladeStructureWriterBase(Component):
st3d = VarTree(BladeStructureVT3D(), iotype='in',
desc='Vartree containing discrete definition of blade structure')
@base
class ModifyBladeStructureBase(Component):
st3dIn = VarTree(BladeStructureVT3D(), iotype='in',
desc='Vartree containing initial discrete definition of blade structure')
st3dOut = VarTree(BladeStructureVT3D(), iotype='out',
desc='Vartree containing modified discrete definition of blade structure')
@implement_base(BladeStructureReaderBase)
class BladeStructureReader(Component):
"""
input file reader of BladeStructureVT3D data
"""
filebase = Str(iotype='in')
st3d = VarTree(BladeStructureVT3D(), iotype='out',
desc='Vartree containing discrete definition of blade structure')
def execute(self):
self.read_layups()
self.read_materials()
def read_materials(self):
fid = open(self.filebase + '.mat', 'r')
materials = fid.readline().split()[1:]
data = np.loadtxt(fid)
for i, name in enumerate(materials):
mat = self.st3d.add_material(name)
try:
d = data[i, :]
except:
d = data
mat.E1 = d[0]
mat.E2 = d[1]
mat.E3 = d[2]
mat.nu12 = d[3]
mat.nu13 = d[4]
mat.nu23 = d[5]
mat.G12 = d[6]
mat.G13 = d[7]
mat.G23 = d[8]
mat.rho = d[9]
failcrit = {1:'maximum_strain', 2:'maximum_stress', 3:'tsai_wu'}
fid = open(self.filebase + '.failmat', 'r')
materials = fid.readline().split()[1:]
data = np.loadtxt(fid)
for i, name in enumerate(materials):
mat = self.st3d.add_material(name)
try:
d = data[i, :]
except:
d = data
mat.failure_criterium = failcrit[int(d[0])]
mat.s11_t = d[1]
mat.s22_t = d[2]
mat.s33_t = d[3]
mat.s11_c = d[4]
mat.s22_c = d[5]
mat.s33_c = d[6]
mat.t12 = d[7]
mat.t13 = d[8]
mat.t23 = d[9]
mat.e11_c = d[10]
mat.e22_c = d[11]
mat.e33_c = d[12]
mat.e11_t = d[13]
mat.e22_t = d[14]
mat.e33_t = d[15]
mat.g12 = d[16]
mat.g13 = d[17]
mat.g23 = d[18]
mat.gM0 = d[19]
mat.C1a = d[20]
mat.C2a = d[21]
mat.C3a = d[22]
mat.C4a = d[23]
def read_layups(self):
"""
        Read the blade layup definition from <filebase>.dp3d and the
        per-region <filebase>_<region>.st3d files.
        dp3d data format:
            # web00 web01
            # <DP index0> <DP index1>      (one line per web)
            # s DP00 DP01 DP02 DP03 DP04 DP05
            <float> <float> <float> <float> ... <float>
            ...
        st3d data format:
            # region00
            # s triax uniax core uniax01 triax01 core01
            <float> <float> <float> <float> <float> <float> <float>
            ...
        A usage sketch follows the class definition.
"""
self.dp_files = glob.glob(self.filebase + '.dp3d')
self.layup_files = glob.glob(self.filebase + '.st3d')
for dpfile in self.dp_files:
self._logger.info('reading dp_file: %s' % dpfile)
dpfid = open(dpfile, 'r')
# read webs
wnames = dpfid.readline().split()[1:]
iwebs = []
for w, wname in enumerate(wnames):
line = dpfid.readline().split()[1:]
line = [int(entry) for entry in line]
iwebs.append(line)
nwebs = len(iwebs)
header = dpfid.readline()
dpdata = np.loadtxt(dpfile)
nreg = dpdata.shape[1] - 2
try:
regions = header.split()[1:]
assert len(regions) == nreg
except:
regions = ['region%02d' % i for i in range(nreg)]
self.st3d.configure_regions(nreg, names=regions)
self.st3d.configure_webs(len(wnames), iwebs, names=wnames)
self.st3d.x = dpdata[:, 0]
self.st3d.DP00 = dpdata[:, 1]
for i, rname in enumerate(regions):
r = getattr(self.st3d, rname)
self._logger.info(' adding region: %s' % rname)
dpname = 'DP%02d' % (i + 1)
setattr(self.st3d, dpname, dpdata[:, i + 2])
layup_file = '_'.join([self.filebase, rname]) + '.st3d'
self._logger.info(' reading layup file %s' % layup_file)
fid = open(layup_file, 'r')
rrname = fid.readline().split()[1]
lheader = fid.readline().split()[1:]
cldata = np.loadtxt(fid)
layers = lheader[1:]
nl = len(lheader)
s = cldata[:, 0]
r.thickness = np.zeros(self.st3d.x.shape[0])
DP0 = getattr(self.st3d, 'DP%02d' % i)
DP1 = getattr(self.st3d, 'DP%02d' % (i + 1))
r.width = DP1 - DP0
for il, lname in enumerate(layers):
self._logger.info(' adding layer %s' % lname)
l = r.add_layer(lname)
l.thickness = cldata[:, il + 1]
r.thickness += l.thickness
try:
l.angle = cldata[:, il + 1 + nl]
except:
l.angle = np.zeros(s.shape[0])
for i, rname in enumerate(wnames):
r = getattr(self.st3d, rname)
self._logger.info(' adding web: %s' % rname)
layup_file = '_'.join([self.filebase, rname]) + '.st3d'
self._logger.info(' reading layup file %s' % layup_file)
fid = open(layup_file, 'r')
rrname = fid.readline().split()[1]
lheader = fid.readline().split()[1:]
cldata = np.loadtxt(fid)
layers = lheader[1:]
nl = len(lheader)
r.thickness = np.zeros(self.st3d.x.shape[0])
r.width = np.zeros(self.st3d.x.shape[0])
# assert len(lheader) == cldata.shape[1]
s = cldata[:, 0]
for il, lname in enumerate(layers):
self._logger.info(' adding layer %s' % lname)
l = r.add_layer(lname)
l.thickness = cldata[:, il + 1]
r.thickness += l.thickness
try:
l.angle = cldata[:, il + 1 + nl]
except:
l.angle = np.zeros(s.shape[0])
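# Hedged usage sketch for BladeStructureReader (illustrative only): the file
# base 'blade_st' is hypothetical and assumes that the .dp3d, .st3d, .mat and
# .failmat files described in read_layups/read_materials exist on disk.
def _example_read_blade_structure(filebase='blade_st'):
    """Run the reader and return the resulting BladeStructureVT3D vartree."""
    reader = BladeStructureReader()
    reader.filebase = filebase
    reader.execute()
    # reader.st3d.regions now holds the region names from the .dp3d header,
    # and reader.st3d.x the spanwise stations.
    return reader.st3d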
@implement_base(BladeStructureWriterBase)
class BladeStructureWriter(Component):
"""
input file writer of BladeStructureVT3D data
"""
filebase = Str('blade', iotype='in')
st3d = VarTree(BladeStructureVT3D(), iotype='in')
def execute(self):
try:
if '-fd' in self.itername or '-fd' in self.parent.itername:
return
else:
self.fbase = self.filebase + '_' + str(self.exec_count)
except:
self.fbase = self.filebase
self.write_layup_data()
self.write_materials()
def write_materials(self):
fid = open(self.fbase + '.mat', 'w')
fid.write('# %s\n' % (' '.join(self.st3d.materials.keys())))
fid.write('# E1 E2 E3 nu12 nu13 nu23 G12 G13 G23 rho\n')
matdata = []
for name, mat in self.st3d.materials.iteritems():
data = np.array([mat.E1,
mat.E2,
mat.E3,
mat.nu12,
mat.nu13,
mat.nu23,
mat.G12,
mat.G13,
mat.G23,
mat.rho])
matdata.append(data)
np.savetxt(fid, np.asarray(matdata))
failcrit = dict(maximum_strain=1, maximum_stress=2, tsai_wu=3)
fid = open(self.fbase + '.failmat', 'w')
fid.write('# %s\n' % (' '.join(self.st3d.materials.keys())))
        fid.write('# failcrit s11_t s22_t s33_t s11_c s22_c s33_c '
            't12 t13 t23 e11_c e22_c e33_c e11_t e22_t e33_t g12 g13 g23 '
            'gM0 C1a C2a C3a C4a\n')
matdata = []
for name, mat in self.st3d.materials.iteritems():
data = np.array([failcrit[mat.failure_criterium],
mat.s11_t,
mat.s22_t,
mat.s33_t,
mat.s11_c,
mat.s22_c,
mat.s33_c,
mat.t12,
mat.t13,
mat.t23,
mat.e11_c,
mat.e22_c,
mat.e33_c,
mat.e11_t,
mat.e22_t,
mat.e33_t,
mat.g12,
mat.g13,
mat.g23,
mat.gM0,
mat.C1a,
mat.C2a,
mat.C3a,
mat.C4a])
matdata.append(data)
fmt = '%i ' + ' '.join(23*['%.20e'])
np.savetxt(fid, np.asarray(matdata), fmt=fmt)
def write_layup_data(self):
DPs = []
fid1 = open(self.fbase + '.dp3d', 'w')
fid1.write('# %s\n' % (' '.join(self.st3d.webs)))
for web in self.st3d.iwebs:
fid1.write('# %i %i\n' % (web[0], web[1]))
fid1.write('# s %s\n' % (' '.join(self.st3d.DPs)))
DPs.append(self.st3d.x)
for i, rname in enumerate(self.st3d.regions):
self._logger.info(' writing region: %s' % rname)
reg = getattr(self.st3d, rname)
DP = getattr(self.st3d, 'DP%02d' % (i))
DPs.append(DP)
fname = '_'.join([self.fbase, rname]) + '.st3d'
fid = open(fname, 'w')
lnames = ' '.join(reg.layers)
fid.write('# %s\n' % rname)
fid.write('# s %s\n' % lnames)
data = []
data.append(self.st3d.x)
for lname in reg.layers:
self._logger.info(' writing layer: %s' % lname)
layer = getattr(reg, lname)
data.append(layer.thickness)
for lname in reg.layers:
self._logger.info(' writing layer: %s' % lname)
layer = getattr(reg, lname)
data.append(layer.angle)
data = np.asarray(data).T
np.savetxt(fid, data)
fid.close()
DPs.append(getattr(self.st3d, 'DP%02d' % (i + 1)))
DPs = np.asarray(DPs).T
np.savetxt(fid1, DPs)
for i, wname in enumerate(self.st3d.webs):
            self._logger.info(' writing web: %s' % wname)
reg = getattr(self.st3d, wname)
fname = '_'.join([self.fbase, wname]) + '.st3d'
fid = open(fname, 'w')
lnames = ' '.join(reg.layers)
fid.write('# %s\n' % wname)
fid.write('# s %s\n' % lnames)
data = []
data.append(self.st3d.x)
for lname in reg.layers:
self._logger.info(' writing layer: %s' % lname)
layer = getattr(reg, lname)
data.append(layer.thickness)
for lname in reg.layers:
self._logger.info(' writing layer: %s' % lname)
layer = getattr(reg, lname)
data.append(layer.angle)
data = np.asarray(data).T
np.savetxt(fid, data)
fid.close()
@base
class BeamStructureReaderBase(Component):
beam_structure = VarTree(BeamStructureVT(), iotype='out')
@base
class BeamStructureWriterBase(Component):
beam_structure = VarTree(BeamStructureVT(), iotype='in')
@implement_base(BeamStructureReaderBase)
class BeamStructureReader(Component):
"""
Default reader for a beam structure file.
"""
filename = Str(iotype='in')
beam_structure = VarTree(BeamStructureVT(), iotype='out')
def execute(self):
"""
        The format of the file should be (one row per radial station, 19 columns):
        s[0] dm[1] x_cg[2] y_cg[3] ri_x[4] ri_y[5] x_sh[6] y_sh[7] E[8]
        G[9] I_x[10] I_y[11] J[12] k_x[13] k_y[14] A[15] pitch[16] x_e[17] y_e[18]
        Sub-classes can overwrite this method to change the reader's behaviour.
        An illustrative sketch of a compatible file follows the class definition.
"""
print 'reading blade structure'
        if self.filename != '':
try:
st_data = np.loadtxt(self.filename)
except:
                raise RuntimeError('Error reading file %s' % self.filename)
if st_data.shape[1] < 19:
                raise RuntimeError('Blade structure data: expected 19 columns, got %i in %s' % (st_data.shape[1], self.filename))
self.beam_structure.s = st_data[:, 0]
self.beam_structure.dm = st_data[:, 1]
self.beam_structure.x_cg = st_data[:, 2]
self.beam_structure.y_cg = st_data[:, 3]
self.beam_structure.ri_x = st_data[:, 4]
self.beam_structure.ri_y = st_data[:, 5]
self.beam_structure.x_sh = st_data[:, 6]
self.beam_structure.y_sh = st_data[:, 7]
self.beam_structure.E = st_data[:, 8]
self.beam_structure.G = st_data[:, 9]
self.beam_structure.I_x = st_data[:, 10]
self.beam_structure.I_y = st_data[:, 11]
self.beam_structure.J = st_data[:, 12]
self.beam_structure.k_x = st_data[:, 13]
self.beam_structure.k_y = st_data[:, 14]
self.beam_structure.A = st_data[:, 15]
self.beam_structure.pitch = st_data[:, 16]
self.beam_structure.x_e = st_data[:, 17]
self.beam_structure.y_e = st_data[:, 18]
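# Hedged sketch of the 19-column file consumed by BeamStructureReader
# (illustrative only): the file name and the zero-filled dummy data are
# placeholders, not real beam properties.
def _example_beam_structure_file(filename='beam_st_example.dat', ni=10):
    """Write a dummy 19-column beam structure file and read it back."""
    data = np.zeros((ni, 19))
    data[:, 0] = np.linspace(0., 1., ni)  # column 0: running length s
    np.savetxt(filename, data)
    reader = BeamStructureReader()
    reader.filename = filename
    reader.execute()
    return reader.beam_structure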
@implement_base(BeamStructureWriterBase)
class BeamStructureWriter(Component):
"""
Default writer for a beam structure file.
"""
filename = Str(iotype='in')
beam_structure = VarTree(BeamStructureVT(), iotype='in',
desc='Vartree containing beam definition of blade structure')
def execute(self):
fid = open(self.filename, 'w')
# generate header
header = ['r', 'm', 'x_cg', 'y_cg', 'ri_x', 'ri_y', 'x_sh', 'y_sh', 'E',
'G', 'I_x', 'I_y', 'J', 'k_x', 'k_y', 'A', 'pitch', 'x_e', 'y_e']
        exp_prec = 10 # exponential precision
col_width = exp_prec + 8 # column width required for exp precision
header_full = '# ' + ''.join([(hh + ' [%i]').center(col_width+1)%i for i, hh in enumerate(header)])+'\n'
fid.write(header_full)
# convert to array
st = self.beam_structure
data = np.array([st.s,
st.dm,
st.x_cg,
st.y_cg,
st.ri_x,
st.ri_y,
st.x_sh,
st.y_sh,
st.E,
st.G,
st.I_x,
st.I_y,
st.J,
st.k_x,
st.k_y,
st.A,
st.pitch,
st.x_e,
st.y_e]).T
np.savetxt(fid, data, fmt='%'+' %i.%ie' % (col_width, exp_prec) )
fid.close()
@implement_base(ModifyBladeStructureBase)
class SplinedBladeStructure(Assembly):
"""
Class for building a complete spline parameterized
representation of the blade structure.
Outputs a BladeStructureVT3D vartree with a discrete
representation of the structural geometry.
    Interfaces with a BladeStructureBuilder class for generating code-specific
    inputs. A usage sketch follows this class definition.
"""
x = Array(iotype='in', desc='spanwise resolution of blade')
span_ni = Int(20, iotype='in', desc='Number of discrete points along span')
nC = Int(8, iotype='in', desc='Number of spline control points along span')
Cx = Array(iotype='in', desc='spanwise distribution of spline control points')
st3dIn = VarTree(BladeStructureVT3D(), iotype='in',
desc='Vartree containing initial discrete definition of blade structure')
st3dOut = VarTree(BladeStructureVT3D(), iotype='out',
desc='Vartree containing re-splined discrete definition of blade structure')
def __init__(self):
"""
        Initialize the assembly: add a RedistributedBladePlanform component
        named 'pf' and create the pfIn/pfOut passthroughs.
"""
super(SplinedBladeStructure, self).__init__()
self._nsec = 0
self.add('pf', RedistributedBladePlanform())
self.driver.workflow.add('pf')
self.create_passthrough('pf.pfIn')
self.create_passthrough('pf.pfOut')
self.connect('x', 'pf.x')
def configure_bladestructure(self, spline_type='pchip'):
"""
method for trawling through the st3dIn vartree
and initializing all spline curves in the assembly
"""
if self.x.shape[0] == 0:
self.x = np.linspace(0, 1, self.span_ni)
else:
self.span_ni = self.x.shape[0]
if self.Cx.shape[0] == 0:
self.Cx = np.linspace(0, 1, self.nC)
else:
self.nC = self.Cx.shape[0]
self.st3dOut = self.st3dIn.copy()
self.connect('x', 'st3dOut.x')
sec = self.st3dIn
nr = len(sec.regions)
for ip in range(nr + 1):
dpname = 'DP%02d' % ip
# division point spline
DPc = self.add(dpname, FFDSplineComponentBase(self.nC))
self.driver.workflow.add(dpname)
# DPc.log_level = logging.DEBUG
DPc.set_spline(spline_type)
x = getattr(sec, 'x')
DP = getattr(sec, dpname)
self.connect('x', '%s.x' % dpname)
self.connect('Cx', dpname + '.Cx')
DPc.xinit = x
DPc.Pinit = DP
self.connect(dpname + '.P', '.'.join(['st3dOut', dpname]))
self.create_passthrough(dpname + '.C', alias=dpname + '_C')
# regions
if ip < nr:
rname = 'region%02d' % ip
region = getattr(sec, rname)
for lname in region.layers:
layer = getattr(region, lname)
lcname = 'r%02d%s' % (ip, lname)
# thickness spline
lcomp = self.add(lcname+'T', FFDSplineComponentBase(self.nC))
self.driver.workflow.add(lcname+'T')
# lcomp.log_level = logging.DEBUG
lcomp.set_spline(spline_type)
self.connect('x', '%s.x' % (lcname + 'T'))
lcomp.xinit = sec.x
lcomp.Pinit = layer.thickness
self.connect('Cx', lcname+'T' + '.Cx')
self.connect('%sT.P'%lcname, '.'.join(['st3dOut', rname, lname, 'thickness']))
# angle spline
lcomp = self.add(lcname+'A', FFDSplineComponentBase(self.nC))
self.driver.workflow.add(lcname+'A')
# lcomp.log_level = logging.DEBUG
lcomp.set_spline(spline_type)
self.connect('x', '%s.x' % (lcname + 'A'))
self.create_passthrough(lcname+'T' + '.C', alias=lcname+'T' + '_C')
lcomp.xinit = sec.x
lcomp.Pinit = layer.angle
self.connect('Cx', lcname+'A' + '.Cx')
self.connect('%sA.P'%lcname, '.'.join(['st3dOut', rname, lname, 'angle']))
self.create_passthrough(lcname+'A' + '.C', alias=lcname+'A' + '_C')
# shear webs
for wname in sec.webs:
web = getattr(sec, wname)
for lname in web.layers:
layer = getattr(web, lname)
lcname = '%s%s' % (wname, lname)
# thickness spline
lcomp = self.add(lcname+'T', FFDSplineComponentBase(self.nC))
# lcomp.log_level = logging.DEBUG
self.driver.workflow.add(lcname+'T')
lcomp.set_spline(spline_type)
self.connect('x', '%s.x' % (lcname + 'T'))
lcomp.xinit = sec.x
lcomp.Pinit = layer.thickness
self.connect('Cx', lcname+'T' + '.Cx')
self.connect('%sT.P'%lcname, '.'.join(['st3dOut', wname, lname, 'thickness']))
self.create_passthrough(lcname+'T' + '.C', alias=lcname+'T' + '_C')
# angle spline
lcomp = self.add(lcname+'A', FFDSplineComponentBase(self.nC))
# lcomp.log_level = logging.DEBUG
self.driver.workflow.add(lcname+'A')
lcomp.set_spline(spline_type)
self.connect('x', '%s.x' % (lcname + 'A'))
lcomp.xinit = sec.x
lcomp.Pinit = layer.angle
self.connect('Cx', lcname+'A' + '.Cx')
self.connect('%sA.P'%lcname, '.'.join(['st3dOut', wname, lname, 'angle']))
self.create_passthrough(lcname+'A' + '.C', alias=lcname+'A' + '_C')
# copy materials to output VT
self.st3dOut.materials = self.st3dIn.materials.copy()
def _post_execute(self):
"""
update all thicknesses and region widths
"""
super(SplinedBladeStructure, self)._post_execute()
for i, rname in enumerate(self.st3dOut.regions):
region = getattr(self.st3dOut, rname)
DP0 = getattr(self.st3dOut, 'DP%02d' % i)
DP1 = getattr(self.st3dOut, 'DP%02d' % (i + 1))
width = DP1 - DP0
for ix in range(width.shape[0]):
if width[ix] < 0.:
DPt = DP0[ix]
DP0[ix] = DP1[ix]
DP1[ix] = DPt
width[ix] *= -1.
self._logger.warning('switching DPs %i %i for section %i' %
(i, i + 1, ix))
            region.width = width * self.pfOut.chord * self.pfOut.blade_length
region.thickness = np.zeros(self.st3dOut.x.shape)
for layer in region.layers:
region.thickness += np.maximum(0., getattr(region, layer).thickness)
for i, rname in enumerate(self.st3dOut.webs):
region = getattr(self.st3dOut, rname)
region.thickness = np.zeros(self.st3dOut.x.shape)
for layer in region.layers:
region.thickness += np.maximum(0., getattr(region, layer).thickness)
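# Hedged usage sketch for SplinedBladeStructure (illustrative only): st3d and
# planform are assumed to come from e.g. a BladeStructureReader and an
# existing BladePlanformVT; none of the values below are prescribed by the
# class itself.
def _example_splined_structure(st3d, planform, span_ni=30):
    """Respline an existing discrete blade structure definition."""
    spl = SplinedBladeStructure()
    spl.st3dIn = st3d
    spl.pfIn = planform            # passthrough to the planform redistribution
    spl.span_ni = span_ni
    spl.configure_bladestructure(spline_type='pchip')
    spl.run()
    return spl.st3dOut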
class BladeStructureProperties(Component):
surface = VarTree(BladeSurfaceVT(), iotype='in', desc='Stacked blade surface object')
pf = VarTree(BladePlanformVT(), iotype='in', desc='planform')
st3d = VarTree(BladeStructureVT3D(), iotype='in', desc='Blade structure definition')
    cap_ids = List([[0,0], [0,0]], iotype='in', desc='indices of cap DPs '
                   '[[capL0, capL1], [capU0, capU1]]')
pacc_u = Array(iotype='out', desc='upper side pitch axis aft cap center')
pacc_l = Array(iotype='out', desc='lower side pitch axis aft cap center')
pacc_u_curv = Array(iotype='out', desc='upper side pitch axis aft cap center curvature')
pacc_l_curv = Array(iotype='out', desc='lower side pitch axis aft cap center curvature')
def __init__(self, nW=2):
super(BladeStructureProperties, self).__init__()
for w in range(nW):
self.add('alphaW%i' % w, Array(iotype='out', desc='Web%02d angle' % w))
self.add('dW%i' % w, Array(iotype='out', desc='Web%02d offset' % w))
def execute(self):
self.dp_curves = []
self.scurves = []
ni = self.pf.chord.shape[0]
nDP = len(self.st3d.DPs)
for i in range(nDP):
name = 'DP%02d' % i
c = getattr(self.st3d, name)
self.scurves.append(Akima1DInterpolator(self.st3d.x, c))
dp = np.zeros([self.surface.surface.shape[1], self.surface.surface.shape[2]])
self.dp_curves.append(dp)
for i in range(self.surface.surface.shape[1]):
x = self.surface.surface[:, i, :]
span = self.pf.s[i]
af = AirfoilShape(points=x)
for j in range(nDP):
s_chord = self.scurves[j](span)
xx = af.interp_s(af.s_to_01(s_chord))
self.dp_curves[j][i, :] = xx
self.pacc_l = self.dp_curves[self.cap_ids[0][0]].copy()
self.pacc_u = self.dp_curves[self.cap_ids[1][0]].copy()
self.pacc_l[:, [0, 1]] = (self.dp_curves[self.cap_ids[0][0]][:, [0,1]] + \
self.dp_curves[self.cap_ids[0][1]][:, [0,1]]) / 2.
self.pacc_u[:, [0, 1]] = (self.dp_curves[self.cap_ids[1][0]][:, [0,1]] + \
self.dp_curves[self.cap_ids[1][1]][:, [0,1]]) / 2.
self.pacc_l_curv = np.zeros((ni, 2))
self.pacc_u_curv = np.zeros((ni, 2))
self.pacc_l_curv[:, 0] = self.pacc_l[:, 2]
self.pacc_u_curv[:, 0] = self.pacc_u[:, 2]
self.pacc_l_curv[:, 1] = curvature(self.pacc_l)
self.pacc_u_curv[:, 1] = curvature(self.pacc_u)
self.dW0 = self.dp_curves[self.cap_ids[0][0]].copy()
self.dW1 = self.dp_curves[self.cap_ids[0][1]].copy()
self.dW0[:, [0, 1]] = self.dp_curves[self.cap_ids[0][0]][:, [0,1]] -\
self.dp_curves[self.cap_ids[1][0]][:, [0,1]]
self.dW1[:, [0, 1]] = self.dp_curves[self.cap_ids[0][1]][:, [0,1]] -\
self.dp_curves[self.cap_ids[1][1]][:, [0,1]]
self.alphaW0 = np.array([np.arctan(a) for a in self.dW0[:, 0]/self.dW0[:, 1]]) * 180. / np.pi
self.alphaW1 = np.array([np.arctan(a) for a in self.dW1[:, 0]/self.dW1[:, 1]]) * 180. / np.pi
@base
class BladeStructureBuilderBase(Component):
"""
base class for components that can interpret the BladeStructure3DVT
vartree and generate input for specific types of codes.
"""
surface = VarTree(BladeSurfaceVT(), iotype='in', desc='Stacked blade surface object')
st3d = VarTree(BladeStructureVT3D(), iotype='in', desc='Blade structure definition')
def execute(self):
raise NotImplementedError('%s.execute needs to be overwritten by derived classes' % self.get_pathname())
def get_material(self, name):
"""
retrieve a material by its name
parameters
----------
name: string
name of material
returns
-------
mat: object
MaterialProps VariableTree object
"""
# strip integers from name to be safe
st = ''.join(i for i in name if i.isalpha())
try:
return self.st3d.materials[st]
except:
return None
@implement_base(BladeStructureBuilderBase)
class BladeStructureCSBuilder(BladeStructureBuilderBase):
"""
Class that generates a series of 2D cross-sectional property
vartrees (CrossSectionStructureVT) used by structural codes like BECAS
"""
blade_length = Float(1., iotype='in')
surface = VarTree(BladeSurfaceVT(), iotype='in', desc='Stacked blade surface object')
st3d = VarTree(BladeStructureVT3D(), iotype='in', desc='Blade structure definition')
    cs2d = List(iotype='out', desc='List of cross-sectional properties '
                'vartrees')
def execute(self):
"""
generate cross sections at every spanwise node of the st3d vartree
"""
# clear list of outputs!
self.cs2d = []
ni = self.st3d.x.shape[0]
for i in range(ni):
x = self.st3d.x[i]
# print 'adding section at r/R = %2.2f' % x
st2d = CrossSectionStructureVT()
st2d.s = x * self.blade_length
st2d.DPs = []
try:
airfoil = self.surface.interpolate_profile(x)[:, [0, 1]] * self.blade_length
st2d.airfoil.initialize(airfoil)
except:
pass
for ir, rname in enumerate(self.st3d.regions):
reg = getattr(self.st3d, rname)
if reg.thickness[i] < 1.e-5:
print 'zero thickness region!', rname
continue
DP0 = getattr(self.st3d, 'DP%02d' % ir)
DP1 = getattr(self.st3d, 'DP%02d' % (ir + 1))
r = st2d.add_region(rname.upper())
st2d.DPs.append(DP0[i])
r.s0 = DP0[i]
r.s1 = DP1[i]
r.thickness = reg.thickness[i]
for lname in reg.layers:
lay = getattr(reg, lname)
if lay.thickness[i] > 0.:
l = r.add_layer(lname)
# try:
lnamebase = lname.translate(None, digits)
st2d.add_material(lnamebase, self.get_material(lname).copy())
# except:
# raise RuntimeError('Material %s not in materials list' % lname)
l.materialname = lnamebase
l.thickness = max(0., lay.thickness[i])
try:
l.angle = lay.angle[i]
except:
l.angle = 0.
st2d.DPs.append(DP1[i])
for ir, rname in enumerate(self.st3d.webs):
reg = getattr(self.st3d, rname)
if reg.thickness[i] < 1.e-5:
continue
r = st2d.add_web(rname.upper())
try:
DP0 = getattr(self.st3d, 'DP%02d' % self.st3d.iwebs[ir][0])
except:
DP0 = getattr(self.st3d, 'DP%02d' % (len(self.st3d.regions) + self.st3d.iwebs[ir][0] + 1))
try:
DP1 = getattr(self.st3d, 'DP%02d' % self.st3d.iwebs[ir][1])
except:
DP1 = getattr(self.st3d, 'DP%02d' % (len(self.st3d.regions) + self.st3d.iwebs[ir][1] + 1))
r.s0 = DP0[i]
r.s1 = DP1[i]
r.thickness = reg.thickness[i]
for lname in reg.layers:
lay = getattr(reg, lname)
if lay.thickness[i] > 1.e-5:
l = r.add_layer(lname)
try:
lnamebase = lname.translate(None, digits)
st2d.add_material(lnamebase, self.get_material(lname).copy())
except:
raise RuntimeError('Material %s not in materials list' % lname)
l.materialname = lnamebase
l.thickness = max(0., lay.thickness[i])
try:
l.angle = lay.angle[i]
except:
l.angle = 0.
self.cs2d.append(st2d)
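# Hedged usage sketch for BladeStructureCSBuilder (illustrative only): the
# surface and structure inputs are assumed to come from upstream components,
# and blade_length simply scales the normalized geometry.
def _example_build_cross_sections(st3d, surface, blade_length):
    """Generate a CrossSectionStructureVT for every spanwise station."""
    builder = BladeStructureCSBuilder()
    builder.st3d = st3d
    builder.surface = surface
    builder.blade_length = blade_length
    builder.execute()
    return builder.cs2d            # one cross section per entry in st3d.x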
@base
class BeamStructureCSCode(Component):
"""
Base class for computing beam structural properties using a cross-sectional
code such as PreComp, BECAS or VABS.
The analysis assumes that the list of CrossSectionStructureVT's and the
BladePlanformVT are interpolated onto the structural grid, and that
the code itself is responsible for the meshing of the cross sections.
"""
cs2d = List(CrossSectionStructureVT, iotype='in', desc='Blade cross sectional structure geometry')
    pf = VarTree(BladePlanformVT(), iotype='in', desc='Blade planform discretized according to '
                 'the structural resolution')
beam_structure = VarTree(BeamStructureVT(), iotype='out', desc='Structural beam properties')
@base
class StressRecoveryCSCode(Component):
"""
Base class for performing cross sectional failure analysis
using codes like BECAS and VABS.
    This analysis will typically sit in a workflow preceded by
    a call to a BeamStructureCSCode. It is assumed that the list of
    LoadVectorCaseList vartrees is interpolated onto the structural grid.
    Note that the failure criterion and material safety factors are specified
for each individual material in the MaterialProps variable tree.
"""
load_cases = List(LoadVectorCaseList, iotype='in',
        desc='List of lists of section load vectors for each radial section '
             'used to perform failure analysis')
failure = Array(iotype='out', desc='Failure parameter. Shape: ((len(load_cases), n_radial_sections))')
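# Hedged sketch of how a concrete cross-sectional solver could plug into the
# base classes above (illustrative only): the placeholder numbers stand in for
# a real structural analysis such as BECAS, PreComp or VABS.
class _ExampleCSCode(BeamStructureCSCode):
    """Fill the beam structure output with placeholder values."""
    def execute(self):
        ni = len(self.cs2d)
        self.beam_structure.s = np.array([cs.s for cs in self.cs2d])
        self.beam_structure.dm = np.ones(ni)         # dummy mass per length
        self.beam_structure.E = 1.e10 * np.ones(ni)  # dummy Young's modulus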