gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import glue_backend
from .exceptions import (
PartitionAlreadyExistsException,
PartitionNotFoundException,
TableNotFoundException
)
class GlueResponse(BaseResponse):
    """HTTP-level handlers for the mocked AWS Glue service.

    Each handler reads its arguments from the JSON request body and
    delegates to the in-memory backend, returning a JSON response body
    (or an empty string for write operations with no payload).
    """

    @property
    def glue_backend(self):
        # Single shared backend instance for all requests.
        return glue_backend

    @property
    def parameters(self):
        # Glue sends every API argument as a JSON document in the body.
        return json.loads(self.body)

    def create_database(self):
        db_name = self.parameters['DatabaseInput']['Name']
        self.glue_backend.create_database(db_name)
        return ""

    def get_database(self):
        db = self.glue_backend.get_database(self.parameters.get('Name'))
        return json.dumps({'Database': {'Name': db.name}})

    def create_table(self):
        body = self.parameters
        table_input = body.get('TableInput')
        self.glue_backend.create_table(
            body.get('DatabaseName'), table_input.get('Name'), table_input)
        return ""

    def get_table(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('Name'))
        return json.dumps({'Table': found.as_dict()})

    def update_table(self):
        body = self.parameters
        table_input = body.get('TableInput')
        # The table to update is named inside TableInput, not at top level.
        target = self.glue_backend.get_table(
            body.get('DatabaseName'), table_input.get('Name'))
        target.update(table_input)
        return ""

    def get_table_versions(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        # Version ids are 1-based on the wire, 0-based internally.
        versions = [
            {
                "Table": found.as_dict(version=idx),
                "VersionId": str(idx + 1),
            } for idx in range(len(found.versions))
        ]
        return json.dumps({"TableVersions": versions})

    def get_table_version(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        requested = body.get('VersionId')
        return json.dumps({
            "TableVersion": {
                "Table": found.as_dict(version=requested),
                "VersionId": requested,
            },
        })

    def get_tables(self):
        tables = self.glue_backend.get_tables(self.parameters.get('DatabaseName'))
        return json.dumps({'TableList': [t.as_dict() for t in tables]})

    def delete_table(self):
        body = self.parameters
        resp = self.glue_backend.delete_table(
            body.get('DatabaseName'), body.get('Name'))
        return json.dumps(resp)

    def batch_delete_table(self):
        body = self.parameters
        db_name = body.get('DatabaseName')
        failures = []
        for name in body.get('TablesToDelete'):
            try:
                self.glue_backend.delete_table(db_name, name)
            except TableNotFoundException:
                # Batch calls report per-item failures instead of raising.
                failures.append({
                    "TableName": name,
                    "ErrorDetail": {
                        "ErrorCode": "EntityNotFoundException",
                        "ErrorMessage": "Table not found"
                    }
                })
        result = {"Errors": failures} if failures else {}
        return json.dumps(result)

    def get_partitions(self):
        body = self.parameters
        if 'Expression' in body:
            raise NotImplementedError("Expression filtering in get_partitions is not implemented in moto")
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        return json.dumps(
            {'Partitions': [part.as_dict() for part in found.get_partitions()]})

    def get_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        part = found.get_partition(body.get('PartitionValues'))
        return json.dumps({'Partition': part.as_dict()})

    def batch_get_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        collected = []
        for entry in body.get('PartitionsToGet'):
            try:
                part = found.get_partition(values=entry["Values"])
            except PartitionNotFoundException:
                # Missing partitions are silently skipped in batch gets.
                continue
            collected.append(part.as_dict())
        return json.dumps({'Partitions': collected})

    def create_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        found.create_partition(body.get('PartitionInput'))
        return ""

    def batch_create_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        failures = []
        for entry in body.get('PartitionInputList'):
            try:
                found.create_partition(entry)
            except PartitionAlreadyExistsException:
                failures.append({
                    'PartitionValues': entry['Values'],
                    'ErrorDetail': {
                        'ErrorCode': 'AlreadyExistsException',
                        'ErrorMessage': 'Partition already exists.'
                    }
                })
        result = {"Errors": failures} if failures else {}
        return json.dumps(result)

    def update_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        found.update_partition(
            body.get('PartitionValueList'), body.get('PartitionInput'))
        return ""

    def delete_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        found.delete_partition(body.get('PartitionValues'))
        return ""

    def batch_delete_partition(self):
        body = self.parameters
        found = self.glue_backend.get_table(
            body.get('DatabaseName'), body.get('TableName'))
        failures = []
        for entry in body.get('PartitionsToDelete'):
            values = entry.get('Values')
            try:
                found.delete_partition(values)
            except PartitionNotFoundException:
                failures.append({
                    'PartitionValues': values,
                    'ErrorDetail': {
                        'ErrorCode': 'EntityNotFoundException',
                        'ErrorMessage': 'Partition not found',
                    }
                })
        result = {'Errors': failures} if failures else {}
        return json.dumps(result)
| |
# trace-test.py -- Python harness for JavaScript trace tests.
import datetime, os, re, sys, traceback
import subprocess
from subprocess import *
# Per-debugger launch configuration: whether the tool takes over the
# terminal interactively, and the default arguments it is invoked with.
DEBUGGER_INFO = {
    "gdb": {
        "interactive": True,
        "args": "-q --args"
    },
    "valgrind": {
        "interactive": False,
        "args": "--leak-check=full"
    }
}
# Backported from Python 3.1 posixpath.py
def _relpath(path, start=None):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
if start is None:
start = os.curdir
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.curdir
return os.path.join(*rel_list)
os.path.relpath = _relpath
class Test:
    """A single JS trace test plus metadata parsed from its first line."""

    def __init__(self, path, slow, allow_oom, tmflags, error, valgrind):
        """ path       path to test file
            slow       True means the test is slow-running
            allow_oom  True means OOM should not be considered a failure
            valgrind   True means test should run under valgrind """
        self.path = path
        self.slow = slow
        self.allow_oom = allow_oom
        self.tmflags = tmflags
        self.error = error
        self.valgrind = valgrind

    # Marker introducing test metadata on the first line of a test file.
    COOKIE = '|trace-test|'

    @classmethod
    def from_file(cls, path, options):
        """Build a Test from *path*, parsing any |trace-test| metadata.

        Metadata is a ';'-separated list after the cookie; each entry is
        either a bare flag (slow, allow-oom, valgrind) or a name:value
        pair (TMFLAGS, error). Unknown entries produce a warning.
        """
        slow = allow_oom = valgrind = False
        error = tmflags = ''

        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(path) as f:
            line = f.readline()
        i = line.find(cls.COOKIE)
        if i != -1:
            meta = line[i + len(cls.COOKIE):].strip('\n')
            parts = meta.split(';')
            for part in parts:
                part = part.strip()
                if not part:
                    continue
                name, _, value = part.partition(':')
                if value:
                    value = value.strip()
                    if name == 'TMFLAGS':
                        tmflags = value
                    elif name == 'error':
                        error = value
                    else:
                        print('warning: unrecognized |trace-test| attribute %s'%part)
                else:
                    if name == 'slow':
                        slow = True
                    elif name == 'allow-oom':
                        allow_oom = True
                    elif name == 'valgrind':
                        valgrind = options.valgrind
                    else:
                        print('warning: unrecognized |trace-test| attribute %s'%part)

        return cls(path, slow, allow_oom, tmflags, error, valgrind or options.valgrind_all)
def find_tests(dir, substring = None):
    """Recursively collect .js test paths under *dir*.

    Harness support files (shell.js, browser.js, jsref.js) are skipped.
    When *substring* is given, only paths whose dir-relative form contains
    it are returned. Results follow sorted directory/file order.
    """
    found = []
    skip_names = ('shell.js', 'browser.js', 'jsref.js')
    for dirpath, dirnames, filenames in os.walk(dir):
        # Sort in place so os.walk visits in deterministic order.
        dirnames.sort()
        filenames.sort()
        if dirpath == '.':
            continue
        for filename in filenames:
            if not filename.endswith('.js') or filename in skip_names:
                continue
            candidate = os.path.join(dirpath, filename)
            if substring is not None and substring not in os.path.relpath(candidate, dir):
                continue
            found.append(candidate)
    return found
def get_test_cmd(path, lib_dir):
    """Build the JS-shell command line that runs the test at *path*."""
    # The harness scripts expect libdir to end with a path separator.
    libdir_var = lib_dir if lib_dir.endswith('/') else lib_dir + '/'
    # Expose the host platform and lib dir to the test prolog.
    expr = "const platform=%r; const libdir=%r;"%(sys.platform, libdir_var)
    prolog = os.path.join(lib_dir, 'prolog.js')
    return [JS, '-j', '-e', expr, '-f', prolog, '-f', path]
def run_test(test, lib_dir):
    """Execute one test and return (passed, stdout, stderr)."""
    env = None
    if test.tmflags:
        # Pass TMFLAGS through to the shell via the environment.
        env = os.environ.copy()
        env['TMFLAGS'] = test.tmflags

    cmd = get_test_cmd(test.path, lib_dir)

    # Wrap the command in valgrind only when requested AND the binary is
    # actually available somewhere on PATH.
    if (test.valgrind and
        any(os.path.exists(os.path.join(d, 'valgrind'))
            for d in os.environ['PATH'].split(os.pathsep))):
        prefix = ['valgrind',
                  '-q',
                  '--smc-check=all',
                  '--error-exitcode=1',
                  '--leak-check=full']
        if os.uname()[0] == 'Darwin':
            prefix += ['--dsymutil=yes']
        cmd = prefix + cmd

    if OPTIONS.show_cmd:
        print(subprocess.list2cmdline(cmd))

    # close_fds is not supported on Windows and will cause a ValueError.
    use_close_fds = sys.platform != 'win32'
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                 close_fds=use_close_fds, env=env)
    out, err = proc.communicate()
    out, err = out.decode(), err.decode()

    if OPTIONS.show_output:
        sys.stdout.write(out)
        sys.stdout.write(err)
    if test.valgrind:
        # Valgrind diagnostics arrive on stderr; surface them.
        sys.stdout.write(err)

    passed = check_output(out, err, proc.returncode, test.allow_oom, test.error)
    return (passed, out, err)
def check_output(out, err, rc, allow_oom, expectedError):
    """Decide whether a test run passed, given its output and exit code."""
    if expectedError:
        # An expected-error test passes iff the error text appears on stderr.
        return expectedError in err

    if any(line.startswith('Trace stats check failed')
           for line in out.split('\n')):
        return False
    if any('Assertion failed:' in line for line in err.split('\n')):
        return False

    if rc != 0:
        # Allow a non-zero exit code if we want to allow OOM, but only if we
        # actually got OOM.
        return allow_oom and ': out of memory' in err

    return True
def run_tests(tests, test_dir, lib_dir):
    """Run every test in *tests*, reporting progress and failures.

    Returns True when all tests pass, False otherwise. Honors the global
    OPTIONS flags (progress bar, tinderbox output, failure file). A
    KeyboardInterrupt stops the run but still reports partial results.
    """
    pb = None
    if not OPTIONS.hide_progress and not OPTIONS.show_cmd:
        try:
            from progressbar import ProgressBar
            pb = ProgressBar('', len(tests), 13)
        except ImportError:
            # The progress bar is optional; run without it.
            pass

    failures = []
    complete = False
    # Tracks where we were when interrupted, for the final message.
    doing = 'before starting'
    try:
        for i, test in enumerate(tests):
            doing = 'on %s'%test.path
            ok, out, err = run_test(test, lib_dir)
            doing = 'after %s'%test.path

            if not ok:
                failures.append(test.path)

            if OPTIONS.tinderbox:
                if ok:
                    print('TEST-PASS | trace-test.py | %s'%test.path)
                else:
                    # Report the last non-empty output line as the reason.
                    lines = [ _ for _ in out.split('\n') + err.split('\n')
                              if _ != '' ]
                    if len(lines) >= 1:
                        msg = lines[-1]
                    else:
                        msg = ''
                    print('TEST-UNEXPECTED-FAIL | trace-test.py | %s: %s'%
                          (test.path, msg))

            n = i + 1
            if pb:
                # label shows [passed|failed|total-so-far].
                pb.label = '[%3d|%3d|%3d]'%(n - len(failures), len(failures), n)
                pb.update(n)
        complete = True
    except KeyboardInterrupt:
        print('TEST-UNEXPECTED_FAIL | trace-test.py | %s'%test.path)

    if pb:
        pb.finish()

    if failures:
        if OPTIONS.write_failures:
            try:
                out = open(OPTIONS.write_failures, 'w')
                for test in failures:
                    out.write(os.path.relpath(test, test_dir) + '\n')
                out.close()
            except IOError:
                sys.stderr.write("Exception thrown trying to write failure file '%s'\n"%
                                 OPTIONS.write_failures)
                traceback.print_exc()
                sys.stderr.write('---\n')

        print('FAILURES:')
        for test in failures:
            if OPTIONS.show_failed:
                print(' ' + subprocess.list2cmdline(get_test_cmd(test, lib_dir)))
            else:
                print(' ' + test)
        return False
    else:
        print('PASSED ALL' + ('' if complete else ' (partial run -- interrupted by user %s)'%doing))
        return True
if __name__ == '__main__':
    # Command-line entry point: parse options, build the test list, and
    # either debug a single test under gdb or run the whole suite.
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, 'tests')
    lib_dir = os.path.join(script_dir, 'lib')

    # The [TESTS] optional arguments are paths of test files relative
    # to the trace-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
                  action='store_true', help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    # FIX: default=True so slow tests run unless --no-slow is given;
    # store_false with no default left run_slow as None (falsy), which
    # silently filtered slow tests on every run.
    op.add_option('--no-slow', dest='run_slow', default=True,
                  action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    (OPTIONS, args) = op.parse_args()
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    JS, test_args = os.path.normpath(args[0]), args[1:]
    JS = os.path.realpath(JS) # Burst through the symlinks!

    # --retest reads the previous failure list and rewrites it afterwards.
    if OPTIONS.retest:
        OPTIONS.read_tests = OPTIONS.retest
        OPTIONS.write_failures = OPTIONS.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += find_tests(test_dir, arg)

    if OPTIONS.read_tests:
        read_all = False
        try:
            f = open(OPTIONS.read_tests)
            for line in f:
                test_list.append(os.path.join(test_dir, line.strip('\n')))
            f.close()
        except IOError:
            if OPTIONS.retest:
                # No failure file yet: fall back to running everything.
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
                                 OPTIONS.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = find_tests(test_dir)

    if OPTIONS.exclude:
        exclude_list = []
        for exclude in OPTIONS.exclude:
            exclude_list += find_tests(test_dir, exclude)
        test_list = [ test for test in test_list if test not in set(exclude_list) ]

    if not test_list:
        # FIX: was Python 2 `print >> sys.stderr, ...`, a SyntaxError
        # under Python 3 (the rest of the file uses print()).
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [ Test.from_file(_, OPTIONS) for _ in test_list ]

    if not OPTIONS.run_slow:
        test_list = [ _ for _ in test_list if not _.slow ]

    if OPTIONS.debug:
        if len(test_list) > 1:
            print('Multiple tests match command line arguments, debugger can only run one')
            for tc in test_list:
                print(' %s'%tc.path)
            sys.exit(1)

        tc = test_list[0]
        cmd = [ 'gdb', '--args' ] + get_test_cmd(tc.path, lib_dir)
        call(cmd)
        sys.exit()

    try:
        ok = run_tests(test_list, test_dir, lib_dir)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(JS):
            # FIX: was Python 2 `print >> sys.stderr` syntax.
            print("JS shell argument: file does not exist: '%s'"%JS, file=sys.stderr)
            sys.exit(1)
        else:
            raise
| |
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
glutils.py
Pythonesque wrappers around certain OpenGL functions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from OpenGL import GL
import numpy
from contextlib import contextmanager
import weakref
from OpenGL.GL import framebufferobjects as FBO
import sys
class gl(object):
    """Context-manager and bookkeeping wrappers around raw GL calls."""

    @classmethod
    def ResetGL(cls):
        """Destroy every live display list (e.g. after a context reset)."""
        DisplayList.destroyAllLists()

    @classmethod
    @contextmanager
    def glPushMatrix(cls, matrixmode):
        """Push *matrixmode*'s matrix stack for the duration of the block."""
        try:
            GL.glMatrixMode(matrixmode)
            GL.glPushMatrix()
            yield
        finally:
            # Re-select the mode: the body may have switched matrix modes.
            GL.glMatrixMode(matrixmode)
            GL.glPopMatrix()

    @classmethod
    @contextmanager
    def glPushAttrib(cls, *attribs):
        """Push the union of *attribs* attribute bits, popping on exit."""
        # FIX: fold the bitmasks with an explicit loop; the original used
        # `reduce`, which is not a builtin on Python 3 (NameError).
        allAttribs = 0
        for bits in attribs:
            allAttribs |= bits
        try:
            GL.glPushAttrib(allAttribs)
            yield
        finally:
            GL.glPopAttrib()

    @classmethod
    @contextmanager
    def glPushClientAttrib(cls, *attribs):
        """Push the union of *attribs* client-attribute bits, popping on exit."""
        # FIX: same undefined-`reduce` repair as glPushAttrib above.
        allAttribs = 0
        for bits in attribs:
            allAttribs |= bits
        try:
            GL.glPushClientAttrib(allAttribs)
            yield
        finally:
            GL.glPopClientAttrib()

    @classmethod
    @contextmanager
    def glBegin(cls, type):
        """glBegin(*type*) ... glEnd() as a context manager."""
        try:
            GL.glBegin(type)
            yield
        finally:
            GL.glEnd()

    @classmethod
    @contextmanager
    def glEnable(cls, *enables):
        """Enable the given caps inside the block; restore state on exit."""
        try:
            GL.glPushAttrib(GL.GL_ENABLE_BIT)
            for e in enables:
                GL.glEnable(e)
            yield
        finally:
            GL.glPopAttrib()

    @classmethod
    @contextmanager
    def glEnableClientState(cls, *enables):
        """Enable the given client states inside the block; restore on exit."""
        try:
            GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
            for e in enables:
                GL.glEnableClientState(e)
            yield
        finally:
            GL.glPopClientAttrib()

    # Running count of display lists allocated through this wrapper.
    listCount = 0

    @classmethod
    def glGenLists(cls, n):
        cls.listCount += n
        return GL.glGenLists(n)

    @classmethod
    def glDeleteLists(cls, base, n):
        cls.listCount -= n
        return GL.glDeleteLists(base, n)
# Weak references to every live DisplayList, so destroyAllLists/ResetGL
# can reach and destroy them all.
allDisplayLists = []
class DisplayList(object):
    """A lazily-compiled GL display list tied to a draw callback."""

    def __init__(self, drawFunc=None):
        # Callback that issues the GL commands recorded into the list.
        self.drawFunc = drawFunc
        # One-element uintc array holding the GL list id once compiled.
        self._list = None
        # True when the list must be (re)compiled before the next call.
        self.dirty = True

        def _delete(r):
            allDisplayLists.remove(r)
        # Register a weakref (with cleanup callback) so destroyAllLists
        # can reach this instance without keeping it alive.
        allDisplayLists.append(weakref.ref(self, _delete))

    @classmethod
    def destroyAllLists(self):
        # NOTE(review): first parameter of a classmethod is conventionally
        # named `cls`; `self` here is actually the class object.
        allLists = []
        for listref in allDisplayLists:
            list = listref()
            if list:
                list.destroy()
                # Keep the ref: the instance is still alive, just unbuilt.
                allLists.append(listref)

        allDisplayLists[:] = allLists

    def invalidate(self):
        # Mark for recompilation on next use.
        self.dirty = True

    def destroy(self):
        # Free the GL-side list; the object itself stays usable and will
        # recompile on demand.
        if self._list is not None:
            GL.glDeleteLists(self._list, 1)
            self._list = None
            self.dirty = True

    def compile(self, drawFunc):
        # No-op when the compiled list is still current.
        if not self.dirty and self._list is not None:
            return
        self._compile(drawFunc)

    def _compile(self, drawFunc):
        # Record drawFunc's GL commands into the (possibly new) list.
        drawFunc = (drawFunc or self.drawFunc)
        if drawFunc is None:
            return

        if self._list is None:
            l = gl.glGenLists(1)
            self._list = numpy.array([l], 'uintc')

        l = self._list[0]
        GL.glNewList(l, GL.GL_COMPILE)
        drawFunc()
        #try:
        GL.glEndList()
        #except GL.GLError:
        #    print "Error while compiling display list. Retrying display list code to pinpoint error"
        #    self.drawFunc()
        self.dirty = False

    def getList(self, drawFunc=None):
        """Return the list-id array, compiling first if needed."""
        self.compile(drawFunc)
        return self._list

    # With -debuglists, bypass display lists entirely and draw directly —
    # useful for pinpointing GL errors hidden inside a compiled list.
    if "-debuglists" in sys.argv:
        def call(self, drawFunc=None):
            drawFunc = (drawFunc or self.drawFunc)
            if drawFunc is None:
                return
            drawFunc()
    else:
        def call(self, drawFunc=None):
            self.compile(drawFunc)
            GL.glCallLists(self._list)
class Texture(object):
    """A lazily-loaded 2D GL texture filled by a user callback."""

    # Weak references to all textures that have been loaded.
    allTextures = []
    defaultFilter = GL.GL_NEAREST

    def __init__(self, textureFunc=None, minFilter=None, magFilter=None, maxLOD=4):
        # maxLOD setting of 4 ensures 16x16 textures reduce to 1x1 and no smaller
        self.minFilter = minFilter or self.defaultFilter
        self.magFilter = magFilter or self.defaultFilter

        if textureFunc is None:
            textureFunc = lambda: None

        # Callback that uploads the texel data (e.g. via glTexImage2D).
        self.textureFunc = textureFunc
        self._texID = GL.glGenTextures(1)
        # dirty=True defers the actual upload until the first bind().
        self.dirty = True
        self.maxLOD = maxLOD

    def load(self):
        """Upload texture data on first use (or after invalidate())."""
        if not self.dirty:
            return
        self.dirty = False

        def _delete(r):
            Texture.allTextures.remove(r)
        self.allTextures.append(weakref.ref(self, _delete))

        self.bind()
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, self.minFilter)
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, self.magFilter)
        self.textureFunc()
        # Generate mipmaps only when a mipmapping min-filter is selected
        # and the GL implementation exposes glGenerateMipmap.
        if self.minFilter in (GL.GL_LINEAR_MIPMAP_LINEAR,
                              GL.GL_LINEAR_MIPMAP_NEAREST,
                              GL.GL_NEAREST_MIPMAP_LINEAR,
                              GL.GL_NEAREST_MIPMAP_NEAREST):
            if bool(GL.glGenerateMipmap):
                GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LOD, self.maxLOD)
                GL.glGenerateMipmap(GL.GL_TEXTURE_2D)

    def dispose(self):
        # Free the GL texture object; safe to call more than once.
        if self._texID is not None:
            GL.glDeleteTextures(self._texID)
            self._texID = None

    def bind(self):
        # load() is a no-op unless the texture is dirty.
        self.load()
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)

    def invalidate(self):
        # Force re-upload on the next bind().
        self.dirty = True
class FramebufferTexture(Texture):
    """A texture rendered once at construction time via *drawFunc*.

    Uses an FBO when available (and the vendor is not Intel, presumably
    due to driver issues — TODO confirm); otherwise falls back to
    rendering into the back buffer and copying out with
    glCopyTexSubImage2D.
    """

    def __init__(self, width, height, drawFunc):
        tex = GL.glGenTextures(1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
        GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
        # Allocate storage only; the pixels come from the render below.
        GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA8, width, height, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)
        self.enabled = False
        self._texID = tex
        if bool(FBO.glGenFramebuffers) and "Intel" not in GL.glGetString(GL.GL_VENDOR):
            buf = FBO.glGenFramebuffers(1)
            depthbuffer = FBO.glGenRenderbuffers(1)

            FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)

            # Attach a depth renderbuffer and the texture as color output.
            FBO.glBindRenderbuffer(FBO.GL_RENDERBUFFER, depthbuffer)
            FBO.glRenderbufferStorage(FBO.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, width, height)

            FBO.glFramebufferRenderbuffer(FBO.GL_FRAMEBUFFER, FBO.GL_DEPTH_ATTACHMENT, FBO.GL_RENDERBUFFER, depthbuffer)
            FBO.glFramebufferTexture2D(FBO.GL_FRAMEBUFFER, FBO.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, tex, 0)

            status = FBO.glCheckFramebufferStatus(FBO.GL_FRAMEBUFFER)
            if status != FBO.GL_FRAMEBUFFER_COMPLETE:
                # Incomplete FBO: report and leave the texture disabled.
                print("glCheckFramebufferStatus: " + str(status))
                self.enabled = False
                return

            FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)

            with gl.glPushAttrib(GL.GL_VIEWPORT_BIT):
                GL.glViewport(0, 0, width, height)
                drawFunc()

            # Render is complete; release the FBO resources.
            FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, 0)
            FBO.glDeleteFramebuffers(1, [buf])
            FBO.glDeleteRenderbuffers(1, [depthbuffer])

            self.enabled = True
        else:
            # Fallback: draw into the back buffer under a scissor, then
            # copy the pixels into the texture.
            GL.glReadBuffer(GL.GL_BACK)
            GL.glPushAttrib(GL.GL_VIEWPORT_BIT | GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT | GL.GL_STENCIL_TEST | GL.GL_STENCIL_BUFFER_BIT)
            GL.glDisable(GL.GL_STENCIL_TEST)
            GL.glViewport(0, 0, width, height)
            GL.glScissor(0, 0, width, height)
            with gl.glEnable(GL.GL_SCISSOR_TEST):
                drawFunc()
            GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
            GL.glReadBuffer(GL.GL_BACK)
            GL.glCopyTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
            GL.glPopAttrib()
def debugDrawPoint(point):
    """Draw *point* (a 3-sequence) as a large yellow point, for debugging."""
    GL.glColor(1.0, 1.0, 0.0, 1.0)
    GL.glPointSize(9.0)
    with gl.glBegin(GL.GL_POINTS):
        GL.glVertex3f(*point)
| |
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MLPerf Inference benchmarks."""
import posixpath
import re
from typing import Any, Dict, List
from absl import flags
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import mlperf_benchmark
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import docker
from perfkitbenchmarker.linux_packages import nvidia_driver
FLAGS = flags.FLAGS
MLPERF_INFERENCE_VERSION = 'v1.1'
_MLPERF_SCRATCH_PATH = '/scratch'
_DLRM_DATA_MODULE = 'criteo'
_DLRM_DATA = 'day_23.gz'
_DLRM_PREPROCESSED_DATA = 'full_recalib.tar.gz'
_DLRM_MODEL = '40m_limit.tar.gz'
_DLRM_ROW_FREQ = 'tb00_40M.pt'
BENCHMARK_NAME = 'mlperf_inference'
BENCHMARK_CONFIG = """
mlperf_inference:
description: Runs MLPerf Inference Benchmark.
vm_groups:
default:
disk_spec: *default_500_gb
vm_spec:
GCP:
machine_type: a2-highgpu-1g
zone: us-central1-b
boot_disk_size: 200
AWS:
machine_type: p4d.24xlarge
zone: us-west-2a
boot_disk_size: 200
Azure:
machine_type: Standard_ND96asr_v4
zone: westus2
boot_disk_size: 200
"""
_SCENARIOS = flags.DEFINE_enum('mlperf_inference_scenarios', 'server',
['server', 'singlestream', 'offline'],
'MLPerf has defined three different scenarios')
_PERFORMANCE_METADATA = [
'benchmark',
'coalesced_tensor',
'gpu_batch_size',
'gpu_copy_streams',
'gpu_inference_streams',
'input_dtype',
'input_format',
'precision',
'scenario',
'server_target_qps',
'system',
'tensor_path',
'use_graphs',
'config_name',
'config_ver',
'accuracy_level',
'optimization_level',
'inference_server',
'system_id',
'use_cpu',
'power_limit',
'cpu_freq',
'gpu_num_bundles',
'log_dir',
'SUT name',
'Scenario',
'Mode',
'Scheduled samples per second',
'Result is',
'Performance constraints satisfied',
'Min duration satisfied',
'Min queries satisfied',
'Completed samples per second',
'Min latency (ns)',
'Max latency (ns)',
'Mean latency (ns)',
'50.00 percentile latency (ns)',
'90.00 percentile latency (ns)',
'95.00 percentile latency (ns)',
'97.00 percentile latency (ns)',
'99.00 percentile latency (ns)',
'99.90 percentile latency (ns)',
'samples_per_query',
'target_latency (ns)',
'max_async_queries',
'min_duration (ms)',
'max_duration (ms)',
'min_query_count',
'max_query_count',
'qsl_rng_seed',
'sample_index_rng_seed',
'schedule_rng_seed',
'accuracy_log_rng_seed',
'accuracy_log_probability',
'accuracy_log_sampling_target',
'print_timestamps',
'performance_issue_unique',
'performance_issue_same',
'performance_issue_same_index',
'performance_sample_count',
]
_ACCURACY_METADATA = [
'benchmark',
'coalesced_tensor',
'gpu_batch_size',
'gpu_copy_streams',
'gpu_inference_streams',
'input_dtype',
'input_format',
'precision',
'scenario',
'server_target_qps',
'system',
'tensor_path',
'use_graphs',
'config_name',
'config_ver',
'accuracy_level',
'optimization_level',
'inference_server',
'system_id',
'use_cpu',
'power_limit',
'cpu_freq',
'test_mode',
'fast',
'gpu_num_bundles',
'log_dir',
]
def GetConfig(user_config: Dict[str, Any]) -> Dict[str, Any]:
  """Load and return benchmark config.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(bm_spec: benchmark_spec.BenchmarkSpec) -> None:
  """Install and set up MLPerf Inference on the target vm.

  Clones the published inference-results repository, patches its build
  files, installs GPU tooling when a GPU is present, builds the NVIDIA
  docker harness, stages data/model, and generates engines.

  Args:
    bm_spec: The benchmark specification

  Raises:
    errors.Config.InvalidValue upon both GPUs and TPUs appear in the config
  """
  vm = bm_spec.vms[0]

  repository = f'inference_results_{MLPERF_INFERENCE_VERSION}'
  vm.RemoteCommand(f'git clone https://github.com/mlcommons/{repository}.git')

  makefile = f'{repository}/closed/NVIDIA/Makefile'
  # Patch the arch probe from `uname -p` to `uname -m` in the Makefile.
  vm_util.ReplaceText(vm, 'shell uname -p', 'shell uname -m', makefile)

  requirements = f'{repository}/closed/NVIDIA/docker/requirements.1'
  # Bump the pinned opencv wheel version in the docker requirements.
  vm_util.ReplaceText(vm, 'opencv-python-headless==4.5.2.52',
                      'opencv-python-headless==4.5.3.56', requirements)

  if nvidia_driver.CheckNvidiaGpuExists(vm):
    vm.Install('cuda_toolkit')
    vm.Install('nvidia_driver')
    vm.Install('nvidia_docker')

  benchmark = FLAGS.mlperf_benchmark
  # All subsequent make targets run from the NVIDIA submission directory
  # with the scratch path exported.
  bm_spec.env_cmd = (f'export MLPERF_SCRATCH_PATH={_MLPERF_SCRATCH_PATH} && '
                     f'cd {repository}/closed/NVIDIA')
  docker.AddUser(vm)
  vm.RobustRemoteCommand(
      f'{bm_spec.env_cmd} && '
      'make build_docker NO_BUILD=1 && '
      'make docker_add_user && '
      'make launch_docker DOCKER_COMMAND="make clean" && '
      'make launch_docker DOCKER_COMMAND="make link_dirs"',
      should_log=True)
  if benchmark == mlperf_benchmark.DLRM:
    # DLRM artifacts come from preprovisioned cloud storage rather than
    # the in-container download targets.
    # Download data
    data_dir = posixpath.join(_MLPERF_SCRATCH_PATH, 'data', _DLRM_DATA_MODULE)
    vm.DownloadPreprovisionedData(data_dir, _DLRM_DATA_MODULE, _DLRM_DATA)
    vm.RemoteCommand(f'cd {data_dir} && gzip -d {_DLRM_DATA}')

    # Download model
    model_dir = posixpath.join(_MLPERF_SCRATCH_PATH, 'models',
                               FLAGS.mlperf_benchmark)
    vm.DownloadPreprovisionedData(model_dir, FLAGS.mlperf_benchmark,
                                  _DLRM_MODEL)
    vm.RemoteCommand(f'cd {model_dir} && '
                     f'tar -zxvf {_DLRM_MODEL} && '
                     f'rm -f {_DLRM_MODEL}')
    vm.DownloadPreprovisionedData(model_dir, FLAGS.mlperf_benchmark,
                                  _DLRM_ROW_FREQ)

    # Preprocess Data
    preprocessed_data_dir = posixpath.join(_MLPERF_SCRATCH_PATH,
                                           'preprocessed_data',
                                           _DLRM_DATA_MODULE)
    vm.DownloadPreprovisionedData(preprocessed_data_dir, _DLRM_DATA_MODULE,
                                  _DLRM_PREPROCESSED_DATA)
    vm.RemoteCommand(f'cd {preprocessed_data_dir} && '
                     f'tar -zxvf {_DLRM_PREPROCESSED_DATA} && '
                     f'rm -f {_DLRM_PREPROCESSED_DATA}')
  else:
    # Other benchmarks fetch and preprocess everything inside the
    # container via the reference makefile targets.
    vm.RobustRemoteCommand(
        f'{bm_spec.env_cmd} && '
        'make launch_docker DOCKER_COMMAND='
        f'"make download_data BENCHMARKS={benchmark}"',
        should_log=True)
    vm.RobustRemoteCommand(
        f'{bm_spec.env_cmd} && '
        'make launch_docker DOCKER_COMMAND='
        f'"make download_model BENCHMARKS={benchmark}"',
        should_log=True)
    vm.RobustRemoteCommand(
        f'{bm_spec.env_cmd} && '
        'make launch_docker DOCKER_COMMAND='
        f'"make preprocess_data BENCHMARKS={benchmark}"',
        should_log=True)
  # Build the harness and generate engines for the selected scenario.
  vm.RobustRemoteCommand(
      f'{bm_spec.env_cmd} && '
      'make launch_docker DOCKER_COMMAND='
      '"make build" && '
      'make launch_docker DOCKER_COMMAND='
      '"make generate_engines RUN_ARGS=\''
      f'--benchmarks={FLAGS.mlperf_benchmark} '
      f'--scenarios={_SCENARIOS.value}\'"',
      should_log=True)
def _CreateMetadataDict(
    bm_spec: benchmark_spec.BenchmarkSpec) -> Dict[str, Any]:
  """Create metadata dict to be used in run results.

  Args:
    bm_spec: The benchmark specification. Contains all data that is required
      to run the benchmark.

  Returns:
    metadata dict
  """
  vms = bm_spec.vms
  primary_vm = vms[0]
  metadata = {
      'model': FLAGS.mlperf_benchmark,
      'version': MLPERF_INFERENCE_VERSION,
  }
  metadata.update(cuda_toolkit.GetMetadata(primary_vm))
  # Report the cluster-wide GPU count (per-node count times node count).
  metadata['total_gpus'] = nvidia_driver.QueryNumberOfGpus(primary_vm) * len(vms)
  return metadata
def MakePerformanceSamplesFromOutput(base_metadata: Dict[str, Any],
                                     output: str) -> List[sample.Sample]:
  """Create performance samples containing metrics.

  Args:
    base_metadata: dict contains all the metadata that reports.
    output: string, command output
  Example output:
    perfkitbenchmarker/tests/linux_benchmarks/mlperf_inference_benchmark_test.py

  Returns:
    Samples containing training metrics.
  """
  # Scrape each known `<key> : <value>` line out of the harness log.
  metadata = {
      f'mlperf {name}': regex_util.ExtractExactlyOneMatch(
          fr'{re.escape(name)} *: *(.*)', output)
      for name in _PERFORMANCE_METADATA
  }
  metadata.update(base_metadata)
  throughput = regex_util.ExtractFloat(
      r': result_scheduled_samples_per_sec: (\d+\.\d+)', output)
  return [sample.Sample('throughput', float(throughput), 'samples/s', metadata)]
def MakeAccuracySamplesFromOutput(base_metadata: Dict[str, Any],
                                  output: str) -> List[sample.Sample]:
  """Create accuracy samples containing metrics.

  Args:
    base_metadata: dict contains all the metadata that reports.
    output: string, command output

  Returns:
    Samples containing training metrics.
  """
  # Scrape each known `<key> : <value>` line out of the harness log.
  metadata = {
      f'mlperf {name}': regex_util.ExtractExactlyOneMatch(
          fr'{re.escape(name)} *: *(.*)', output)
      for name in _ACCURACY_METADATA
  }
  accuracy = regex_util.ExtractFloat(
      r': Accuracy = (\d+\.\d+), Threshold = \d+\.\d+\. Accuracy test PASSED',
      output)
  metadata['Threshold'] = regex_util.ExtractFloat(
      r': Accuracy = \d+\.\d+, Threshold = (\d+\.\d+)\. Accuracy test PASSED',
      output)
  metadata.update(base_metadata)
  return [sample.Sample('accuracy', float(accuracy), '%', metadata)]
def Run(bm_spec: benchmark_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Run MLPerf Inference on the cluster.

  Args:
    bm_spec: The benchmark specification. Contains all data that is required to
      run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = bm_spec.vms[0]
  metadata = _CreateMetadataDict(bm_spec)

  def _RunHarness(test_mode: str) -> str:
    """Runs the dockerized harness in the given test mode; returns stdout."""
    stdout, _ = vm.RobustRemoteCommand(
        f'{bm_spec.env_cmd} && '
        'make launch_docker DOCKER_COMMAND="make run_harness RUN_ARGS=\''
        f'--benchmarks={FLAGS.mlperf_benchmark} '
        f'--scenarios={_SCENARIOS.value} --fast --test_mode={test_mode}\'"',
        should_log=True)
    return stdout

  performance_samples = MakePerformanceSamplesFromOutput(
      metadata, _RunHarness('PerformanceOnly'))
  accuracy_samples = MakeAccuracySamplesFromOutput(
      metadata, _RunHarness('AccuracyOnly'))
  return performance_samples + accuracy_samples
def Cleanup(unused_bm_spec: benchmark_spec.BenchmarkSpec) -> None:
  """Cleanup MLPerf Inference on the cluster.

  No explicit teardown is performed; the argument is unused.
  """
| |
import numpy as np
import openmdao.api as om
from pycycle.flow_in import FlowIn
from pycycle.thermo.cea import species_data
from pycycle.thermo.thermo import Thermo
from pycycle.passthrough import PassThrough
from pycycle.element_base import Element
class BPRcalc(om.ExplicitComponent):
    """Splits a total mass flow into two streams via the bypass ratio.

    W1 = W_in / (BPR + 1) and W2 = W_in - W1, so that W2/W1 == BPR.
    """

    def setup(self):
        self.add_input('W_in', 1.0, desc='total weight flow in', units='lbm/s')
        self.add_input('BPR', 1.5, desc='ratio of mass flow in Fl_O2 to Fl_O1')

        self.add_output('W1', 0.44, desc='weight flow for Fl_O1', units='lbm/s')
        self.add_output('W2', 0.56, desc='Weight flow for Fl_O2', units='lbm/s')

        self.declare_partials('*', '*')

    def compute(self, inputs, outputs):
        # W1 gets 1/(BPR+1) of the inlet flow; W2 gets the remainder.
        denom = inputs['BPR'] + 1.0
        outputs['W1'] = inputs['W_in'] / denom
        outputs['W2'] = inputs['W_in'] - outputs['W1']

    def compute_partials(self, inputs, J):
        denom = inputs['BPR'] + 1.0
        W = inputs['W_in']

        J['W1', 'BPR'] = -W / denom**2
        J['W1', 'W_in'] = 1.0 / denom
        J['W2', 'BPR'] = W / denom**2
        J['W2', 'W_in'] = 1.0 - 1.0 / denom
class Splitter(Element):
    """
    Splits a single incoming flow into two outgoing flows

    --------------
    Flow Stations
    --------------
    Fl_I
    Fl_O1
    Fl_O2

    -------------
    Design
    -------------
        inputs
        --------
        BPR
        MN1
        MN2

        outputs
        --------

    -------------
    Off-Design
    -------------
        inputs
        --------
        BPR
        area1
        area2

        outputs
        --------
    """
    def initialize(self):
        """Declare options and the default design-to-off-design connections."""
        self.options.declare('statics', default=True,
                            desc='If True, calculate static properties.')
        # At off-design the outlet areas sized at the design point are fed
        # back in as the 'area1'/'area2' inputs.
        self.default_des_od_conns = [
            ('Fl_O1:stat:area', 'area1'),
            ('Fl_O2:stat:area', 'area2')
        ]
        super().initialize()
    def pyc_setup_output_ports(self):
        """Both outlet ports carry the same composition as the inlet."""
        self.copy_flow('Fl_I', 'Fl_O1')
        self.copy_flow('Fl_I', 'Fl_O2')
    def setup(self):
        """Build the splitter: BPR flow split plus thermo for each outlet."""
        thermo_method = self.options['thermo_method']
        thermo_data = self.options['thermo_data']
        statics = self.options['statics']
        design = self.options['design']
        composition = self.Fl_I_data['Fl_I']
        # Create inlet flowstation
        flow_in = FlowIn(fl_name='Fl_I')
        self.add_subsystem('flow_in', flow_in, promotes_inputs=('Fl_I:*',))
        # Split the flows
        self.add_subsystem('split_calc', BPRcalc(), promotes_inputs=('BPR', ('W_in', 'Fl_I:stat:W')))
        # Set Fl_out1 totals based on T, P
        # (total T and P are unchanged by the split; only W differs per leg)
        real_flow1 = Thermo(mode='total_TP', fl_name='Fl_O1:tot',
                            method=thermo_method,
                            thermo_kwargs={'composition':composition,
                                           'spec':thermo_data})
        self.add_subsystem('real_flow1', real_flow1,
                           promotes_inputs=(('composition', 'Fl_I:tot:composition'),
                                            ('P', 'Fl_I:tot:P'),
                                            ('T', 'Fl_I:tot:T')),
                           promotes_outputs=('Fl_O1:tot:*', ))
        # Set Fl_out2 totals based on T, P
        real_flow2 = Thermo(mode='total_TP', fl_name='Fl_O2:tot',
                            method=thermo_method,
                            thermo_kwargs={'composition':composition,
                                           'spec':thermo_data})
        self.add_subsystem('real_flow2', real_flow2, promotes_inputs=(('composition', 'Fl_I:tot:composition'),
                                                                      ('P', 'Fl_I:tot:P'),
                                                                      ('T', 'Fl_I:tot:T')),
                           promotes_outputs=('Fl_O2:tot:*', ))
        if statics:
            if design:
                # Design: statics are driven by the target Mach numbers
                # (MN1/MN2); the resulting areas become outputs.
                # Calculate static properties
                out1_stat = Thermo(mode='static_MN', fl_name='Fl_O1:stat',
                                   method=thermo_method,
                                   thermo_kwargs={'composition':composition,
                                                  'spec':thermo_data})
                prom_in = [('composition', 'Fl_I:tot:composition'),
                           ('MN','MN1')]
                prom_out = ['Fl_O1:stat:*']
                self.add_subsystem('out1_stat', out1_stat, promotes_inputs=prom_in,
                                   promotes_outputs=prom_out)
                self.connect('Fl_O1:tot:S', 'out1_stat.S')
                self.connect('Fl_O1:tot:h', 'out1_stat.ht')
                self.connect('Fl_O1:tot:P', 'out1_stat.guess:Pt')
                self.connect('Fl_O1:tot:gamma', 'out1_stat.guess:gamt')
                self.connect('split_calc.W1', 'out1_stat.W')
                out2_stat = Thermo(mode='static_MN', fl_name='Fl_O2:stat',
                                   method=thermo_method,
                                   thermo_kwargs={'composition':composition,
                                                  'spec':thermo_data})
                prom_in = [('composition', 'Fl_I:tot:composition'),
                           ('MN','MN2')]
                prom_out = ['Fl_O2:stat:*']
                self.add_subsystem('out2_stat', out2_stat, promotes_inputs=prom_in,
                                   promotes_outputs=prom_out)
                self.connect('Fl_O2:tot:S', 'out2_stat.S')
                self.connect('Fl_O2:tot:h', 'out2_stat.ht')
                self.connect('Fl_O2:tot:P', 'out2_stat.guess:Pt')
                self.connect('Fl_O2:tot:gamma', 'out2_stat.guess:gamt')
                self.connect('split_calc.W2', 'out2_stat.W')
            else:
                # Off-design: statics are driven by the fixed flow areas
                # ('area1'/'area2'), normally connected from the design point.
                # Calculate static properties
                out1_stat = Thermo(mode='static_A', fl_name='Fl_O1:stat',
                                   method=thermo_method,
                                   thermo_kwargs={'composition':composition,
                                                  'spec':thermo_data})
                prom_in = [('composition', 'Fl_I:tot:composition'),
                           ('area','area1')]
                prom_out = ['Fl_O1:stat:*']
                self.add_subsystem('out1_stat', out1_stat, promotes_inputs=prom_in,
                                   promotes_outputs=prom_out)
                self.connect('Fl_O1:tot:S', 'out1_stat.S')
                self.connect('Fl_O1:tot:h', 'out1_stat.ht')
                self.connect('Fl_O1:tot:P', 'out1_stat.guess:Pt')
                self.connect('Fl_O1:tot:gamma', 'out1_stat.guess:gamt')
                self.connect('split_calc.W1', 'out1_stat.W')
                out2_stat = Thermo(mode='static_A', fl_name='Fl_O2:stat',
                                   method=thermo_method,
                                   thermo_kwargs={'composition':composition,
                                                  'spec':thermo_data})
                prom_in = [('composition', 'Fl_I:tot:composition'),
                           ('area','area2')]
                prom_out = ['Fl_O2:stat:*']
                self.add_subsystem('out2_stat', out2_stat, promotes_inputs=prom_in,
                                   promotes_outputs=prom_out)
                self.connect('Fl_O2:tot:S', 'out2_stat.S')
                self.connect('Fl_O2:tot:h', 'out2_stat.ht')
                self.connect('Fl_O2:tot:P', 'out2_stat.guess:Pt')
                self.connect('Fl_O2:tot:gamma', 'out2_stat.guess:gamt')
                self.connect('split_calc.W2', 'out2_stat.W')
        else:
            # No statics requested: still expose Fl_O*:stat:W so downstream
            # elements can connect to the standard flow-station variables.
            self.add_subsystem('W1_passthru', PassThrough('split_calc_W1', 'Fl_O1:stat:W', 1.0, units= "lbm/s"),
                               promotes=['*'])
            self.add_subsystem('W2_passthru', PassThrough('split_calc_W2', 'Fl_O2:stat:W', 1.0, units= "lbm/s"),
                               promotes=['*'])
            self.connect('split_calc.W1', 'split_calc_W1')
            self.connect('split_calc.W2', 'split_calc_W2')
        super().setup()
if __name__ == "__main__":
    # Quick self-check: run BPRcalc standalone and verify its analytic partials.
    prob = om.Problem()

    ivc = prob.model.add_subsystem('des_vars', om.IndepVarComp(), promotes=['*'])
    ivc.add_output('W_in', 1.0, units='lbm/s')
    ivc.add_output('BPR', 1.5, units=None)

    prob.model.add_subsystem('comp', BPRcalc(), promotes=['*'])

    prob.setup()
    prob.run_model()
    prob.check_partials()
| |
"""Support for HomematicIP Cloud climate devices."""
from __future__ import annotations
from typing import Any
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
# hmip profile name -> profile index, split into heating vs. cooling groups.
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
# Extra state attribute: when the active absence/eco preset ends.
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
# Control-mode strings as reported by the HomematicIP backend.
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the HomematicIP climate from a config entry."""
    hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
    # One climate entity per heating group of the access point.
    entities = [
        HomematicipHeatingGroup(hap, group)
        for group in hap.home.groups
        if isinstance(group, AsyncHeatingGroup)
    ]
    if entities:
        async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericEntity, ClimateEntity):
    """Representation of the HomematicIP heating group.

    Heat mode is supported for all heating devices incl. their defined profiles.
    Boost is available for radiator thermostats only.
    Cool mode is only available for floor heating systems, if basically enabled in the hmip app.
    """
    def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
        """Initialize heating group."""
        device.modelType = "HmIP-Heating-Group"
        super().__init__(hap, device)
        # Groups without their own temperature reading fall back to the first
        # radiator thermostat's valve temperature (see current_temperature).
        self._simple_heating = None
        if device.actualTemperature is None:
            self._simple_heating = self._first_radiator_thermostat
    @property
    def device_info(self) -> DeviceInfo:
        """Return device specific attributes."""
        return {
            "identifiers": {(HMIPC_DOMAIN, self._device.id)},
            "name": self._device.label,
            "manufacturer": "eQ-3",
            "model": self._device.modelType,
            "via_device": (HMIPC_DOMAIN, self._device.homeId),
        }
    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
    @property
    def target_temperature(self) -> float:
        """Return the temperature we try to reach."""
        return self._device.setPointTemperature
    @property
    def current_temperature(self) -> float:
        """Return the current temperature."""
        if self._simple_heating:
            # Group has no own sensor; use the thermostat's valve temperature.
            return self._simple_heating.valveActualTemperature
        return self._device.actualTemperature
    @property
    def current_humidity(self) -> int:
        """Return the current humidity."""
        return self._device.humidity
    @property
    def hvac_mode(self) -> str:
        """Return hvac operation ie."""
        if self._disabled_by_cooling_mode and not self._has_switch:
            return HVAC_MODE_OFF
        # An active boost is reported as heating regardless of control mode.
        if self._device.boostMode:
            return HVAC_MODE_HEAT
        if self._device.controlMode == HMIP_MANUAL_CM:
            return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
        return HVAC_MODE_AUTO
    @property
    def hvac_modes(self) -> list[str]:
        """Return the list of available hvac operation modes."""
        if self._disabled_by_cooling_mode and not self._has_switch:
            return [HVAC_MODE_OFF]
        return (
            [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
            if self._heat_mode_enabled
            else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
        )
    @property
    def hvac_action(self) -> str | None:
        """
        Return the current hvac_action.

        This is only relevant for radiator thermostats.
        """
        if (
            self._device.floorHeatingMode == "RADIATOR"
            and self._has_radiator_thermostat
            and self._heat_mode_enabled
        ):
            # Any open valve position counts as actively heating.
            return (
                CURRENT_HVAC_HEAT if self._device.valvePosition else CURRENT_HVAC_IDLE
            )
        return None
    @property
    def preset_mode(self) -> str | None:
        """Return the current preset mode."""
        # Order matters: boost wins, then manual modes report no preset,
        # then eco/absence states, finally the active hmip profile.
        if self._device.boostMode:
            return PRESET_BOOST
        if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
            return PRESET_NONE
        if self._device.controlMode == HMIP_ECO_CM:
            if self._indoor_climate.absenceType == AbsenceType.VACATION:
                return PRESET_AWAY
            if self._indoor_climate.absenceType in [
                AbsenceType.PARTY,
                AbsenceType.PERIOD,
                AbsenceType.PERMANENT,
            ]:
                return PRESET_ECO
        return (
            self._device.activeProfile.name
            if self._device.activeProfile.name in self._device_profile_names
            else None
        )
    @property
    def preset_modes(self) -> list[str]:
        """Return a list of available preset modes incl. hmip profiles."""
        # Boost is only available if a radiator thermostat is in the room,
        # and heat mode is enabled.
        profile_names = self._device_profile_names
        presets = []
        if (
            self._heat_mode_enabled and self._has_radiator_thermostat
        ) or self._has_switch:
            if not profile_names:
                presets.append(PRESET_NONE)
            presets.append(PRESET_BOOST)
        presets.extend(profile_names)
        return presets
    @property
    def min_temp(self) -> float:
        """Return the minimum temperature."""
        return self._device.minTemperature
    @property
    def max_temp(self) -> float:
        """Return the maximum temperature."""
        return self._device.maxTemperature
    async def async_set_temperature(self, **kwargs) -> None:
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        # Silently ignore out-of-range requests instead of raising.
        if self.min_temp <= temperature <= self.max_temp:
            await self._device.set_point_temperature(temperature)
    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode."""
        if hvac_mode not in self.hvac_modes:
            return
        if hvac_mode == HVAC_MODE_AUTO:
            await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
        else:
            await self._device.set_control_mode(HMIP_MANUAL_CM)
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        if preset_mode not in self.preset_modes:
            return
        # Leaving boost: switch it off before applying the new preset.
        if self._device.boostMode and preset_mode != PRESET_BOOST:
            await self._device.set_boost(False)
        if preset_mode == PRESET_BOOST:
            await self._device.set_boost()
        if preset_mode in self._device_profile_names:
            # Profiles only apply in automatic mode; switch first if needed.
            profile_idx = self._get_profile_idx_by_name(preset_mode)
            if self._device.controlMode != HMIP_AUTOMATIC_CM:
                await self.async_set_hvac_mode(HVAC_MODE_AUTO)
            await self._device.set_active_profile(profile_idx)
    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes of the access point."""
        state_attr = super().extra_state_attributes
        if self._device.controlMode == HMIP_ECO_CM:
            if self._indoor_climate.absenceType in [
                AbsenceType.PARTY,
                AbsenceType.PERIOD,
                AbsenceType.VACATION,
            ]:
                state_attr[ATTR_PRESET_END_TIME] = self._indoor_climate.absenceEndTime
            elif self._indoor_climate.absenceType == AbsenceType.PERMANENT:
                state_attr[ATTR_PRESET_END_TIME] = PERMANENT_END_TIME
        return state_attr
    @property
    def _indoor_climate(self) -> IndoorClimateHome:
        """Return the hmip indoor climate functional home of this group."""
        return self._home.get_functionalHome(IndoorClimateHome)
    @property
    def _device_profiles(self) -> list[Any]:
        """Return the relevant profiles."""
        return [
            profile
            for profile in self._device.profiles
            if profile.visible
            and profile.name != ""
            and profile.index in self._relevant_profile_group
        ]
    @property
    def _device_profile_names(self) -> list[str]:
        """Return a collection of profile names."""
        return [profile.name for profile in self._device_profiles]
    def _get_profile_idx_by_name(self, profile_name: str) -> int:
        """Return a profile index by name."""
        # Assumes profile_name is present (callers validate against
        # preset_modes first); raises IndexError otherwise.
        relevant_index = self._relevant_profile_group
        index_name = [
            profile.index
            for profile in self._device_profiles
            if profile.name == profile_name
        ]
        return relevant_index[index_name[0]]
    @property
    def _heat_mode_enabled(self) -> bool:
        """Return, if heating mode is enabled."""
        return not self._device.cooling
    @property
    def _disabled_by_cooling_mode(self) -> bool:
        """Return, if group is disabled by the cooling mode."""
        return self._device.cooling and (
            self._device.coolingIgnored or not self._device.coolingAllowed
        )
    @property
    def _relevant_profile_group(self) -> dict[str, int]:
        """Return the relevant profile groups."""
        if self._disabled_by_cooling_mode:
            return {}
        return HEATING_PROFILES if self._heat_mode_enabled else COOLING_PROFILES
    @property
    def _has_switch(self) -> bool:
        """Return, if a switch is in the hmip heating group."""
        for device in self._device.devices:
            if isinstance(device, Switch):
                return True
        return False
    @property
    def _has_radiator_thermostat(self) -> bool:
        """Return, if a radiator thermostat is in the hmip heating group."""
        return bool(self._first_radiator_thermostat)
    @property
    def _first_radiator_thermostat(
        self,
    ) -> AsyncHeatingThermostat | AsyncHeatingThermostatCompact | None:
        """Return the first radiator thermostat from the hmip heating group."""
        for device in self._device.devices:
            if isinstance(
                device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)
            ):
                return device
        return None
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import clean
from keystone.common import kvs
from keystone.common import utils
from keystone import exception
from keystone import identity
class Identity(kvs.Base, identity.Driver):
# Public interface
    def authenticate(self, user_id=None, tenant_id=None, password=None):
        """Authenticate based on a user, tenant and password.

        Expects the user object to have a password field and the tenant to be
        in the list of tenants on the user.

        Returns:
            (filtered_user, tenant_ref, metadata_ref); tenant_ref may be None
            and metadata_ref {} when no tenant was requested or found.

        Raises:
            AssertionError: on unknown user, bad password, or a tenant the
                user is not a member of.  Unknown user and bad password share
                the same message, presumably to avoid revealing whether the
                user id exists.
        """
        user_ref = None
        tenant_ref = None
        metadata_ref = {}
        try:
            user_ref = self._get_user(user_id)
        except exception.UserNotFound:
            raise AssertionError('Invalid user / password')
        if not utils.check_password(password, user_ref.get('password')):
            raise AssertionError('Invalid user / password')
        if tenant_id is not None:
            if tenant_id not in self.get_projects_for_user(user_id):
                raise AssertionError('Invalid tenant')
            try:
                tenant_ref = self.get_project(tenant_id)
                metadata_ref = self.get_metadata(user_id, tenant_id)
            except exception.ProjectNotFound:
                # Membership said yes but the tenant record is gone; treat
                # as "no tenant" rather than failing the authentication.
                tenant_ref = None
                metadata_ref = {}
            except exception.MetadataNotFound:
                # No role metadata for this user/tenant pair.
                metadata_ref = {}
        return (identity.filter_user(user_ref), tenant_ref, metadata_ref)
def get_project(self, tenant_id):
try:
return self.db.get('tenant-%s' % tenant_id)
except exception.NotFound:
raise exception.ProjectNotFound(project_id=tenant_id)
def list_projects(self):
tenant_keys = filter(lambda x: x.startswith("tenant-"),
self.db.keys())
return [self.db.get(key) for key in tenant_keys]
def get_project_by_name(self, tenant_name, domain_id):
try:
return self.db.get('tenant_name-%s' % tenant_name)
except exception.NotFound:
raise exception.ProjectNotFound(project_id=tenant_name)
def get_project_users(self, tenant_id):
self.get_project(tenant_id)
user_keys = filter(lambda x: x.startswith("user-"), self.db.keys())
user_refs = [self.db.get(key) for key in user_keys]
user_refs = filter(lambda x: tenant_id in x['tenants'], user_refs)
return [identity.filter_user(user_ref) for user_ref in user_refs]
def _get_user(self, user_id):
try:
return self.db.get('user-%s' % user_id)
except exception.NotFound:
raise exception.UserNotFound(user_id=user_id)
def _get_user_by_name(self, user_name, domain_id):
try:
return self.db.get('user_name-%s' % user_name)
except exception.NotFound:
raise exception.UserNotFound(user_id=user_name)
def get_user(self, user_id):
return identity.filter_user(self._get_user(user_id))
def get_user_by_name(self, user_name, domain_id):
return identity.filter_user(
self._get_user_by_name(user_name, domain_id))
def get_metadata(self, user_id=None, tenant_id=None,
domain_id=None, group_id=None):
try:
if user_id:
if tenant_id:
return self.db.get('metadata-%s-%s' % (tenant_id,
user_id))
else:
return self.db.get('metadata-%s-%s' % (domain_id,
user_id))
else:
if tenant_id:
return self.db.get('metadata-%s-%s' % (tenant_id,
group_id))
else:
return self.db.get('metadata-%s-%s' % (domain_id,
group_id))
except exception.NotFound:
raise exception.MetadataNotFound()
def get_role(self, role_id):
try:
return self.db.get('role-%s' % role_id)
except exception.NotFound:
raise exception.RoleNotFound(role_id=role_id)
def list_users(self):
user_ids = self.db.get('user_list', [])
return [self.get_user(x) for x in user_ids]
def list_roles(self):
role_ids = self.db.get('role_list', [])
return [self.get_role(x) for x in role_ids]
def get_projects_for_user(self, user_id):
user_ref = self._get_user(user_id)
return user_ref.get('tenants', [])
def get_roles_for_user_and_project(self, user_id, tenant_id):
self.get_user(user_id)
self.get_project(tenant_id)
try:
metadata_ref = self.get_metadata(user_id, tenant_id)
except exception.MetadataNotFound:
metadata_ref = {}
return metadata_ref.get('roles', [])
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self.get_user(user_id)
self.get_project(tenant_id)
self.get_role(role_id)
try:
metadata_ref = self.get_metadata(user_id, tenant_id)
except exception.MetadataNotFound:
metadata_ref = {}
roles = set(metadata_ref.get('roles', []))
if role_id in roles:
msg = ('User %s already has role %s in tenant %s'
% (user_id, role_id, tenant_id))
raise exception.Conflict(type='role grant', details=msg)
roles.add(role_id)
metadata_ref['roles'] = list(roles)
self.update_metadata(user_id, tenant_id, metadata_ref)
    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
        """Revoke role_id from user_id on tenant_id.

        Raises:
            exception.RoleNotFound: if the role was never granted there.
        """
        try:
            metadata_ref = self.get_metadata(user_id, tenant_id)
        except exception.MetadataNotFound:
            metadata_ref = {}
        roles = set(metadata_ref.get('roles', []))
        if role_id not in roles:
            msg = 'Cannot remove role that has not been granted, %s' % role_id
            raise exception.RoleNotFound(message=msg)
        roles.remove(role_id)
        metadata_ref['roles'] = list(roles)
        if not len(roles):
            # Last role revoked: drop the metadata doc entirely and remove
            # the tenant from the user's membership list.
            self.db.delete('metadata-%s-%s' % (tenant_id, user_id))
            user_ref = self._get_user(user_id)
            tenants = set(user_ref.get('tenants', []))
            tenants.remove(tenant_id)
            user_ref['tenants'] = list(tenants)
            self.update_user(user_id, user_ref)
        else:
            self.update_metadata(user_id, tenant_id, metadata_ref)
# CRUD
    def create_user(self, user_id, user):
        """Create a user, enforcing a unique id and a unique (global) name.

        The ref is stored twice -- under 'user-<id>' and 'user_name-<name>'
        -- and the id is added to the 'user_list' index.

        Raises:
            exception.Conflict: if the id or name is already taken.
        """
        user['name'] = clean.user_name(user['name'])
        try:
            self.get_user(user_id)
        except exception.UserNotFound:
            pass
        else:
            msg = 'Duplicate ID, %s.' % user_id
            raise exception.Conflict(type='user', details=msg)
        try:
            self.get_user_by_name(user['name'], user['domain_id'])
        except exception.UserNotFound:
            pass
        else:
            msg = 'Duplicate name, %s.' % user['name']
            raise exception.Conflict(type='user', details=msg)
        # Never persist the clear-text password.
        user = utils.hash_user_password(user)
        new_user = user.copy()
        new_user.setdefault('groups', [])
        self.db.set('user-%s' % user_id, new_user)
        self.db.set('user_name-%s' % new_user['name'], new_user)
        user_list = set(self.db.get('user_list', []))
        user_list.add(user_id)
        self.db.set('user_list', list(user_list))
        return identity.filter_user(new_user)
def update_user(self, user_id, user):
if 'name' in user:
user['name'] = clean.user_name(user['name'])
existing = self.db.get('user_name-%s' % user['name'])
if existing and user_id != existing['id']:
msg = 'Duplicate name, %s.' % user['name']
raise exception.Conflict(type='user', details=msg)
# get the old name and delete it too
try:
old_user = self.db.get('user-%s' % user_id)
except exception.NotFound:
raise exception.UserNotFound(user_id=user_id)
new_user = old_user.copy()
user = utils.hash_user_password(user)
new_user.update(user)
if new_user['id'] != user_id:
raise exception.ValidationError('Cannot change user ID')
self.db.delete('user_name-%s' % old_user['name'])
self.db.set('user-%s' % user_id, new_user)
self.db.set('user_name-%s' % new_user['name'], new_user)
return new_user
def add_user_to_group(self, user_id, group_id):
self.get_group(group_id)
user_ref = self._get_user(user_id)
groups = set(user_ref.get('groups', []))
groups.add(group_id)
self.update_user(user_id, {'groups': list(groups)})
def check_user_in_group(self, user_id, group_id):
self.get_group(group_id)
user_ref = self._get_user(user_id)
if group_id not in set(user_ref.get('groups', [])):
raise exception.NotFound(_('User not found in group'))
def remove_user_from_group(self, user_id, group_id):
self.get_group(group_id)
user_ref = self._get_user(user_id)
groups = set(user_ref.get('groups', []))
try:
groups.remove(group_id)
except KeyError:
raise exception.NotFound(_('User not found in group'))
self.update_user(user_id, {'groups': list(groups)})
def list_users_in_group(self, group_id):
self.get_group(group_id)
user_keys = filter(lambda x: x.startswith("user-"), self.db.keys())
user_refs = [self.db.get(key) for key in user_keys]
user_refs_for_group = filter(lambda x: group_id in x['groups'],
user_refs)
return [identity.filter_user(x) for x in user_refs_for_group]
def list_groups_for_user(self, user_id):
user_ref = self._get_user(user_id)
group_ids = user_ref.get('groups', [])
return [self.get_group(x) for x in group_ids]
    def delete_user(self, user_id):
        """Delete both stored copies of a user and drop it from 'user_list'.

        Raises:
            exception.UserNotFound: if user_id does not exist.
        """
        try:
            old_user = self.db.get('user-%s' % user_id)
        except exception.NotFound:
            raise exception.UserNotFound(user_id=user_id)
        self.db.delete('user_name-%s' % old_user['name'])
        self.db.delete('user-%s' % user_id)
        user_list = set(self.db.get('user_list', []))
        # NOTE(review): set.remove raises KeyError if the id is missing from
        # 'user_list' (index out of sync with the 'user-*' keys) -- confirm
        # whether that case can occur.
        user_list.remove(user_id)
        self.db.set('user_list', list(user_list))
    def create_project(self, tenant_id, tenant):
        """Create a tenant, enforcing a unique id and a unique (global) name.

        The ref is stored twice: under 'tenant-<id>' and
        'tenant_name-<name>'.

        Raises:
            exception.Conflict: if the id or name is already taken.
        """
        tenant['name'] = clean.project_name(tenant['name'])
        try:
            self.get_project(tenant_id)
        except exception.ProjectNotFound:
            pass
        else:
            msg = 'Duplicate ID, %s.' % tenant_id
            raise exception.Conflict(type='tenant', details=msg)
        try:
            self.get_project_by_name(tenant['name'], tenant['domain_id'])
        except exception.ProjectNotFound:
            pass
        else:
            msg = 'Duplicate name, %s.' % tenant['name']
            raise exception.Conflict(type='tenant', details=msg)
        self.db.set('tenant-%s' % tenant_id, tenant)
        self.db.set('tenant_name-%s' % tenant['name'], tenant)
        return tenant
    def update_project(self, tenant_id, tenant):
        """Merge ``tenant`` into the stored ref, keeping both keys in sync.

        Raises:
            exception.Conflict: if the new name belongs to another tenant.
            exception.ProjectNotFound: if tenant_id does not exist.
        """
        if 'name' in tenant:
            tenant['name'] = clean.project_name(tenant['name'])
            try:
                existing = self.db.get('tenant_name-%s' % tenant['name'])
                if existing and tenant_id != existing['id']:
                    msg = 'Duplicate name, %s.' % tenant['name']
                    raise exception.Conflict(type='tenant', details=msg)
            except exception.NotFound:
                # The new name is unused -- no conflict.
                pass
        # get the old name and delete it too
        try:
            old_project = self.db.get('tenant-%s' % tenant_id)
        except exception.NotFound:
            raise exception.ProjectNotFound(project_id=tenant_id)
        new_project = old_project.copy()
        new_project.update(tenant)
        new_project['id'] = tenant_id
        self.db.delete('tenant_name-%s' % old_project['name'])
        self.db.set('tenant-%s' % tenant_id, new_project)
        self.db.set('tenant_name-%s' % new_project['name'], new_project)
        return new_project
def delete_project(self, tenant_id):
try:
old_project = self.db.get('tenant-%s' % tenant_id)
except exception.NotFound:
raise exception.ProjectNotFound(project_id=tenant_id)
self.db.delete('tenant_name-%s' % old_project['name'])
self.db.delete('tenant-%s' % tenant_id)
def create_metadata(self, user_id, tenant_id, metadata,
domain_id=None, group_id=None):
return self.update_metadata(user_id, tenant_id, metadata,
domain_id, group_id)
    def update_metadata(self, user_id, tenant_id, metadata,
                        domain_id=None, group_id=None):
        """Store a metadata doc keyed on (tenant-or-domain, user-or-group).

        As a side effect, storing user metadata on a tenant also records the
        tenant in the user's 'tenants' membership list.
        """
        if user_id:
            if tenant_id:
                self.db.set('metadata-%s-%s' % (tenant_id, user_id), metadata)
                # Keep the user's tenant membership list in sync.
                user_ref = self._get_user(user_id)
                tenants = set(user_ref.get('tenants', []))
                if tenant_id not in tenants:
                    tenants.add(tenant_id)
                    user_ref['tenants'] = list(tenants)
                    self.update_user(user_id, user_ref)
            else:
                self.db.set('metadata-%s-%s' % (domain_id, user_id), metadata)
        else:
            if tenant_id:
                self.db.set('metadata-%s-%s' % (tenant_id, group_id), metadata)
            else:
                self.db.set('metadata-%s-%s' % (domain_id, group_id), metadata)
        return metadata
def create_role(self, role_id, role):
try:
self.get_role(role_id)
except exception.RoleNotFound:
pass
else:
msg = 'Duplicate ID, %s.' % role_id
raise exception.Conflict(type='role', details=msg)
for role_ref in self.list_roles():
if role['name'] == role_ref['name']:
msg = 'Duplicate name, %s.' % role['name']
raise exception.Conflict(type='role', details=msg)
self.db.set('role-%s' % role_id, role)
role_list = set(self.db.get('role_list', []))
role_list.add(role_id)
self.db.set('role_list', list(role_list))
return role
    def update_role(self, role_id, role):
        """Merge ``role`` into the stored ref, enforcing name uniqueness.

        Raises:
            exception.Conflict: if the new name belongs to another role.
            exception.RoleNotFound: if role_id does not exist.
        """
        old_role_ref = None
        # Single pass: detect a name collision and locate the existing ref.
        for role_ref in self.list_roles():
            if role['name'] == role_ref['name'] and role_id != role_ref['id']:
                msg = 'Duplicate name, %s.' % role['name']
                raise exception.Conflict(type='role', details=msg)
            if role_id == role_ref['id']:
                old_role_ref = role_ref
        if old_role_ref is None:
            raise exception.RoleNotFound(role_id=role_id)
        new_role = old_role_ref.copy()
        new_role.update(role)
        new_role['id'] = role_id
        self.db.set('role-%s' % role_id, new_role)
        # NOTE(review): returns the caller's ``role`` dict, not the merged
        # ``new_role`` that was stored -- unlike update_user/update_project.
        # Confirm whether this is intended.
        return role
    def delete_role(self, role_id):
        """Delete a role and revoke every grant of it.

        Raises:
            exception.RoleNotFound: if role_id does not exist.
        """
        try:
            self.db.delete('role-%s' % role_id)
            # Sweep all metadata docs and revoke this role from each.
            metadata_keys = filter(lambda x: x.startswith("metadata-"),
                                   self.db.keys())
            for key in metadata_keys:
                # NOTE(review): keys look like 'metadata-<tenant>-<user>';
                # split('-') misparses ids that themselves contain '-'
                # (e.g. dashed UUIDs) -- confirm the id format used here.
                tenant_id = key.split('-')[1]
                user_id = key.split('-')[2]
                try:
                    self.remove_role_from_user_and_project(user_id,
                                                           tenant_id,
                                                           role_id)
                except exception.RoleNotFound:
                    # This grant did not include the role being deleted.
                    pass
        except exception.NotFound:
            raise exception.RoleNotFound(role_id=role_id)
        role_list = set(self.db.get('role_list', []))
        role_list.remove(role_id)
        self.db.set('role_list', list(role_list))
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None):
self.get_role(role_id)
if user_id:
self.get_user(user_id)
if group_id:
self.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self.get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
roles = set(metadata_ref.get('roles', []))
roles.add(role_id)
metadata_ref['roles'] = list(roles)
self.update_metadata(user_id, project_id, metadata_ref,
domain_id, group_id)
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None):
if user_id:
self.get_user(user_id)
if group_id:
self.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self.get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
return [self.get_role(x) for x in metadata_ref.get('roles', [])]
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None):
self.get_role(role_id)
if user_id:
self.get_user(user_id)
if group_id:
self.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self.get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
role_ids = set(metadata_ref.get('roles', []))
if role_id not in role_ids:
raise exception.RoleNotFound(role_id=role_id)
return self.get_role(role_id)
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None):
self.get_role(role_id)
if user_id:
self.get_user(user_id)
if group_id:
self.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self.get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
roles = set(metadata_ref.get('roles', []))
try:
roles.remove(role_id)
except KeyError:
raise exception.RoleNotFound(role_id=role_id)
metadata_ref['roles'] = list(roles)
self.update_metadata(user_id, project_id, metadata_ref,
domain_id, group_id)
# domain crud
def create_domain(self, domain_id, domain):
try:
self.get_domain(domain_id)
except exception.DomainNotFound:
pass
else:
msg = 'Duplicate ID, %s.' % domain_id
raise exception.Conflict(type='domain', details=msg)
try:
self.get_domain_by_name(domain['name'])
except exception.DomainNotFound:
pass
else:
msg = 'Duplicate name, %s.' % domain['name']
raise exception.Conflict(type='domain', details=msg)
self.db.set('domain-%s' % domain_id, domain)
self.db.set('domain_name-%s' % domain['name'], domain)
domain_list = set(self.db.get('domain_list', []))
domain_list.add(domain_id)
self.db.set('domain_list', list(domain_list))
return domain
def list_domains(self):
domain_ids = self.db.get('domain_list', [])
return [self.get_domain(x) for x in domain_ids]
def get_domain(self, domain_id):
try:
return self.db.get('domain-%s' % domain_id)
except exception.NotFound:
raise exception.DomainNotFound(domain_id=domain_id)
def get_domain_by_name(self, domain_name):
try:
return self.db.get('domain_name-%s' % domain_name)
except exception.NotFound:
raise exception.DomainNotFound(domain_id=domain_name)
def update_domain(self, domain_id, domain):
orig_domain = self.get_domain(domain_id)
domain['id'] = domain_id
self.db.set('domain-%s' % domain_id, domain)
self.db.set('domain_name-%s' % domain['name'], domain)
if domain['name'] != orig_domain['name']:
self.db.delete('domain_name-%s' % orig_domain['name'])
return domain
def delete_domain(self, domain_id):
domain = self.get_domain(domain_id)
self.db.delete('domain-%s' % domain_id)
self.db.delete('domain_name-%s' % domain['name'])
domain_list = set(self.db.get('domain_list', []))
domain_list.remove(domain_id)
self.db.set('domain_list', list(domain_list))
# group crud
def create_group(self, group_id, group):
try:
return self.db.get('group-%s' % group_id)
except exception.NotFound:
pass
else:
msg = _('Duplicate ID, %s.') % group_id
raise exception.Conflict(type='group', details=msg)
try:
self.db.get('group_name-%s' % group['name'])
except exception.NotFound:
pass
else:
msg = _('Duplicate name, %s.') % group['name']
raise exception.Conflict(type='group', details=msg)
self.db.set('group-%s' % group_id, group)
self.db.set('group_name-%s' % group['name'], group)
group_list = set(self.db.get('group_list', []))
group_list.add(group_id)
self.db.set('group_list', list(group_list))
return group
def list_groups(self):
group_ids = self.db.get('group_list', [])
return [self.get_group(x) for x in group_ids]
def get_group(self, group_id):
try:
return self.db.get('group-%s' % group_id)
except exception.NotFound:
raise exception.GroupNotFound(group_id=group_id)
def update_group(self, group_id, group):
# First, make sure we are not trying to change the
# name to one that is already in use
try:
self.db.get('group_name-%s' % group['name'])
except exception.NotFound:
pass
else:
msg = _('Duplicate name, %s.') % group['name']
raise exception.Conflict(type='group', details=msg)
# Now, get the old name and delete it
try:
old_group = self.db.get('group-%s' % group_id)
except exception.NotFound:
raise exception.GroupNotFound(group_id=group_id)
self.db.delete('group_name-%s' % old_group['name'])
# Finally, actually do the update
self.db.set('group-%s' % group_id, group)
self.db.set('group_name-%s' % group['name'], group)
return group
def delete_group(self, group_id):
try:
group = self.db.get('group-%s' % group_id)
except exception.NotFound:
raise exception.GroupNotFound(group_id=group_id)
# Delete any entries in the group lists of all users
user_keys = filter(lambda x: x.startswith("user-"), self.db.keys())
user_refs = [self.db.get(key) for key in user_keys]
for user_ref in user_refs:
groups = set(user_ref.get('groups', []))
if group_id in groups:
groups.remove(group_id)
self.update_user(user_ref['id'], {'groups': list(groups)})
# Now delete the group itself
self.db.delete('group-%s' % group_id)
self.db.delete('group_name-%s' % group['name'])
group_list = set(self.db.get('group_list', []))
group_list.remove(group_id)
self.db.set('group_list', list(group_list))
| |
#!/usr/bin/python
# Howto, Code license, Credits, etc: http://code.google.com/B/BCI-Project-Triathlon/
noGL = False # Set noGL to True for disabling the use of OpenGL (to gain speed, or to avoid python-wx-opengl problems)
import numpy
import wx
import math
import threading
import random
import sys
import os
import random
import InputManager
import WXElements
try:
from wx import glcanvas
haveGLCanvas = True
except ImportError:
haveGLCanvas = False
noGL = True
print "Will start without OpenGL, because wx.glcanvas is not available."
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
haveOpenGL = True
except ImportError:
haveOpenGL = False
noGL = True
print "Will start without OpenGL, because PyOpenGL is not available."
class AppSettings():
    """Runtime configuration for the analyzer.

    Attributes:
        niaFPS: timer ticks (sample batches) per second; the UI clamps
            this to the 1..50 range.
        deviceName: human-readable name of the selected input device.
        bands: list of (low_hz, high_hz) tuples defining the EEG bands.
    """
    # Default EEG frequency bands in Hz.  Kept as a class constant and
    # copied per instance (see __init__) so edits on one instance's
    # ``bands`` never leak into another.
    DEFAULT_BANDS = [(2, 4), (5, 7), (8, 10), (11, 13), (14, 16),
                     (17, 20), (21, 24), (25, 30), (31, 45)]

    def __init__(self,
                 niaFPS=10,
                 deviceName="OCZ Neural Impulse Actuator",
                 bands=None):
        # BUG FIX: ``bands`` used to be a mutable default argument
        # (a list literal), so every AppSettings instance shared -- and
        # mutated -- the same list.  Use the None sentinel and copy.
        self.niaFPS = niaFPS
        self.deviceName = deviceName
        self.bands = list(self.DEFAULT_BANDS) if bands is None else bands
class RawVisualizationPanel(WXElements.GLCanvasBase):
    """Notebook tab plotting the raw (time-domain) signal per device.

    Reads the module-level ``bciDevice`` global; draws one polyline per
    device, vertically offset by device index.
    """
    def InitGL(self):
        """One-time GL setup: lighting, depth test, projection, camera."""
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        """Redraw: a 500-sample polyline plus a text label per device."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        for eachI in range(len(bciDevice.devices)):
            glColor(0.55,0.55,0.3)
            wave_array = []
            # Newest 500 samples mapped to x in [-1, 1]; the 1e-7 factor
            # scales raw readings into view space (empirical constant).
            for historyIndex in reversed(xrange(500)):
                wave_array = wave_array +[[-1.0+ (2.0*float(historyIndex)/499.0), -1.0+((2.0*eachI)+(0.0000001*bciDevice.working_Data(eachI)[-1-historyIndex]))/len(bciDevice.devices)]]
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
        for eachI in range(len(bciDevice.devices)):
            glColor(0.55,0.55,0.3)
            glRasterPos2f(0.2 ,-0.5 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" Raw"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        """Repaint only when the Raw tab (notebook page 0) is visible."""
        if self.GetGrandParent().GetSelection()==0:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        """No local history is kept, so a reset is just a repaint."""
        if self.GetGrandParent().GetSelection()==0:
            self.SetCurrent()
            self.OnDraw()
class FFTVisualizationPanel(WXElements.GLCanvasBase):
    """Notebook tab plotting the FFT spectrum and per-band averages.

    Reads the module-level ``bciDevice`` and ``settings`` globals; for
    each device it draws one filled bar per configured EEG band plus the
    full 0-50 bin spectrum as a polyline.
    """
    def InitGL(self):
        """One-time GL setup: lighting, depth test, projection, camera."""
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        """Redraw: band bars, full spectrum polyline, and labels."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        # One quad (bar) per (device, band); height is the band average.
        for eachI in range(len(bciDevice.devices)):
            for eachFingerI in range(len(settings.bands)):
                everyFingerI = (eachI*len(settings.bands)) + eachFingerI
                glColor(everyFingerI*0.05+0.3,-everyFingerI*0.05+0.9,float(everyFingerI%2))
                seg = bciDevice.frequencies(eachI,settings.bands[eachFingerI][0],settings.bands[eachFingerI][1])
                avg = sum(seg)/len(seg)
                wave_array = [[ -1.0+ (2.0*float(settings.bands[eachFingerI][0])/49.0) , -1.0+((2.0*eachI))/len(bciDevice.devices) ],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][0])/49.0) , -1.0+((2.0*eachI) + avg)/len(bciDevice.devices)],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][1])/49.0), -1.0+((2.0*eachI))/len(bciDevice.devices)],
                              [ -1.0+ (2.0*float(settings.bands[eachFingerI][1])/49.0), -1.0+((2.0*eachI) + avg)/len(bciDevice.devices)]]
                glVertexPointerf(wave_array)
                glDrawArrays(GL_QUAD_STRIP, 0, len(wave_array))
        # Full 0-50 bin spectrum polyline per device.
        for eachI in range(len(bciDevice.devices)):
            glColor(1.0,0.55,0.3)
            wave_array = []
            for freqs in reversed(xrange(50)):
                wave_array = wave_array +[[-1.0+ (2.0*float(freqs)/49.0), -1.0+((2.0*eachI)+(bciDevice.frequencies(eachI,0,50)[freqs]))/len(bciDevice.devices)]]
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
        # Bitmap-text labels per device.
        for eachI in range(len(bciDevice.devices)):
            glColor(1.0,0.55,0.3)
            glRasterPos2f(0.2 ,-0.55 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" FFT"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
            glColor(0.55,0.55,0.55)
            glRasterPos2f(0.2 ,-0.60 +( (2.0*eachI))/len(bciDevice.devices))
            for eachChar in ("Device "+str(eachI)+" EEG Bands"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        """Repaint only when this tab (notebook page 2) is visible."""
        if self.GetGrandParent().GetSelection()==2:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        """No local history is kept, so a reset is just a repaint."""
        if self.GetGrandParent().GetSelection()==2:
            self.SetCurrent()
            self.OnDraw()
class FFTHistoryVisualizationPanel(WXElements.GLCanvasBase):
    """Notebook tab plotting a rolling 100-sample history of each EEG
    band's average power, one polyline per (device, band)."""
    def __init__(self, parent):
        # ylists: newest-first rolling window of 100 readings, each a
        # flat list of one value per (device, band) pair.
        self.ylists = [[ 0.0 for each in xrange(len(settings.bands)*len(bciDevice.devices))] for every in range(100)]
        # xlist appears precomputed for plotting but OnDraw recomputes
        # its own x coordinates -- possibly vestigial.
        self.xlist = [float(i)/float(-1+len(self.ylists[0])) for i in xrange(len(self.ylists[0]))]
        WXElements.GLCanvasBase.__init__(self, parent)
    def InitGL(self):
        """One-time GL setup: lighting, depth test, projection, camera."""
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        """Redraw: one history polyline + label per (device, band)."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        for everyFingerI in range(len(settings.bands)*len(bciDevice.devices)):
            glColor(everyFingerI*0.05+0.3,-everyFingerI*0.05+0.9,float(everyFingerI%2))
            wave_array = []
            for historyIndex in xrange(100):
                wave_array.append([-1.0+ (2.0*float(historyIndex)/99.0), -0.9 + (0.1*everyFingerI) + (0.3 * self.ylists[historyIndex][everyFingerI])])
            glVertexPointerf(wave_array)
            glDrawArrays(GL_LINE_STRIP, 0, len(wave_array))
            glRasterPos2f(-0.95 ,-0.95 + (0.1*everyFingerI) )
            for eachChar in ("Device "+str(everyFingerI/len(settings.bands))+", Band "+str(everyFingerI%len(settings.bands))+": "+
                             str(settings.bands[everyFingerI%len(settings.bands)][0])+
                             "-"+
                             str(settings.bands[everyFingerI%len(settings.bands)][1])+" Hz"):
                glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, ord(eachChar))
        self.SwapBuffers()
    def newReading(self):
        """Push the latest per-band averages into the rolling window and
        repaint when this tab (notebook page 3) is visible."""
        newReadings = []
        for eachDeviceI in range(len(bciDevice.devices)):
            for eachFingerIndex in range(len(settings.bands)):
                fingerlist = bciDevice.frequencies(eachDeviceI,settings.bands[eachFingerIndex][0],settings.bands[eachFingerIndex][1])
                newReadings.append(float(sum(fingerlist))/float(len(fingerlist)))
        # Prepend newest reading, dropping the oldest (window size 100).
        self.ylists = [newReadings]+self.ylists[0:99]
        if self.GetGrandParent().GetSelection()==3:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        """Zero the history window and repaint if this tab is visible."""
        self.ylists = [[0.0 for each in xrange(len(settings.bands)*len(bciDevice.devices))] for every in range(100)]
        if self.GetGrandParent().GetSelection()==3:
            self.SetCurrent()
            self.OnDraw()
class SpectogramVisualizationPanel(WXElements.GLCanvasBase):
    """Notebook tab drawing a scrolling spectrogram: columns of colored
    quads, one color per (device x 50 FFT bins) value, newest first."""
    def __init__(self, parent):
        WXElements.GLCanvasBase.__init__(self, parent)
        # Number of history columns kept (and drawn) at once.
        self.historyLength = 5
        # colorlists: newest-first list of columns; each column holds one
        # RGB triple per FFT bin across all devices.
        self.colorlists = [[ self.spectralColor(0.0) for each in xrange(50*len(bciDevice.devices))] for every in xrange(self.historyLength)]
        xlist = [-1.0+(2.0*float(i)/float(-1+self.historyLength)) for i in xrange(self.historyLength)]
        ylist = [-1.0+(2.0*float(i)/float(-1+len(self.colorlists[0]))) for i in xrange(len(self.colorlists[0]))]
        columns = []
        # quadCols: fixed vertex geometry -- each entry interleaves two
        # adjacent columns of points for a GL_QUAD_STRIP.
        self.quadCols = []
        for historyIndex in xrange(self.historyLength):
            x = xlist[historyIndex]
            columns.append([[x,y] for y in ylist])
        for historyIndex in xrange(self.historyLength-1):
            self.quadCols.append(zip ( columns[historyIndex] , columns[historyIndex+1]))
        # Matching interleaved color pairs for each quad strip.
        self.spectralColorColumHistory = []
        for historyIndex in xrange(self.historyLength-1):
            self.spectralColorColumHistory.append(zip ( self.colorlists[historyIndex] , self.colorlists[historyIndex+1]))
    def InitGL(self):
        """One-time GL setup: lighting, depth test, projection, camera."""
        light_diffuse = [1.0, 1.0, 1.0, 1.0]
        light_position = [1.0, 1.0, 1.0, 0.0]
        glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
        glLightfv(GL_LIGHT0, GL_POSITION, light_position)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClearDepth(1.0)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 30.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(0.0, 0.0, 10.0,
                  0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0)
    def OnDraw(self):
        """Redraw: one colored quad strip per adjacent column pair."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_COLOR_ARRAY)
        for historyIndex in xrange(self.historyLength-1):
            glVertexPointerf(self.quadCols[historyIndex])
            glColorPointerf(self.spectralColorColumHistory[historyIndex])
            glDrawArrays(GL_QUAD_STRIP, 0, 2*50*len(bciDevice.devices))
        self.SwapBuffers()
    def newReading(self):
        """Shift in a new color column from the latest FFT readings and
        repaint when this tab (notebook page 1) is visible."""
        newReadings = []
        for eachDeviceI in range(len(bciDevice.devices)):
            newReadings.extend(bciDevice.frequencies(eachDeviceI,0,50))
        self.colorlists = [map(self.spectralColor,newReadings)]+self.colorlists[0:(self.historyLength-1)]
        self.spectralColorColumHistory = [ zip ( self.colorlists[0] , self.colorlists[1])
                                          ]+self.spectralColorColumHistory[0:(self.historyLength-2)]
        if self.GetGrandParent().GetSelection()==1:
            self.SetCurrent()
            self.OnDraw()
    def resetReading(self):
        """Rebuild all geometry and color history from zeros."""
        self.colorlists = [[ self.spectralColor(0.0) for each in xrange(50*len(bciDevice.devices))] for every in xrange(self.historyLength)]
        xlist = [-1.0+(2.0*float(i)/float(-1+self.historyLength)) for i in xrange(self.historyLength)]
        ylist = [-1.0+(2.0*float(i)/float(-1+len(self.colorlists[0]))) for i in xrange(len(self.colorlists[0]))]
        columns = []
        self.quadCols = []
        for historyIndex in xrange(self.historyLength):
            x = xlist[historyIndex]
            columns.append([[x,y] for y in ylist])
        for historyIndex in xrange(self.historyLength-1):
            self.quadCols.append(zip ( columns[historyIndex] , columns[historyIndex+1]))
        self.spectralColorColumHistory = []
        for historyIndex in xrange(self.historyLength-1):
            self.spectralColorColumHistory.append(zip ( self.colorlists[historyIndex] , self.colorlists[historyIndex+1]))
        if self.GetGrandParent().GetSelection()==1:
            self.SetCurrent()
            self.OnDraw()
    def spectralColor(self,v):
        """Map a scalar magnitude to an RGB triple (black -> blue ->
        magenta -> red -> yellow -> white as v grows)."""
        if v <= 0.0:
            return [0.0,0.0,0.0]
        elif v <= 0.2:
            return [0.0,0.0,v*5.0]
        elif v <= 0.5:
            return [(v-0.2)/0.3,0.0,1.0]
        elif v <= 1.5:
            return [1.0,0.0,1.0-(v-0.5)]
        elif v <= 11.5:
            return [1.0,((v-1.5)*0.05),0.0]
        else:
            return [1.0,1.0,((v-11.5)*0.008)]
class SettingsPanel(wx.Panel):
    """Top bar of the main window: sample-rate field plus per-band
    frequency-range editors.  Writes into the module-level ``settings``
    and reconfigures ``bciDevice`` and the app timer on change."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.fpsField = wx.TextCtrl(self,value=str(settings.niaFPS))
        self.fpsField.Bind(wx.EVT_KILL_FOCUS, self.fpsChanged)
        panelSizer = wx.FlexGridSizer(0,10,0,0)
        panelSizer.AddGrowableCol(0)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        panelSizer.Add(wx.StaticText(self,label="Samples per second:"), 0, wx.ALIGN_CENTER, 5)
        panelSizer.Add(self.fpsField, 0, wx.EXPAND, 5)
        panelSizer.AddGrowableCol(3)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        # Band selector; choice index i maps to settings.bands[i].
        self.bandChoice = wx.Choice(self,choices=[("EEG Band "+str(i)) for i in xrange(9)])
        panelSizer.Add(self.bandChoice, 0, wx.ALIGN_CENTER, 5)
        self.bandChoice.Bind(wx.EVT_CHOICE, self.bandChanged)
        self.fromFreqField = wx.TextCtrl(self,value=str(settings.bands[0][0]))
        panelSizer.Add(self.fromFreqField, 0, wx.EXPAND, 5)
        self.fromFreqField.Bind(wx.EVT_KILL_FOCUS, self.freqChanged)
        panelSizer.Add(wx.StaticText(self,label="-"), 0, wx.ALIGN_CENTER, 5)
        self.toFreqField = wx.TextCtrl(self,value=str(settings.bands[0][1]))
        panelSizer.Add(self.toFreqField, 0, wx.EXPAND, 5)
        self.toFreqField.Bind(wx.EVT_KILL_FOCUS, self.freqChanged)
        panelSizer.Add(wx.StaticText(self,label="Hz"), 0, wx.ALIGN_CENTER, 5)
        panelSizer.AddGrowableCol(9)
        panelSizer.Add(wx.StaticText(self,label=""), 0, wx.ALIGN_CENTER, 5)
        self.SetSizer(panelSizer)
        self.SetAutoLayout(1)
    def fpsChanged(self, event):
        """Parse and clamp the FPS field to 1..50, then restart the
        sampling timer with the new rate."""
        val = 0
        try:
            val = int(self.fpsField.GetValue())
        except ValueError:
            # Non-numeric input: fall back to the current setting.
            val = settings.niaFPS
        if (val<1):
            val = 1
        elif (val>50):
            val = 50
        settings.niaFPS = val
        self.fpsField.SetValue(str(val))
        self.GetGrandParent().timer.Stop()
        # One tick consumes 500/niaFPS points -> constant sample stream.
        bciDevice.setPoints(int(500.0/settings.niaFPS))
        self.GetGrandParent().timer.Start(int(1000.0/settings.niaFPS))
        event.Skip()
    def bandChanged(self, event):
        """Load the selected band's range into the frequency fields."""
        i = self.bandChoice.GetSelection()
        self.fromFreqField.SetValue(str(settings.bands[i][0]))
        self.toFreqField.SetValue(str(settings.bands[i][1]))
        event.Skip()
    def freqChanged(self, event):
        """Validate the from/to fields (clamp to 0..100, ensure from<to
        with a gap of at least 2) and store them on the selected band."""
        i = self.bandChoice.GetSelection()
        fr = 0
        try:
            fr = int(self.fromFreqField.GetValue())
        except ValueError:
            fr = settings.bands[i][0]
        if (fr<0):
            fr = 0
        elif (fr>100):
            fr = 100
        to = 0
        try:
            to = int(self.toFreqField.GetValue())
        except ValueError:
            to = settings.bands[i][1]
        if (to<0):
            to = 0
        elif (to>100):
            to = 100
        # Swap when reversed; widen degenerate ranges to span >= 2 Hz.
        if to<fr:
            sw = fr
            fr = to
            to = sw
        elif to == fr:
            to = to+2
        if abs(to-fr)==1:
            to=to+1
        self.fromFreqField.SetValue(str(fr))
        self.toFreqField.SetValue(str(to))
        settings.bands[i] = (fr,to)
        event.Skip()
class GUIMain(wx.Frame):
    """Main application window: menu bar, settings strip, and a notebook
    with four visualization tabs (Raw, Spectogram, EEG Bands, History).

    A wx.Timer drives data acquisition; each tick records from the
    device(s) and asks every visualization canvas to repaint.
    """
    def __init__(self):
        wx.Frame.__init__(self,None,title="Triathlon Analyzer",size=(600,600))
        self.panel = wx.Panel(self, wx.ID_ANY)
        MenuBar = wx.MenuBar()
        self.FileMenu = wx.Menu()
        item = self.FileMenu.Append(wx.ID_ANY, text="Calibrate")
        self.Bind(wx.EVT_MENU, self.OnCalibrate, item)
        item = self.FileMenu.Append(wx.ID_EXIT, text="Quit")
        self.Bind(wx.EVT_MENU, self.OnQuit, item)
        MenuBar.Append(self.FileMenu, "Menu")
        self.SetMenuBar(MenuBar)
        sizer = wx.FlexGridSizer(2,1,0,0)
        self.settingsPanel = SettingsPanel(self.panel)
        self.tabs = wx.Notebook(self.panel)
        # Each tab wraps its canvas in a single-cell growable sizer; when
        # OpenGL is unavailable a plain fallback panel is used instead.
        rawvisualizationPanel = wx.Panel(self.tabs, wx.ID_ANY)
        rawvisualizationSizer = wx.FlexGridSizer(1,1,0,0)
        rawvisualizationSizer.AddGrowableRow(0)
        rawvisualizationSizer.AddGrowableCol(0)
        if noGL:
            self.rawvisualizationCanvas = WXElements.NoGLVisualizationPanel(rawvisualizationPanel)
        else:
            self.rawvisualizationCanvas = RawVisualizationPanel(rawvisualizationPanel)
        rawvisualizationSizer.Add(self.rawvisualizationCanvas , 1, wx.EXPAND)
        rawvisualizationPanel.SetSizer(rawvisualizationSizer)
        visualizationPanel = wx.Panel(self.tabs, wx.ID_ANY)
        visualizationSizer = wx.FlexGridSizer(1,1,0,0)
        visualizationSizer.AddGrowableRow(0)
        visualizationSizer.AddGrowableCol(0)
        if noGL:
            self.visualizationCanvas = WXElements.NoGLVisualizationPanel(visualizationPanel)
        else:
            self.visualizationCanvas = FFTVisualizationPanel(visualizationPanel)
        visualizationSizer.Add(self.visualizationCanvas , 1, wx.EXPAND)
        visualizationPanel.SetSizer(visualizationSizer)
        historyPanel = wx.Panel(self.tabs, wx.ID_ANY)
        historySizer = wx.FlexGridSizer(1,1,0,0)
        historySizer.AddGrowableRow(0)
        historySizer.AddGrowableCol(0)
        if noGL:
            self.historyCanvas = WXElements.NoGLVisualizationPanel(historyPanel)
        else:
            self.historyCanvas = FFTHistoryVisualizationPanel(historyPanel)
        historySizer.Add(self.historyCanvas , 1, wx.EXPAND)
        historyPanel.SetSizer(historySizer)
        spectogramPanel = wx.Panel(self.tabs, wx.ID_ANY)
        spectogramSizer = wx.FlexGridSizer(1,1,0,0)
        spectogramSizer.AddGrowableRow(0)
        spectogramSizer.AddGrowableCol(0)
        if noGL:
            self.spectogramCanvas = WXElements.NoGLVisualizationPanel(spectogramPanel)
        else:
            self.spectogramCanvas = SpectogramVisualizationPanel(spectogramPanel)
        spectogramSizer.Add(self.spectogramCanvas , 1, wx.EXPAND)
        spectogramPanel.SetSizer(spectogramSizer)
        # Page order here fixes the GetSelection() indexes the canvases
        # check: 0=Raw, 1=Spectogram, 2=EEG Bands, 3=History.
        self.tabs.AddPage(rawvisualizationPanel,"Raw")
        self.tabs.AddPage(spectogramPanel,"Spectogram")
        self.tabs.AddPage(visualizationPanel,"EEG Bands")
        self.tabs.AddPage(historyPanel,"EEG Band History")
        sizer.AddGrowableCol(0)
        sizer.Add(wx.StaticText(self.panel,label=""), 0, wx.ALIGN_CENTER, 5)
        sizer.Add(self.settingsPanel , 1, wx.EXPAND)
        sizer.Add(wx.StaticText(self.panel,label=""), 0, wx.ALIGN_CENTER, 5)
        sizer.AddGrowableRow(3)
        sizer.Add(self.tabs , 1, wx.EXPAND)
        self.panel.SetSizer(sizer)
        self.panel.SetAutoLayout(1)
        # Timer started by NiaEEGApp once the device is configured.
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.Bind(wx.EVT_TIMER, self.NiaUpdate, self.timer)
    def OnQuit(self, event):
        """Stop sampling and close the window."""
        self.timer.Stop()
        self.Close()
    def OnCalibrate(self, event):
        """Menu handler: run device calibration."""
        bciDevice.calibrateAll()
        event.Skip()
    def NiaUpdate(self, ev):
        """Timer tick: record from the device(s) on worker threads,
        process the data, and refresh every visualization canvas."""
        if bciDevice.deviceType == InputManager.OCZ_NIAx2:
            # Dual-device setup records both channels concurrently.
            data_thread = threading.Thread(target=bciDevice.record,args=([0]))
            data_thread2 = threading.Thread(target=bciDevice.record,args=([1]))
            data_thread.start()
            data_thread2.start()
            bciDevice.process(0)
            bciDevice.process(1)
        else:
            data_thread = threading.Thread(target=bciDevice.record,args=([0]))
            data_thread.start()
            bciDevice.process(0)
        self.rawvisualizationCanvas.newReading()
        self.spectogramCanvas.newReading()
        self.visualizationCanvas.newReading()
        self.historyCanvas.newReading()
        ev.Skip()
class NiaEEGApp(wx.App):
    """wx application wrapper: builds the main window and starts the
    sampling timer at ``settings.niaFPS`` ticks per second."""
    def __init__(self, redirect = False):
        # NOTE(review): ``redirect`` is accepted but never forwarded to
        # wx.App.__init__, so stdout redirection follows the wx default
        # regardless of this argument -- confirm whether that is intended.
        wx.App.__init__(self)
        self.mainWindow = GUIMain()
        self.mainWindow.Show(True)
        # One timer tick consumes 500/niaFPS points per channel.
        bciDevice.setPoints(int(500.0/settings.niaFPS))
        self.mainWindow.timer.Start(int(1000.0/settings.niaFPS))
if __name__ == "__main__":
    # Build settings, let the user pick a device, then hand control to wx.
    settings = AppSettings()
    selection = WXElements.selection("Select your Device",InputManager.SupportedDevices.keys()[0],InputManager.SupportedDevices.keys())
    settings.deviceName = selection
    bciDevice = InputManager.BCIDevice(settings.deviceName)
    argcp = ''
    # NOTE(review): glutInit is handed an empty string before sys.argv;
    # confirm this matches the glutInit signature of the PyOpenGL version
    # in use (some accept (argcp, argv), others just (argv)).
    glutInit(argcp, sys.argv)
    niaEEGApp = NiaEEGApp()
    niaEEGApp.MainLoop()
| |
from json import JSONDecodeError
from urllib.parse import urljoin
import requests
from django.forms.models import model_to_dict
from django.http import JsonResponse, HttpResponseBadRequest
from django.shortcuts import redirect, get_object_or_404
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.views.generic import View
from workshops.forms import EventLookupForm
from workshops.models import (
Event,
Person,
Sponsorship,
Task,
)
from workshops.util import OnlyForAdminsMixin
from workshops.base_views import AMYCreateView, AMYFormView
from .api import PersonAPIClient, TaskAPIClient, SponsorshipAPIClient
from .forms import PersonAddFormSet, TaskAddFormSet, SponsorshipAddFormSet
class ConferenceImport(OnlyForAdminsMixin, View):
    """
    Fetch conference details from `/api/` API endpoint of a PyData conference.
    """

    def get(self, request):
        """Return slug/start/end for the conference at ``?url=...``.

        Responds with HTTP 400 when the parameter is missing, the site is
        unreachable or non-JSON, or the payload lacks expected fields.
        """
        # BUG FIX: the query parameter is validated up front.  Previously
        # a single ``except KeyError`` also caught KeyErrors raised while
        # reading the *conference payload* (e.g. a missing 'start_date'),
        # misreporting those as a missing "url" parameter.
        url = request.GET.get('url')
        if url is None:
            return HttpResponseBadRequest('Missing "url" parameter')
        try:
            conf = requests.get(urljoin(url, 'api/')).json()
            return JsonResponse({
                'slug': slugify('{}-{}'.format(conf['start_date'], conf['title'])),
                'start': conf['start_date'],
                'end': conf['end_date'],
            })
        except (requests.exceptions.RequestException, JSONDecodeError):
            return HttpResponseBadRequest('Conference site does not support an API')
        except Exception as e:
            # Catch-all boundary: report the failure as a 400 with detail.
            return HttpResponseBadRequest(str(e))
class BaseImport(OnlyForAdminsMixin, View):
    """
    Fetch an API endpoint at a PyData conference site.
    Returns a JSON response consisting of fields and their values.
    """

    def serialize(self, obj):
        '''Returns a dict with serializable fields from a model instance'''
        raise NotImplementedError()

    def get_pk(self, url):
        """
        Returns a 2-tuple containing the conference site URL
        and the primary key of the object if the URL is valid.
        Returns None when the URL is invalid.
        """
        raise NotImplementedError()

    def get(self, request):
        """Resolve ``?url=...`` to a remote object via the conference API
        client and return its serialized form; HTTP 400 on any failure."""
        url = None
        try:
            url = request.GET['url']
            # get_pk() returns None for an invalid URL; calling .groups()
            # then raises AttributeError, handled below.
            conf_url, pk = self.get_pk(url).groups()
            event = Event.objects.get(url__icontains=conf_url)
            client = self.client(event)
            obj = client[pk]
            return JsonResponse(self.serialize(obj))
        except KeyError:
            return HttpResponseBadRequest('Missing "url" parameter')
        except AttributeError:
            return HttpResponseBadRequest('Invalid "url" parameter')
        except Event.DoesNotExist:
            return HttpResponseBadRequest('Object does not belong to any event')
        except requests.exceptions.HTTPError as e:
            # BUG FIX: this handler referenced ``self.url``, an attribute
            # that is never set, so an HTTP error from the API crashed
            # with AttributeError instead of producing a 400 response.
            # Use the local ``url`` captured above instead.
            return HttpResponseBadRequest(
                'Request for "{0}" returned status code {1}.'
                .format(url, e.response.status_code)
            )
        except requests.exceptions.RequestException:
            return HttpResponseBadRequest('Network connection error')
        except Exception as e:
            # Catch-all boundary: report the failure as a 400 with detail.
            return HttpResponseBadRequest(str(e))
class PersonImport(BaseImport):
    """
    Fetches details about a speaker from the `/api/speaker/<id>`
    API endpoint of a PyData conference.
    """
    client = PersonAPIClient

    def serialize(self, person):
        """Flatten a speaker record into plain, JSON-safe fields."""
        field_names = ('username', 'personal', 'family', 'email', 'url')
        return {name: getattr(person, name) for name in field_names}

    def get_pk(self, url):
        """Match *url* against the speaker-profile URL pattern."""
        return Person.PROFILE_REGEX.match(url)
class TaskImport(BaseImport):
    """
    Fetches details about a presentation from the `/api/presentation/<id>`
    API endpoint of a PyData conference.
    """
    client = TaskAPIClient

    def serialize(self, task):
        """Flatten a presentation record into plain, JSON-safe fields."""
        serialized = {}
        serialized['person'] = task.person.email
        serialized['role'] = task.role.pk
        serialized['title'] = task.title
        serialized['url'] = task.url
        return serialized

    def get_pk(self, url):
        """Match *url* against the presentation URL pattern."""
        return Task.PRESENTATION_REGEX.match(url)
class SponsorshipImport(BaseImport):
    """
    Fetches details about a sponsor from the `/api/sponsor/<id>`
    API endpoint of a PyData conference.
    """
    client = SponsorshipAPIClient

    def serialize(self, sponsorship):
        """Flatten a sponsorship record into plain, JSON-safe fields."""
        serialized = {}
        serialized['organization'] = sponsorship.organization.domain
        serialized['amount'] = sponsorship.amount
        serialized['contact'] = sponsorship.contact.email
        return serialized

    def get_pk(self, url):
        """Match *url* against the sponsor URL pattern."""
        return Sponsorship.PROFILE_REGEX.match(url)
class BulkImportEventSelect(OnlyForAdminsMixin, AMYFormView):
    """Ask the admin which event to import into, then hand off to the
    first (person) bulk-import step."""
    form_class = EventLookupForm
    template_name = 'workshops/generic_form.html'
    title = 'Bulk import a Conference'

    def form_valid(self, form):
        """Redirect to the person-import step for the chosen event."""
        event = form.cleaned_data['event']
        target = reverse('bulk_import_person', kwargs={'slug': event.slug})
        return redirect(target)
class BaseBulkImport(OnlyForAdminsMixin, AMYCreateView):
    """
    Class-based view for importing instances from PyData API client.
    Overrides AMYCreateView to populate initial data and a custom
    success message.

    Subclasses must set ``model``, ``form_class`` (a formset),
    ``client`` (an API client class), and ``template_name``.
    """
    success_message = 'Successfully imported {count} {model}'
    def get_initial(self):
        '''Obtain model instances from the API client'''
        event = get_object_or_404(Event, slug=self.kwargs['slug'])
        client = self.client(event=event)
        return [model_to_dict(obj) for obj in client]
    def get_form_kwargs(self):
        """Adapt CreateView kwargs to what a formset accepts."""
        # Magic:
        # 1. Formset can only be bound when initialized as
        #    `formset = form_class(data=request.POST)`
        # 2. SingleObjectMixin introduces `instance` in kwargs
        #    which is not accepted by the form_class.
        kwargs = super().get_form_kwargs()
        if self.request.method in ('POST', 'PUT'):
            return {'data': self.request.POST}
        kwargs.pop('instance')
        return kwargs
    def get(self, *args, **kwargs):
        '''Return HTTP400 when API does not respond'''
        try:
            return super().get(*args, **kwargs)
        except (IOError, NotImplementedError) as e:
            return HttpResponseBadRequest(str(e))
    def get_success_message(self, cleaned_data):
        """Count forms not flagged for deletion and report the total."""
        return self.success_message.format(
            count=sum([not data['DELETE'] for data in cleaned_data]),
            model=self.model._meta.verbose_name_plural
        )
class PersonBulkImport(BaseBulkImport):
    """Bulk-import speakers for one event (step 1 of 3)."""
    model = Person
    form_class = PersonAddFormSet
    client = PersonAPIClient
    template_name = 'pydata/bulk-import/person.html'

    def get_success_url(self):
        """After importing people, continue with the task step."""
        route_kwargs = {'slug': self.kwargs['slug']}
        return reverse('bulk_import_task', kwargs=route_kwargs)
class TaskBulkImport(BaseBulkImport):
    """Bulk-import presentation tasks for one event (step 2 of 3)."""
    model = Task
    form_class = TaskAddFormSet
    client = TaskAPIClient
    template_name = 'pydata/bulk-import/task.html'

    def get_success_url(self):
        """After importing tasks, continue with the sponsorship step."""
        route_kwargs = {'slug': self.kwargs['slug']}
        return reverse('bulk_import_sponsorship', kwargs=route_kwargs)
class SponsorshipBulkImport(BaseBulkImport):
    """Bulk-import sponsorships for one event (step 3 of 3)."""
    model = Sponsorship
    form_class = SponsorshipAddFormSet
    client = SponsorshipAPIClient
    template_name = 'pydata/bulk-import/sponsorship.html'

    def get_success_url(self):
        """Last step: return to the event's detail page."""
        route_kwargs = {'slug': self.kwargs['slug']}
        return reverse('event_details', kwargs=route_kwargs)
| |
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos2d "compile" plugin
#
# Copyright 2013 (C) Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"compile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import cocos
import cocos_project
import subprocess
import os
import re
import sys
import shutil
import platform
import json
import build_web
if sys.platform == 'win32':
import _winreg
class CCPluginCompile(cocos.CCPlugin):
    """
    compiles a project
    """
    # Name of the per-project build configuration file.
    BUILD_CONFIG_FILE = "build-cfg.json"
    # Legacy win32-specific copy keys in build-cfg.json; migrated to the
    # platform-neutral keys below by _update_build_cfg().
    CFG_KEY_WIN32_COPY_FILES = "copy_files"
    CFG_KEY_WIN32_MUST_COPY_FILES = "must_copy_files"
    # Current, platform-neutral resource-copy keys.
    CFG_KEY_COPY_RESOURCES = "copy_resources"
    CFG_KEY_MUST_COPY_RESOURCES = "must_copy_resources"
    # Output directory names: native builds vs script-project builds
    # (debug -> runtime/, release -> publish/); see _get_output_dir().
    OUTPUT_DIR_NATIVE = "bin"
    OUTPUT_DIR_SCRIPT_DEBUG = "runtime"
    OUTPUT_DIR_SCRIPT_RELEASE = "publish"
    # Keys read from the project's own configuration file.
    PROJ_CFG_KEY_IOS_SIGN_ID = "ios_sign_id"
    PROJ_CFG_KEY_ENGINE_DIR = "engine_dir"
    # Suffix appended to directories backed up before modification.
    BACKUP_SUFFIX = "-backup"
    ENGINE_JS_DIR = "frameworks/js-bindings/bindings/script"
    @staticmethod
    def plugin_name():
        """Command name under which this plugin is invoked (``cocos compile``)."""
        return "compile"
    @staticmethod
    def brief_description():
        """One-line help text shown in the command listing."""
        return "Compiles the current project to binary"
def _add_custom_options(self, parser):
from argparse import ArgumentParser
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help="Set the compile mode, should be debug|release, default is debug.")
parser.add_argument("-j", "--jobs", dest="jobs", type=int, default=1,
help="Allow N jobs at once.")
group = parser.add_argument_group("Android Options")
group.add_argument("--ap", dest="android_platform", type=int, help='parameter for android-update.Without the parameter,the script just build dynamic library for project. Valid android-platform are:[10|11|12|13|14|15|16|17|18|19]')
group.add_argument("--ndk-mode", dest="ndk_mode", help='Set the compile mode of ndk-build, should be debug|release|none, native code will not be compiled when the value is none. Default is same value with -m')
group = parser.add_argument_group("Web Options")
group.add_argument("--source-map", dest="source_map", action="store_true", help='Enable source-map')
group = parser.add_argument_group("iOS Options")
group.add_argument("--sign-identity", dest="sign_id", help="The code sign identity for iOS. It's required when the value of \"-m, -mode\" is release.")
group = parser.add_argument_group("lua/js project arguments")
group.add_argument("--no-res", dest="no_res", action="store_true", help="Package without project resources.")
group.add_argument("--compile-script", dest="compile_script", type=int, choices=[0, 1], help="Diable/Enable the compiling of lua/js script files.")
group = parser.add_argument_group("lua project arguments")
group.add_argument("--lua-encrypt", dest="lua_encrypt", action="store_true", help="Enable the encrypting of lua scripts.")
group.add_argument("--lua-encrypt-key", dest="lua_encrypt_key", help="Specify the encrypt key for the encrypting of lua scripts.")
group.add_argument("--lua-encrypt-sign", dest="lua_encrypt_sign", help="Specify the encrypt sign for the encrypting of lua scripts.")
category = self.plugin_category()
name = self.plugin_name()
usage = "\n\t%%prog %s %s -p <platform> [-s src_dir][-m <debug|release>]" \
"\nSample:" \
"\n\t%%prog %s %s -p android" % (category, name, category, name)
def _check_custom_options(self, args):
if args.mode != 'release':
args.mode = 'debug'
self._mode = 'debug'
if 'release' == args.mode:
self._mode = args.mode
if args.ndk_mode is not None:
self._ndk_mode = args.ndk_mode
else:
self._ndk_mode = self._mode
if args.compile_script is not None:
self._compile_script = bool(args.compile_script)
else:
self._compile_script = (self._mode == "release")
self._ap = args.android_platform
self._jobs = args.jobs
self._has_sourcemap = args.source_map
self._no_res = args.no_res
self._output_dir = self._get_output_dir()
self._sign_id = args.sign_id
if self._project._is_lua_project():
self._lua_encrypt = args.lua_encrypt
self._lua_encrypt_key = args.lua_encrypt_key
self._lua_encrypt_sign = args.lua_encrypt_sign
self._gen_custom_step_args()
def _get_output_dir(self):
project_dir = self._project.get_project_dir()
cur_platform = self._platforms.get_current_platform()
if self._project._is_script_project():
if self._mode == 'debug':
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_SCRIPT_DEBUG, cur_platform)
else:
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_SCRIPT_RELEASE, cur_platform)
else:
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_NATIVE, self._mode, cur_platform)
return output_dir
def _gen_custom_step_args(self):
self._custom_step_args = {
"project-path": self._project.get_project_dir(),
"platform-project-path": self._platforms.project_path(),
"build-mode": self._mode,
"output-dir": self._output_dir
}
if self._platforms.is_android_active():
self._custom_step_args["ndk-build-mode"] = self._ndk_mode
def _build_cfg_path(self):
cur_cfg = self._platforms.get_current_config()
if self._platforms.is_win32_active():
if cur_cfg.build_cfg_path is not None:
project_dir = self._project.get_project_dir()
ret = os.path.join(project_dir, cur_cfg.build_cfg_path)
else:
ret = self._platforms.project_path()
elif self._platforms.is_ios_active():
ret = os.path.join(self._platforms.project_path(), "ios")
elif self._platforms.is_mac_active():
ret = os.path.join(self._platforms.project_path(), "mac")
else:
ret = self._platforms.project_path()
return ret
def _update_build_cfg(self):
    """Migrate an old-format build config file to the current format.

    Renames the platform-specific copy keys (android assets / win32 files)
    to the generic copy-resource keys, converting each entry via
    _convert_cfg_list().  When something changed, the old file is kept as a
    "-for-v0.1" backup and the new config is written in its place.
    """
    build_cfg_dir = self._build_cfg_path()
    cfg_file_path = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
    # nothing to migrate without a config file
    if not os.path.isfile(cfg_file_path):
        return

    key_of_copy = None
    key_of_must_copy = None
    if self._platforms.is_android_active():
        from build_android import AndroidBuilder
        key_of_copy = AndroidBuilder.CFG_KEY_COPY_TO_ASSETS
        key_of_must_copy = AndroidBuilder.CFG_KEY_MUST_COPY_TO_ASSERTS
    elif self._platforms.is_win32_active():
        key_of_copy = CCPluginCompile.CFG_KEY_WIN32_COPY_FILES
        key_of_must_copy = CCPluginCompile.CFG_KEY_WIN32_MUST_COPY_FILES

    # other platforms have no legacy keys to migrate
    if key_of_copy is None and key_of_must_copy is None:
        return

    try:
        outfile = None
        open_file = open(cfg_file_path)
        cfg_info = json.load(open_file)
        open_file.close()
        open_file = None
        changed = False

        # migrate the optional-copy key if present
        if key_of_copy is not None:
            # NOTE: has_key() is Python-2-only
            if cfg_info.has_key(key_of_copy):
                src_list = cfg_info[key_of_copy]
                ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
                cfg_info[CCPluginCompile.CFG_KEY_COPY_RESOURCES] = ret_list
                del cfg_info[key_of_copy]
                changed = True

        # migrate the mandatory-copy key if present
        if key_of_must_copy is not None:
            if cfg_info.has_key(key_of_must_copy):
                src_list = cfg_info[key_of_must_copy]
                ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
                cfg_info[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES] = ret_list
                del cfg_info[key_of_must_copy]
                changed = True

        if changed:
            # backup the old-cfg
            split_list = os.path.splitext(CCPluginCompile.BUILD_CONFIG_FILE)
            file_name = split_list[0]
            ext_name = split_list[1]
            bak_name = file_name + "-for-v0.1" + ext_name
            bak_file_path = os.path.join(build_cfg_dir, bak_name)
            if os.path.exists(bak_file_path):
                os.remove(bak_file_path)
            os.rename(cfg_file_path, bak_file_path)

            # write the new data to file
            with open(cfg_file_path, 'w') as outfile:
                json.dump(cfg_info, outfile, sort_keys = True, indent = 4)
                # redundant with the with-block, but kept so `finally`
                # never double-closes a live handle
                outfile.close()
                outfile = None
    finally:
        # best-effort cleanup for handles left open by an exception
        if open_file is not None:
            open_file.close()
        if outfile is not None:
            outfile.close()
def _convert_cfg_list(self, src_list, build_cfg_dir):
ret = []
for element in src_list:
ret_element = {}
if str(element).endswith("/"):
sub_str = element[0:len(element)-1]
ret_element["from"] = sub_str
ret_element["to"] = ""
else:
element_full_path = os.path.join(build_cfg_dir, element)
if os.path.isfile(element_full_path):
to_dir = ""
else:
to_dir = os.path.basename(element)
ret_element["from"] = element
ret_element["to"] = to_dir
ret.append(ret_element)
return ret
def _is_debug_mode(self):
    # True when building in 'debug' mode (the only other mode is 'release').
    return self._mode == 'debug'
def _remove_file_with_ext(self, work_dir, ext):
file_list = os.listdir(work_dir)
for f in file_list:
full_path = os.path.join(work_dir, f)
if os.path.isdir(full_path):
self._remove_file_with_ext(full_path, ext)
elif os.path.isfile(full_path):
name, cur_ext = os.path.splitext(f)
if cur_ext == ext:
os.remove(full_path)
def compile_scripts(self, src_dir, dst_dir):
    """Compile lua/js sources from src_dir into dst_dir via the cocos CLI,
    then delete the plain source scripts from dst_dir.

    No-op unless this is a script project with script compiling enabled.
    """
    if not self._project._is_script_project():
        return
    if not self._compile_script:
        return

    # the `cocos` executable sits next to the currently running script
    cocos_cmd_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "cocos")
    if self._project._is_lua_project():
        rm_ext = ".lua"
        compile_cmd = "%s luacompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)
        # optionally encrypt the compiled lua with key/sign parameters
        if self._lua_encrypt:
            add_para = ""
            if self._lua_encrypt_key is not None:
                add_para = "%s -k %s" % (add_para, self._lua_encrypt_key)
            if self._lua_encrypt_sign is not None:
                add_para = "%s -b %s" % (add_para, self._lua_encrypt_sign)
            compile_cmd = "%s -e %s" % (compile_cmd, add_para)
    elif self._project._is_js_project():
        rm_ext = ".js"
        compile_cmd = "%s jscompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)
    # NOTE(review): if the project is a script project but neither lua nor
    # js, compile_cmd/rm_ext are unbound and the next line raises NameError
    # — presumably _is_script_project() implies one of the two; confirm.

    # run compile command
    self._run_cmd(compile_cmd)

    # remove the source scripts
    self._remove_file_with_ext(dst_dir, rm_ext)
def build_android(self):
    """Build the Android target: optional ndk native build, then the apk.

    No-op unless android is the active platform.  Requires the ANT_ROOT and
    ANDROID_SDK_ROOT environment variables.  Stores the produced apk path
    in self.apk_path.
    """
    if not self._platforms.is_android_active():
        return

    project_dir = self._project.get_project_dir()
    build_mode = self._mode
    output_dir = self._output_dir

    # locate the engine root, which differs per project flavor
    if self._project._is_script_project():
        if self._project._is_lua_project():
            cocos_root = os.path.join(project_dir, 'frameworks' ,'cocos2d-x')
        else:
            cocos_root = os.path.join(project_dir, 'frameworks' ,'%s-bindings' % self._project.get_language(), 'cocos2d-x')
    else:
        cocos_root = os.path.join(project_dir, 'cocos2d')

    # check environment variable
    ant_root = cocos.check_environment_variable('ANT_ROOT')
    sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
    project_android_dir = self._platforms.project_path()

    # local import: build_android is only needed (and importable) here
    from build_android import AndroidBuilder
    builder = AndroidBuilder(self._verbose, cocos_root, project_android_dir, self._no_res, self._project)

    args_ndk_copy = self._custom_step_args.copy()
    target_platform = self._platforms.get_current_platform()

    # native code is only built when the project has native parts and the
    # user did not disable it with ndk-mode "none"
    if not self._project._is_script_project() or self._project._is_native_support():
        if self._ndk_mode != "none":
            # build native code
            cocos.Logging.info("building native")
            ndk_build_param = "-j%s" % self._jobs
            self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_NDK_BUILD, target_platform, args_ndk_copy)
            builder.do_ndk_build(ndk_build_param, self._ndk_mode)
            self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_NDK_BUILD, target_platform, args_ndk_copy)

    # build apk
    cocos.Logging.info("building apk")
    self.apk_path = builder.do_build_apk(sdk_root, ant_root, self._ap, build_mode, output_dir, self._custom_step_args, self)

    cocos.Logging.info("build succeeded.")
def check_ios_mac_build_depends(self):
    """Verify Xcode is installed and recent enough, and locate the .xcodeproj.

    Runs `xcodebuild -version`, requires Xcode major version >= 5, then
    resolves the project file either from the platform config or by
    scanning the platform project directory.  On success sets
    self.project_name and self.xcodeproj_name.

    Raises cocos.CCPluginError when Xcode is missing, too old, or no
    .xcodeproj can be found.
    """
    commands = [
        "xcodebuild",
        "-version"
    ]
    child = subprocess.Popen(commands, stdout=subprocess.PIPE)

    xcode = None
    version = None
    for line in child.stdout:
        if 'Xcode' in line:
            # expected shape: "Xcode <version>"; guard against extra fields
            # (the old 2-tuple unpack raised ValueError on unexpected lines)
            fields = line.split(' ')
            if len(fields) >= 2:
                xcode = fields[0]
                version = fields[1]

    child.wait()

    if xcode is None:
        message = "Xcode wasn't installed"
        raise cocos.CCPluginError(message)

    # compare the major version numerically: the previous lexicographic
    # string comparison (version <= '5') wrongly rejected Xcode 10 and later
    try:
        major = int(str(version).strip().split('.')[0])
    except ValueError:
        major = 0
    if major < 5:
        message = "Update xcode please"
        raise cocos.CCPluginError(message)

    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.proj_file is not None:
        xcodeproj_name = cfg_obj.proj_file
        name = os.path.basename(xcodeproj_name)
    else:
        name, xcodeproj_name = self.checkFileByExtention(".xcodeproj", self._platforms.project_path())
    if not xcodeproj_name:
        message = "Can't find the \".xcodeproj\" file"
        raise cocos.CCPluginError(message)

    self.project_name = name
    self.xcodeproj_name = xcodeproj_name
def _remove_res(self, target_path):
    """Delete the resources listed under "remove_res" in the build config
    from target_path (used when building with --no-res).

    Entries ending in '/' empty the directory but keep it; other directory
    entries are removed entirely; plain files are simply deleted.
    """
    build_cfg_dir = self._build_cfg_path()
    cfg_file = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
    if os.path.exists(cfg_file) and os.path.isfile(cfg_file):
        # have config file
        open_file = open(cfg_file)
        cfg_info = json.load(open_file)
        open_file.close()
        # NOTE: has_key() is Python-2-only
        if cfg_info.has_key("remove_res"):
            remove_list = cfg_info["remove_res"]
            for f in remove_list:
                res = os.path.join(target_path, f)
                if os.path.isdir(res):
                    # is a directory
                    if f.endswith('/'):
                        # remove files & dirs in it
                        for sub_file in os.listdir(res):
                            sub_file_fullpath = os.path.join(res, sub_file)
                            if os.path.isfile(sub_file_fullpath):
                                os.remove(sub_file_fullpath)
                            elif os.path.isdir(sub_file_fullpath):
                                shutil.rmtree(sub_file_fullpath)
                    else:
                        # remove the dir
                        shutil.rmtree(res)
                elif os.path.isfile(res):
                    # is a file, remove it
                    os.remove(res)
def get_engine_dir(self):
    """Resolve the engine directory.

    Uses the project-configured engine dir (relative to the project root)
    when present, otherwise falls back to the project root itself.
    """
    configured = self._project.get_proj_config(CCPluginCompile.PROJ_CFG_KEY_ENGINE_DIR)
    root = self._project.get_project_dir()
    if configured is None:
        return root
    return os.path.join(root, configured)
def backup_dir(self, dir_path):
    """Snapshot dir_path into a sibling backup directory.

    Any stale backup from a previous run is discarded first.
    """
    backup_path = "%s%s" % (dir_path, CCPluginCompile.BACKUP_SUFFIX)
    if os.path.exists(backup_path):
        shutil.rmtree(backup_path)
    shutil.copytree(dir_path, backup_path)
def reset_backup_dir(self, dir_path):
    """Restore dir_path from the backup created by backup_dir().

    The current dir_path (if any) is removed and the backup is renamed
    into its place.
    """
    backup_path = "%s%s" % (dir_path, CCPluginCompile.BACKUP_SUFFIX)
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    os.rename(backup_path, dir_path)
def build_ios(self):
    """Build the iOS target with xcodebuild.

    Resolves the sign identity (release only), locates the iOS target in
    the pbxproj, optionally pre-compiles scripts (restored afterwards),
    runs xcodebuild, renames the produced .app (dropping anything after
    the first space in the target name) and, for release builds, packages
    an .ipa.  Sets self._iosapp_path.  Must run on Mac OS X.
    """
    if not self._platforms.is_ios_active():
        return

    if not cocos.os_is_mac():
        raise cocos.CCPluginError("Please build on MacOSX")

    # release builds need a code-sign identity: from the command line
    # (recorded on success) or from the stored project config
    need_record_sign_id = False
    if self._mode == "release":
        if self._sign_id is None:
            self._sign_id = self._project.get_proj_config(CCPluginCompile.PROJ_CFG_KEY_IOS_SIGN_ID)
        else:
            need_record_sign_id = True
        if self._sign_id is None:
            raise cocos.CCPluginError("Please specify the code sign identity by \"--sign-identity\" if you want to compile with release mode.")
        else:
            cocos.Logging.info("Code Sign Identity: %s" % self._sign_id)

    self.check_ios_mac_build_depends()

    ios_project_dir = self._platforms.project_path()
    output_dir = self._output_dir

    projectPath = os.path.join(ios_project_dir, self.xcodeproj_name)
    pbxprojectPath = os.path.join(projectPath, "project.pbxproj")

    # NOTE: file() is Python-2-only; the handle is never closed (leaks
    # until GC)
    f = file(pbxprojectPath)
    contents = f.read()

    # find the target list inside the PBXProject section
    section = re.search(r"Begin PBXProject section.*End PBXProject section", contents, re.S)
    if section is None:
        message = "Can't find iOS target"
        raise cocos.CCPluginError(message)

    targets = re.search(r"targets = (.*);", section.group(), re.S)
    if targets is None:
        message = "Can't find iOS target"
        raise cocos.CCPluginError(message)

    targetName = None
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.target_name is not None:
        targetName = cfg_obj.target_name
    else:
        # pick the last target whose comment mentions "iOS"
        names = re.split("\*", targets.group())
        for name in names:
            if "iOS" in name:
                targetName = str.strip(name)
    if targetName is None:
        message = "Can't find iOS target"
        raise cocos.CCPluginError(message)

    # drop a previously-built .app so stale artifacts never survive
    if os.path.isdir(output_dir):
        target_app_dir = os.path.join(output_dir, "%s.app" % targetName[:targetName.find(' ')])
        if os.path.isdir(target_app_dir):
            shutil.rmtree(target_app_dir)

    # is script project & need compile scripts
    if self._project._is_script_project() and self._compile_script:
        # backup the source scripts
        script_src_dir = os.path.join(self._project.get_project_dir(), "src")
        self.backup_dir(script_src_dir)

        # compile the scripts
        self.compile_scripts(script_src_dir, script_src_dir)

        if self._project._is_js_project():
            # js project need compile the js files in engine
            engine_js_dir = os.path.join(self.get_engine_dir(), CCPluginCompile.ENGINE_JS_DIR)
            self.backup_dir(engine_js_dir)
            self.compile_scripts(engine_js_dir, engine_js_dir)

    try:
        cocos.Logging.info("building")

        # debug builds target the i386 simulator; release targets devices
        command = ' '.join([
            "xcodebuild",
            "-project",
            "\"%s\"" % projectPath,
            "-configuration",
            "%s" % 'Debug' if self._mode == 'debug' else 'Release',
            "-target",
            "\"%s\"" % targetName,
            "%s" % "-arch i386" if self._mode == 'debug' else '',
            "-sdk",
            "%s" % 'iphonesimulator' if self._mode == 'debug' else 'iphoneos',
            "CONFIGURATION_BUILD_DIR=%s" % (output_dir)
        ])
        if self._mode == 'release':
            command = "%s CODE_SIGN_IDENTITY=\"%s\"" % (command, self._sign_id)

        self._run_cmd(command)

        filelist = os.listdir(output_dir)

        app_name = targetName
        for filename in filelist:
            name, extention = os.path.splitext(filename)
            # drop static libs from the output dir
            if extention == '.a':
                filename = os.path.join(output_dir, filename)
                os.remove(filename)
            # rename the .app, trimming the target name at the first space
            if extention == '.app' and name == targetName:
                filename = os.path.join(output_dir, filename)
                app_name = name[:name.find(' ')]
                newname = os.path.join(output_dir, app_name + extention)
                os.rename(filename, newname)
                self._iosapp_path = newname

        if self._no_res:
            self._remove_res(self._iosapp_path)

        if self._mode == 'release':
            # generate the ipa
            app_path = os.path.join(output_dir, "%s.app" % app_name)
            ipa_path = os.path.join(output_dir, "%s.ipa" % app_name)
            ipa_cmd = "xcrun -sdk iphoneos PackageApplication -v \"%s\" -o \"%s\"" % (app_path, ipa_path)
            self._run_cmd(ipa_cmd)

        # record the sign id if necessary
        if need_record_sign_id:
            self._project.write_proj_config(CCPluginCompile.PROJ_CFG_KEY_IOS_SIGN_ID, self._sign_id)

        cocos.Logging.info("build succeeded.")
    except:
        raise cocos.CCPluginError("Build failed: Take a look at the output above for details.")
    finally:
        # is script project & need compile scripts
        # restore the sources that were swapped for compiled scripts
        if self._project._is_script_project() and self._compile_script:
            script_src_dir = os.path.join(self._project.get_project_dir(), "src")
            self.reset_backup_dir(script_src_dir)
            if self._project._is_js_project():
                engine_js_dir = os.path.join(self.get_engine_dir(), CCPluginCompile.ENGINE_JS_DIR)
                self.reset_backup_dir(engine_js_dir)
def build_mac(self):
    """Build the Mac target with xcodebuild.

    Mirrors build_ios without the sign-identity / ipa steps: finds the Mac
    target in the pbxproj, optionally pre-compiles scripts (restored
    afterwards), builds, renames the .app and sets self._macapp_path.
    Must run on Mac OS X.
    """
    if not self._platforms.is_mac_active():
        return

    if not cocos.os_is_mac():
        raise cocos.CCPluginError("Please build on MacOSX")

    self.check_ios_mac_build_depends()

    mac_project_dir = self._platforms.project_path()
    output_dir = self._output_dir

    projectPath = os.path.join(mac_project_dir, self.xcodeproj_name)
    pbxprojectPath = os.path.join(projectPath, "project.pbxproj")

    # NOTE: file() is Python-2-only; the handle is never closed
    f = file(pbxprojectPath)
    contents = f.read()

    # find the target list inside the PBXProject section
    section = re.search(
        r"Begin PBXProject section.*End PBXProject section",
        contents,
        re.S
    )
    if section is None:
        message = "Can't find Mac target"
        raise cocos.CCPluginError(message)
    targets = re.search(r"targets = (.*);", section.group(), re.S)
    if targets is None:
        message = "Can't find Mac target"
        raise cocos.CCPluginError(message)

    targetName = None
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.target_name is not None:
        targetName = cfg_obj.target_name
    else:
        # pick the last target whose comment mentions "Mac"
        names = re.split("\*", targets.group())
        for name in names:
            if "Mac" in name:
                targetName = str.strip(name)
    if targetName is None:
        message = "Can't find Mac target"
        raise cocos.CCPluginError(message)

    # drop a previously-built .app so stale artifacts never survive
    if os.path.isdir(output_dir):
        target_app_dir = os.path.join(output_dir, "%s.app" % targetName[:targetName.find(' ')])
        if os.path.isdir(target_app_dir):
            shutil.rmtree(target_app_dir)

    # is script project & need compile scripts
    if self._project._is_script_project() and self._compile_script:
        # backup the source scripts
        script_src_dir = os.path.join(self._project.get_project_dir(), "src")
        self.backup_dir(script_src_dir)

        # compile the scripts
        self.compile_scripts(script_src_dir, script_src_dir)

        if self._project._is_js_project():
            # js project need compile the js files in engine
            engine_js_dir = os.path.join(self.get_engine_dir(), CCPluginCompile.ENGINE_JS_DIR)
            self.backup_dir(engine_js_dir)
            self.compile_scripts(engine_js_dir, engine_js_dir)

    try:
        cocos.Logging.info("building")

        command = ' '.join([
            "xcodebuild",
            "-project",
            "\"%s\"" % projectPath,
            "-configuration",
            "%s" % 'Debug' if self._mode == 'debug' else 'Release',
            "-target",
            "\"%s\"" % targetName,
            "CONFIGURATION_BUILD_DIR=%s" % (output_dir)
        ])

        self._run_cmd(command)

        self.target_name = targetName
        filelist = os.listdir(output_dir)
        for filename in filelist:
            name, extention = os.path.splitext(filename)
            # drop static libs from the output dir
            if extention == '.a':
                filename = os.path.join(output_dir, filename)
                os.remove(filename)
            if extention == '.app' and name == targetName:
                filename = os.path.join(output_dir, filename)
                # rename only when the target name contains a space,
                # trimming it at the first space.
                # NOTE(review): _macapp_path is only assigned inside this
                # branch — a space-free target name would leave it unset
                # and break the --no-res step below; confirm target names
                # always contain a space (e.g. "Game Mac").
                if ' ' in name:
                    filename = os.path.join(output_dir, filename)
                    newname = os.path.join(output_dir, name[:name.find(' ')]+extention)
                    os.rename(filename, newname)
                    self._macapp_path = newname

        if self._no_res:
            resource_path = os.path.join(self._macapp_path, "Contents", "Resources")
            self._remove_res(resource_path)

        cocos.Logging.info("build succeeded.")
    except:
        raise cocos.CCPluginError("Build failed: Take a look at the output above for details.")
    finally:
        # is script project & need compile scripts
        # restore the sources that were swapped for compiled scripts
        if self._project._is_script_project() and self._compile_script:
            script_src_dir = os.path.join(self._project.get_project_dir(), "src")
            self.reset_backup_dir(script_src_dir)
            if self._project._is_js_project():
                engine_js_dir = os.path.join(self.get_engine_dir(), CCPluginCompile.ENGINE_JS_DIR)
                self.reset_backup_dir(engine_js_dir)
def _get_required_vs_version(self, proj_file):
# get the VS version required by the project
file_obj = open(proj_file)
pattern = re.compile(r"^# Visual Studio.+(\d{4})")
num = None
for line in file_obj:
match = pattern.match(line)
if match is not None:
num = match.group(1)
break
if num is not None:
if num == "2012":
ret = "11.0"
elif num == "2013":
ret = "12.0"
else:
ret = None
else:
ret = None
return ret
def _is_32bit_windows(self):
arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
archw = os.environ.has_key("PROCESSOR_ARCHITEW6432")
return (arch == "x86" and not archw)
def _get_vs_path(self, require_version):
    """Find an installed Visual Studio with version >= require_version.

    Scans the VisualStudio registry key (both 32/64-bit views on a 64-bit
    OS), returning (needUpgrade, vsPath) where needUpgrade is True when the
    found VS is newer than the required version, and vsPath is None when
    no suitable installation exists.

    Raises cocos.CCPluginError when the registry key is missing entirely.
    """
    # find the VS in register, if system is 64bit, should find vs in both 32bit & 64bit register
    if self._is_32bit_windows():
        reg_flag_list = [ _winreg.KEY_WOW64_32KEY ]
    else:
        reg_flag_list = [ _winreg.KEY_WOW64_64KEY, _winreg.KEY_WOW64_32KEY ]

    needUpgrade = False
    vsPath = None
    try:
        for reg_flag in reg_flag_list:
            cocos.Logging.info("find vs in reg : %s" % "32bit" if reg_flag == _winreg.KEY_WOW64_32KEY else "64bit" )
            vs = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\Microsoft\VisualStudio",
                0,
                _winreg.KEY_READ | reg_flag
            )

            try:
                i = 0
                while True:
                    # enum the keys in vs reg; EnumKey raising (index out of
                    # range) ends the loop via the except below
                    version = _winreg.EnumKey(vs, i)
                    # advance the index unconditionally: the old code hit
                    # `continue` before `i += 1` on non-numeric keys (e.g.
                    # "SxS"), re-reading the same key forever
                    i += 1
                    try:
                        find_ver = float(version)
                    except ValueError:
                        continue

                    # find the vs which version >= required version
                    if find_ver >= float(require_version):
                        key = _winreg.OpenKey(vs, r"SxS\VS7")
                        # avoid shadowing the builtin `type`
                        vsPath, reg_type = _winreg.QueryValueEx(key, version)

                        if os.path.exists(vsPath):
                            if float(version) > float(require_version):
                                needUpgrade = True
                            break
                        else:
                            vsPath = None
            except:
                # EnumKey ran past the last sub-key (WindowsError)
                pass

            # if find one right vs, break
            if vsPath is not None:
                break
    except WindowsError as e:
        message = "Visual Studio wasn't installed"
        print(e)
        raise cocos.CCPluginError(message)

    return (needUpgrade, vsPath)
def build_win32(self):
    """Build the win32 target with Visual Studio and collect the artifacts.

    Finds the .sln (config or directory scan), locates a suitable installed
    VS via the registry, upgrades the solution when built with a newer VS,
    builds it, then copies exe/dll plus the configured resources into the
    output directory and compiles scripts there.  Sets self.run_root.

    Raises cocos.CCPluginError on any unrecoverable problem.
    """
    if not self._platforms.is_win32_active():
        return

    if not cocos.os_is_win32():
        raise cocos.CCPluginError("Please build on windows")

    win32_projectdir = self._platforms.project_path()
    output_dir = self._output_dir

    cocos.Logging.info("building")

    # get the solution file & project name
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.sln_file is not None:
        sln_name = cfg_obj.sln_file
        if cfg_obj.project_name is None:
            # format the message BEFORE raising: the old code applied `%`
            # to the raise statement's exception, causing a TypeError
            # instead of the intended error
            raise cocos.CCPluginError(
                "Must specified \"%s\" when \"%s\" is specified in file \"%s\"" %
                (cocos_project.Win32Config.KEY_PROJECT_NAME,
                 cocos_project.Win32Config.KEY_SLN_FILE,
                 cocos_project.Project.CONFIG))
        else:
            name = cfg_obj.project_name
    else:
        name, sln_name = self.checkFileByExtention(".sln", win32_projectdir)
        if not sln_name:
            message = "Can't find the \".sln\" file"
            raise cocos.CCPluginError(message)
    self.project_name = name
    projectPath = os.path.join(win32_projectdir, sln_name)

    # get the required VS version
    build_cfg_path = self._build_cfg_path()
    required_vs_version = self._get_required_vs_version(projectPath)
    if required_vs_version is None:
        raise cocos.CCPluginError("Can't parse the sln file to find required VS version")
    cocos.Logging.info("Required VS version : %s" % required_vs_version)

    # get the correct available VS path
    needUpgrade, vsPath = self._get_vs_path(required_vs_version)

    if vsPath is None:
        message = "Can't find correct Visual Studio's path in the regedit"
        raise cocos.CCPluginError(message)
    cocos.Logging.info("Find VS path : %s" % vsPath)

    commandPath = os.path.join(vsPath, "Common7", "IDE", "devenv")
    build_mode = 'Debug' if self._is_debug_mode() else 'Release'

    # upgrade projects when a newer VS than the sln requires was found
    if needUpgrade:
        commandUpgrade = ' '.join([
            "\"%s\"" % commandPath,
            "\"%s\"" % projectPath,
            "/Upgrade"
        ])
        self._run_cmd(commandUpgrade)

    # build the project
    commands = ' '.join([
        "\"%s\"" % commandPath,
        "\"%s\"" % projectPath,
        "/Build \"%s|Win32\"" % build_mode,
        "/Project \"%s\"" % self.project_name
    ])
    self._run_cmd(commands)

    cocos.Logging.info("build succeeded.")

    # copy files
    build_folder_name = "%s.win32" % build_mode
    build_folder_path = os.path.join(win32_projectdir, build_folder_name)
    if not os.path.isdir(build_folder_path):
        message = "Can not find the %s" % build_folder_path
        raise cocos.CCPluginError(message)

    # remove the files in output dir (keep the exe files)
    if os.path.exists(output_dir):
        output_files = os.listdir(output_dir)
        for element in output_files:
            ele_full_path = os.path.join(output_dir, element)
            if os.path.isfile(ele_full_path):
                base_name, file_ext = os.path.splitext(element)
                if not file_ext == ".exe":
                    os.remove(ele_full_path)
            elif os.path.isdir(ele_full_path):
                shutil.rmtree(ele_full_path)

    # create output dir if it not existed
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # copy dll & exe
    files = os.listdir(build_folder_path)
    for filename in files:
        name, ext = os.path.splitext(filename)
        proj_exe_name = "%s.exe" % self.project_name
        if ext == '.dll' or filename == proj_exe_name:
            file_path = os.path.join(build_folder_path, filename)
            cocos.Logging.info("Copying %s" % filename)
            shutil.copy(file_path, output_dir)

    # copy lua files & res
    build_cfg = os.path.join(build_cfg_path, CCPluginCompile.BUILD_CONFIG_FILE)
    if not os.path.exists(build_cfg):
        message = "%s not found" % build_cfg
        raise cocos.CCPluginError(message)
    # context manager closes the config file (the old handle leaked)
    with open(build_cfg) as f:
        data = json.load(f)

    # `in` replaces the Python-2-only dict.has_key()
    if CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES in data:
        if self._no_res:
            fileList = data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
        else:
            fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES] + data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
    else:
        fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES]

    for cfg in fileList:
        cocos.copy_files_with_config(cfg, build_cfg_path, output_dir)

    # check the project config & compile the script files
    self.compile_scripts(output_dir, output_dir)
    self.run_root = output_dir
def build_web(self):
    """Publish the web target.

    Debug mode only records run_root/sub_url and returns.  Release mode
    compiles the JS with the closure compiler (via an ant build.xml),
    rewrites the source map paths, strips build-only keys from
    project.json, patches index.html to load the minified bundle, and
    copies the res directory into publish/html5.
    """
    if not self._platforms.is_web_active():
        return

    project_dir = self._platforms.project_path()

    # store env for run
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.run_root_dir is not None:
        self.run_root = cfg_obj.run_root_dir
    else:
        self.run_root = project_dir
    if cfg_obj.sub_url is not None:
        self.sub_url = cfg_obj.sub_url
    else:
        self.sub_url = '/'

    # debug builds serve the sources directly; nothing to publish
    if self._is_debug_mode():
        return
    else:
        self.sub_url = '%spublish/html5/' % self.sub_url

    f = open(os.path.join(project_dir, "project.json"))
    project_json = json.load(f)
    f.close()
    engine_dir = os.path.join(project_json["engineDir"])
    realEngineDir = os.path.normpath(os.path.join(project_dir, engine_dir))
    publish_dir = os.path.normpath(os.path.join(project_dir, "publish", "html5"))

    # need to config in options of command
    buildOpt = {
            "outputFileName" : "game.min.js",
            #"compilationLevel" : "simple",
            "compilationLevel" : "advanced",
            "sourceMapOpened" : True if self._has_sourcemap else False
            }

    if os.path.exists(publish_dir) == False:
        os.makedirs(publish_dir)

    # generate build.xml
    # NOTE: `build_web` here resolves to the imported module, not this
    # method (the method name only shadows it as an attribute, not in
    # this scope)
    build_web.gen_buildxml(project_dir, project_json, publish_dir, buildOpt)

    outputJsPath = os.path.join(publish_dir, buildOpt["outputFileName"])
    if os.path.exists(outputJsPath) == True:
        os.remove(outputJsPath)

    # call closure compiler
    ant_root = cocos.check_environment_variable('ANT_ROOT')
    ant_path = os.path.join(ant_root, 'ant')
    self._run_cmd("%s -f %s" % (ant_path, os.path.join(publish_dir, 'build.xml')))

    # handle sourceMap: rewrite absolute project/engine paths to paths
    # relative to the publish dir, with forward slashes
    sourceMapPath = os.path.join(publish_dir, "sourcemap")
    if os.path.exists(sourceMapPath):
        smFile = open(sourceMapPath)
        try:
            smContent = smFile.read()
        finally:
            smFile.close()

        dir_to_replace = project_dir
        # on win32 the paths in the source map are escaped ("\\")
        if cocos.os_is_win32():
            dir_to_replace = project_dir.replace('\\', '\\\\')
        smContent = smContent.replace(dir_to_replace, os.path.relpath(project_dir, publish_dir))
        smContent = smContent.replace(realEngineDir, os.path.relpath(realEngineDir, publish_dir))
        smContent = smContent.replace('\\\\', '/')
        smContent = smContent.replace('\\', '/')

        smFile = open(sourceMapPath, "w")
        smFile.write(smContent)
        smFile.close()

    # handle project.json: drop the keys only needed at build time
    del project_json["engineDir"]
    del project_json["modules"]
    del project_json["jsList"]
    project_json_output_file = open(os.path.join(publish_dir, "project.json"), "w")
    project_json_output_file.write(json.dumps(project_json))
    project_json_output_file.close()

    # handle index.html: remove the CCBoot script tag and point the main
    # script at the minified bundle
    indexHtmlFile = open(os.path.join(project_dir, "index.html"))
    try:
        indexContent = indexHtmlFile.read()
    finally:
        indexHtmlFile.close()
    reg1 = re.compile(r'<script\s+src\s*=\s*("|\')[^"\']*CCBoot\.js("|\')\s*><\/script>')
    indexContent = reg1.sub("", indexContent)
    mainJs = project_json.get("main") or "main.js"
    indexContent = indexContent.replace(mainJs, buildOpt["outputFileName"])
    indexHtmlOutputFile = open(os.path.join(publish_dir, "index.html"), "w")
    indexHtmlOutputFile.write(indexContent)
    indexHtmlOutputFile.close()

    # copy res dir
    dst_dir = os.path.join(publish_dir, 'res')
    src_dir = os.path.join(project_dir, 'res')
    if os.path.exists(dst_dir):
        shutil.rmtree(dst_dir)
    shutil.copytree(src_dir, dst_dir)
def build_linux(self):
    """Build the linux target with cmake + make and stage the binaries.

    Resolves the CMakeLists location and project name (from the config or
    the CMakeLists itself), builds out-of-tree in <project>/linux-build,
    then copies the resulting bin directory into the output dir.  Sets
    self.run_root.
    """
    if not self._platforms.is_linux_active():
        return

    #if not cocos.os_is_linux():
    #    raise cocos.CCPluginError("Please build on linux")

    project_dir = self._project.get_project_dir()
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.cmake_path is not None:
        # explicit config wins
        cmakefile_dir = os.path.join(project_dir, cfg_obj.cmake_path)
    else:
        cmakefile_dir = project_dir
        # lua projects keep their CMakeLists under frameworks/
        if self._project._is_lua_project():
            cmakefile_dir = os.path.join(project_dir, 'frameworks')

    # get the project name
    if cfg_obj.project_name is not None:
        self.project_name = cfg_obj.project_name
    else:
        # parse APP_NAME out of the CMakeLists; the context manager closes
        # the handle (the old code leaked it)
        with open(os.path.join(cmakefile_dir, 'CMakeLists.txt'), 'r') as f:
            for line in f:
                if "set(APP_NAME " in line:
                    self.project_name = re.search('APP_NAME ([^\)]+)\)', line).group(1)
                    break

    if cfg_obj.build_dir is not None:
        build_dir = os.path.join(project_dir, cfg_obj.build_dir)
    else:
        build_dir = os.path.join(project_dir, 'linux-build')
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    with cocos.pushd(build_dir):
        self._run_cmd('cmake %s' % os.path.relpath(cmakefile_dir, build_dir))

    with cocos.pushd(build_dir):
        self._run_cmd('make -j%s' % self._jobs)

    # move file
    output_dir = self._output_dir
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    if cfg_obj.build_result_dir is not None:
        result_dir = os.path.join(build_dir, 'bin', cfg_obj.build_result_dir)
    else:
        result_dir = os.path.join(build_dir, 'bin')
    cocos.copy_files_in_dir(result_dir, output_dir)

    self.run_root = output_dir

    if self._no_res:
        res_dir = os.path.join(output_dir, "Resources")
        self._remove_res(res_dir)

    if self._project._is_script_project() and self._compile_script:
        cocos.Logging.warning("Warning: Now script compiling is not supported for linux.")

    # message made consistent with the other build_* methods
    # (was the typo 'Build successed!')
    cocos.Logging.info('build succeeded.')
def checkFileByExtention(self, ext, path):
    """Scan `path` for an entry with extension `ext`.

    Returns (basename, filename) for the first match, (None, None) when
    nothing in the directory matches.
    """
    for entry in os.listdir(path):
        base, suffix = os.path.splitext(entry)
        if suffix == ext:
            return base, entry
    return (None, None)
def run(self, argv, dependencies):
    """Plugin entry point: parse args and run the build for every platform.

    Each build_* method is a no-op unless its platform is the active one,
    so calling all of them builds exactly the selected target.  Custom
    pre/post-build step scripts are invoked around the build.
    """
    self.parse_args(argv)
    cocos.Logging.info('Building mode: %s' % self._mode)
    self._update_build_cfg()

    target_platform = self._platforms.get_current_platform()
    args_build_copy = self._custom_step_args.copy()

    # invoke the custom step: pre-build
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_BUILD, target_platform, args_build_copy)

    self.build_android()
    self.build_ios()
    self.build_mac()
    self.build_win32()
    self.build_web()
    self.build_linux()

    # invoke the custom step: post-build
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_BUILD, target_platform, args_build_copy)
| |
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import redis
import os
import ujson as json
from . import base
from . import actorbase
LOG = logging.getLogger(__name__)
class RedisSet(actorbase.ActorBaseFT):
def __init__(self, name, chassis, config):
self.redis_skey = name
self.redis_skey_value = name+'.value'
self.redis_skey_chkp = name+'.chkp'
self.SR = None
super(RedisSet, self).__init__(name, chassis, config)
def configure(self):
super(RedisSet, self).configure()
self.redis_url = self.config.get('redis_url',
os.environ.get('REDIS_URL', 'unix:///var/run/redis/redis.sock')
)
self.scoring_attribute = self.config.get(
'scoring_attribute',
'last_seen'
)
self.store_value = self.config.get('store_value', False)
self.max_entries = self.config.get('max_entries', 1000 * 1000)
def connect(self, inputs, output):
output = False
super(RedisSet, self).connect(inputs, output)
def read_checkpoint(self):
self._connect_redis()
self.last_checkpoint = None
config = {
'class': (self.__class__.__module__+'.'+self.__class__.__name__),
'config': self._original_config
}
config = json.dumps(config, sort_keys=True)
try:
contents = self.SR.get(self.redis_skey_chkp)
if contents is None:
raise ValueError('{} - last checkpoint not found'.format(self.name))
if contents[0] == '{':
# new format
contents = json.loads(contents)
self.last_checkpoint = contents['checkpoint']
saved_config = contents['config']
saved_state = contents['state']
else:
self.last_checkpoint = contents
saved_config = ''
saved_state = None
LOG.debug('%s - restored checkpoint: %s', self.name, self.last_checkpoint)
# old_status is missing in old releases
# stick to the old behavior
if saved_config and saved_config != config:
LOG.info(
'%s - saved config does not match new config',
self.name
)
self.last_checkpoint = None
return
LOG.info(
'%s - saved config matches new config',
self.name
)
if saved_state is not None:
self._saved_state_restore(saved_state)
except (ValueError, IOError):
LOG.exception('{} - Error reading last checkpoint'.format(self.name))
self.last_checkpoint = None
def create_checkpoint(self, value):
self._connect_redis()
config = {
'class': (self.__class__.__module__+'.'+self.__class__.__name__),
'config': self._original_config
}
contents = {
'checkpoint': value,
'config': json.dumps(config, sort_keys=True),
'state': self._saved_state_create()
}
self.SR.set(self.redis_skey_chkp, json.dumps(contents))
def remove_checkpoint(self):
self._connect_redis()
self.SR.delete(self.redis_skey_chkp)
def _connect_redis(self):
if self.SR is not None:
return
self.SR = redis.StrictRedis.from_url(
self.redis_url
)
def initialize(self):
self._connect_redis()
def rebuild(self):
self._connect_redis()
self.SR.delete(self.redis_skey)
self.SR.delete(self.redis_skey_value)
def reset(self):
self._connect_redis()
self.SR.delete(self.redis_skey)
self.SR.delete(self.redis_skey_value)
def _add_indicator(self, score, indicator, value):
if self.length() >= self.max_entries:
self.statistics['drop.overflow'] += 1
return
with self.SR.pipeline() as p:
p.multi()
p.zadd(self.redis_skey, score, indicator)
if self.store_value:
p.hset(self.redis_skey_value, indicator, json.dumps(value))
result = p.execute()[0]
self.statistics['added'] += result
def _delete_indicator(self, indicator):
with self.SR.pipeline() as p:
p.multi()
p.zrem(self.redis_skey, indicator)
p.hdel(self.redis_skey_value, indicator)
result = p.execute()[0]
self.statistics['removed'] += result
@base._counting('update.processed')
def filtered_update(self, source=None, indicator=None, value=None):
score = 0
if self.scoring_attribute is not None:
av = value.get(self.scoring_attribute, None)
if type(av) == int or type(av) == long:
score = av
else:
LOG.error("scoring_attribute is not int: %s", type(av))
score = 0
self._add_indicator(score, indicator, value)
@base._counting('withdraw.processed')
def filtered_withdraw(self, source=None, indicator=None, value=None):
self._delete_indicator(indicator)
    def length(self, source=None):
        """Return the number of stored indicators (cardinality of the zset)."""
        return self.SR.zcard(self.redis_skey)
    @staticmethod
    def gc(name, config=None):
        """Garbage-collect Redis state left behind by a node named *name*.

        Deletes the node's indicator zset, value hash and checkpoint key,
        then releases the temporary connection pool.
        """
        actorbase.ActorBaseFT.gc(name, config=config)
        if config is None:
            config = {}
        # key names mirror the scheme used by the live node instance
        redis_skey = name
        redis_skey_value = '{}.value'.format(name)
        redis_skey_chkp = '{}.chkp'.format(name)
        redis_url = config.get('redis_url',
            os.environ.get('REDIS_URL', 'unix:///var/run/redis/redis.sock')
        )
        cp = None
        try:
            cp = redis.ConnectionPool.from_url(
                url=redis_url
            )
            SR = redis.StrictRedis(connection_pool=cp)
            SR.delete(redis_skey)
            SR.delete(redis_skey_value)
            SR.delete(redis_skey_chkp)
        except Exception as e:
            # NOTE(review): re-raising as RuntimeError loses the original
            # traceback; `raise ... from e` would preserve it on Python 3.
            raise RuntimeError(str(e))
        finally:
            # always release the pool, even on failure
            if cp is not None:
                cp.disconnect()
| |
import warnings
from typing import Any, Callable, List, Optional, Union, Sequence
import numpy as np
import tensorflow as tf
from odin.bay.layers import DistributionDense
from odin.bay.random_variable import RVconf
from odin.bay.vi.utils import permute_dims
from odin.networks import SequentialNetwork, dense_network
from odin.utils import as_tuple
from tensorflow_probability.python.distributions import (Distribution,
Independent)
from typing_extensions import Literal
class FactorDiscriminator(SequentialNetwork):
  r""" The main goal is minimizing the total correlation (the mutual information
  which quantifies the redundancy or dependency among latent variables).
  We use a discriminator to estimate total-correlation
  This class also supports a semi-supervised factor discriminator, a combination
  of supervised objective and total correlation estimation using density-ratio.
  - 0: real sample for q(z) (or last unit in case n_outputs > 2) and
  - 1: fake sample from q(z-)
  If `n_outputs` > 2, suppose the number of classes is `K` then:
  - 0 to K: is the classes' logits for real sample from q(z)
  - K + 1: fake sample from q(z-)
  This class is also extended to handle supervised loss for semi-supervised
  systems.
  Parameters
  -----------
  units : a list of Integer, the number of hidden units for each hidden layer.
  n_outputs : an Integer or instance of `RVmeta`,
    the number of output units and its distribution
  ss_strategy : {'sum', 'logsumexp', 'mean', 'max', 'min'}.
    Strategy for combining the outputs semi-supervised learning into the
    logit for real sample from q(z):
    - 'logsumexp' : used for semi-supervised GAN in (Salimans T. 2016)
  Example
  --------
  ```
  # for FactorVAE
  FactorDiscriminator(
    observation=RVmeta(1, 'bernoulli', projection=True, name="ind_factors"))
  # for classifier of ConditionalVAE
  FactorDiscriminator(
    observation=RVmeta(ds.shape, 'bernoulli', projection=True, name='image'))
  ```
  References
  ------------
  Kim, H., Mnih, A., 2018. "Disentangling by Factorising".
    arXiv:1802.05983 [cs, stat].
  Salimans, T., Goodfellow, I., Zaremba, W., et al 2016.
    "Improved Techniques for Training GANs". arXiv:1606.03498 [cs.LG].
  """
  def __init__(
      self,
      batchnorm: bool = False,
      input_dropout: float = 0.,
      dropout: float = 0.,
      units: Sequence[int] = (1000, 1000, 1000, 1000, 1000),
      observation: Union[RVconf, Sequence[RVconf]] = RVconf(1,
                                                            'bernoulli',
                                                            projection=True,
                                                            name="discriminator"),
      activation: Union[
          str, Callable[[tf.Tensor], tf.Tensor]] = tf.nn.leaky_relu,
      ss_strategy: Literal['sum', 'logsumexp', 'mean', 'max',
                           'min'] = 'logsumexp',
      name: str = "FactorDiscriminator",
  ):
    # accept a single RVconf or a sequence of them
    if not isinstance(observation, (tuple, list)):
      observation = [observation]
    assert len(observation) > 0, "No output is given for FactorDiscriminator"
    assert all(
        isinstance(o, (RVconf, DistributionDense)) for o in observation), (
          f"outputs must be instance of RVmeta, but given:{observation}")
    # flatten every output's event shape; total output units is the sum of
    # the flattened event sizes
    n_outputs = 0
    for o in observation:
      if not o.projection:
        warnings.warn(f'Projection turn off for observation {o}!')
      o.event_shape = (int(np.prod(o.event_shape)),)
      n_outputs += o.event_shape[0]
    layers = dense_network(units=units,
                           batchnorm=batchnorm,
                           dropout=dropout,
                           flatten_inputs=True,
                           input_dropout=input_dropout,
                           activation=activation,
                           prefix=name)
    super().__init__(layers, name=name)
    self.ss_strategy = str(ss_strategy)
    self.observation = observation
    self.n_outputs = n_outputs
    # posterior projection layers are created lazily in build()
    self._distributions = []
    assert self.ss_strategy in {'sum', 'logsumexp', 'mean', 'max', 'min'}
  def build(self, input_shape=None):
    """Build the dense body, then create one posterior layer per output."""
    super().build(input_shape)
    shape = self.output_shape[1:]
    self._distributions = [
        o.create_posterior(shape) if isinstance(o, RVconf) else o
        for o in self.observation
    ]
    # rank of a single input sample (batch dimension excluded)
    self.input_ndim = len(self.input_shape) - 1
    return self
  def call(self, inputs, **kwargs):
    """Run the dense body and project into the output distribution(s)."""
    if isinstance(inputs, (tuple, list)) and len(inputs) == 1:
      inputs = inputs[0]
    outputs = super().call(inputs, **kwargs)
    # project into different output distributions
    distributions = [d(outputs, **kwargs) for d in self.distributions]
    return distributions[0] if len(distributions) == 1 else tuple(distributions)
  def _to_samples(self, qz_x, mean=False, stop_grad=False):
    """Concatenate latent posteriors into one sample (or mean) tensor."""
    qz_x = tf.nest.flatten(qz_x)
    if mean:
      z = tf.concat([q.mean() for q in qz_x], axis=-1)
    else:
      z = tf.concat([tf.convert_to_tensor(q) for q in qz_x], axis=-1)
    # collapse any leading sample dims into the batch dimension
    z = tf.reshape(z, tf.concat([(-1,), z.shape[-self.input_ndim:]], axis=0))
    if stop_grad:
      z = tf.stop_gradient(z)
    return z
  def _tc_logits(self, logits):
    """Reduce raw outputs to one logit per sample for the TC estimator."""
    # use ss_strategy to infer appropriate logits value for
    # total-correlation estimator (logits for q(z)) in case of n_outputs > 1
    Xs = []
    for x in tf.nest.flatten(logits):
      if isinstance(x, Distribution):
        if isinstance(x, Independent):
          x = x.distribution
        if hasattr(x, 'logits'):
          x = x.logits
        elif hasattr(x, 'concentration'):
          x = x.concentration
        else:
          raise RuntimeError(
              f"Distribution {x} doesn't has 'logits' or 'concentration' "
              "attributes, cannot not be used for estimating total correlation."
          )
      Xs.append(x)
    # concatenate the outputs
    if len(Xs) == 0:
      raise RuntimeError(
          f"No logits values found for total correlation: {logits}")
    elif len(Xs) == 1:
      Xs = Xs[0]
    else:
      Xs = tf.concat(Xs, axis=-1)
    # only 1 unit, only estimate TC
    if self.n_outputs == 1:
      return Xs[..., 0]
    # multiple units, reduce
    return getattr(tf, 'reduce_%s' % self.ss_strategy)(Xs, axis=-1)
  def total_correlation(self,
                        qz_x: Distribution,
                        training: Optional[bool] = None) -> tf.Tensor:
    r""" Total correlation Eq(3)
    ```
    TC(z) = KL(q(z)||q(z-)) = E_q(z)[log(q(z) / q(z-))]
          ~ E_q(z)[ log(D(z)) - log(1 - D(z)) ]
    ```
    We want to minimize the total correlation to achieve factorized latent units
    Note:
      In many implementation, `log(q(z-)) - log(q(z))` is referred as `total
      correlation loss`, here, we return `log(q(z)) - log(q(z-))` as the total
      correlation for the construction of the ELBO in Eq(2)
    Arguments:
      qz_x : a Tensor, [batch_dim, latent_dim] or Distribution
    Return:
      TC(z) : a scalar, approximation of the density-ratio that arises in the
        KL-term.
    """
    z = self._to_samples(qz_x, stop_grad=False)
    logits = self(z, training=training)
    logits = self._tc_logits(logits)
    # in case using sigmoid, other implementation use -logits here but it
    # should be logits.
    # if it is negative here, TC is reduce, but reasonably, it must be positive (?)
    return tf.reduce_mean(logits)
  def dtc_loss(self,
               qz_x: Distribution,
               qz_xprime: Optional[Distribution] = None,
               training: Optional[bool] = None) -> tf.Tensor:
    r""" Discriminated total correlation loss Algorithm(2)
    Minimize the probability of:
     - `q(z)` misclassified as `D(z)[:, 0]`
     - `q(z')` misclassified as `D(z')[:, 1]`
    Arguments:
      qz_x : `Tensor` or `Distribution`.
        Samples of the latents from first batch
      qz_xprime : `Tensor` or `Distribution` (optional).
        Samples of the latents from second batch, this will be permuted.
        If not given, then reuse `qz_x`.
    Return:
      scalar - loss value for training the discriminator
    """
    # we don't want the gradient to be propagated to the encoder
    z = self._to_samples(qz_x, stop_grad=True)
    z_logits = self._tc_logits(self(z, training=training))
    # log_sigmoid is more numerically stable than composing log and
    # sigmoid (or a hand-rolled logsumexp) yourself.
    d_z = -tf.math.log_sigmoid(z_logits)  # must be negative here
    # for X_prime: permute the second batch's samples (falls back to the
    # first batch's samples when qz_xprime is not given)
    if qz_xprime is not None:
      z = self._to_samples(qz_xprime, stop_grad=True)
    z_perm = permute_dims(z)
    zperm_logits = self._tc_logits(self(z_perm, training=training))
    d_zperm = -tf.math.log_sigmoid(zperm_logits)  # also negative here
    # reduce the negative of d_z, and the positive of d_zperm
    # this equal to cross_entropy(d_z, zeros) + cross_entropy(d_zperm, ones)
    loss = 0.5 * (tf.reduce_mean(d_z) + tf.reduce_mean(zperm_logits + d_zperm))
    return loss
  def supervised_loss(self,
                      labels: Union[tf.Tensor, List[tf.Tensor]],
                      qz_x: Distribution,
                      mean: bool = False,
                      mask: Optional[tf.Tensor] = None,
                      training: Optional[bool] = None) -> tf.Tensor:
    """Negative log-likelihood of the labels under the output distributions.

    mask (1-labelled, 0-unlabelled) selects contributing samples; when no
    sample is labelled the loss is 0 with the gradient stopped.
    """
    labels = as_tuple(labels)
    z = self._to_samples(qz_x, mean=mean, stop_grad=True)
    distributions = as_tuple(self(z, training=training))
    ## applying the mask (1-labelled, 0-unlabelled)
    if mask is not None:
      mask = tf.reshape(mask, (-1,))
      # labels = [tf.boolean_mask(y, mask, axis=0) for y in labels]
      # z_logits = tf.boolean_mask(z_logits, mask, axis=0)
    ## calculate the loss
    loss = 0.
    for dist, y_true in zip(distributions, labels):
      llk = dist.log_prob(y_true)
      # check the mask careful here
      # if no data for labels, just return 0
      if mask is not None:
        llk = tf.cond(tf.reduce_all(tf.logical_not(mask)), lambda: 0.,
                      lambda: tf.boolean_mask(llk, mask, axis=0))
      # negative log-likelihood here
      loss += -llk
    # check non-zero, if zero the gradient must be stop or NaN gradient happen
    loss = tf.reduce_mean(loss)
    loss = tf.cond(
        tf.abs(loss) < 1e-8, lambda: tf.stop_gradient(loss), lambda: loss)
    return loss
  @property
  def n_observation(self) -> int:
    """Number of configured output observations."""
    return len(self.observation)
  @property
  def distributions(self) -> List[DistributionDense]:
    """Posterior projection layers (populated by build())."""
    return self._distributions
  @property
  def prior(self) -> List[Distribution]:
    """Prior of each output distribution."""
    return [d.prior for d in self._distributions]
  def __str__(self):
    """Append a one-line summary of each output to the network summary."""
    s = super().__str__()
    s1 = ['\n Outputs:']
    for i, d in enumerate(self.distributions):
      s1.append(f'  [{i}]{d}')
    return s + '\n'.join(s1)
| |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import glance_store
from oslo.config import cfg
import webob
import glance.api.v2.image_members
from glance.openstack.common import jsonutils
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
# Fixed timestamp (and its ISO-8601 rendering) used by the serializer tests.
DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355)
ISOTIME = '2012-05-16T15:27:36Z'
CONF = cfg.CONF
BASE_URI = unit_test_utils.BASE_URI
# Well-known image ids referenced throughout the fixtures below.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86'
UUID5 = '3eee7cc2-eae7-4c0f-b50d-a7160b0c62ed'
# Tenant (project) ids acting as image owners and members in the tests.
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
def _db_fixture(id, **kwargs):
    """Build an image DB record dict with defaults, overridden by kwargs."""
    record = dict(
        id=id,
        name=None,
        is_public=False,
        properties={},
        checksum=None,
        owner=None,
        status='queued',
        tags=[],
        size=None,
        locations=[],
        protected=False,
        disk_format=None,
        container_format=None,
        deleted=False,
        min_ram=None,
        min_disk=None,
    )
    record.update(kwargs)
    return record
def _db_image_member_fixture(image_id, member_id, **kwargs):
    """Build an image-member DB record dict; kwargs override the defaults."""
    base = {
        'image_id': image_id,
        'member': member_id,
        'status': 'pending',
    }
    return dict(base, **kwargs)
def _domain_fixture(id, **kwargs):
    """Build a domain ImageMembership; kwargs may override the id."""
    attrs = dict({'id': id}, **kwargs)
    return glance.domain.ImageMembership(**attrs)
class TestImageMembersController(test_utils.BaseTestCase):
    """Tests for the v2 image-members controller (index/show/create/update/
    delete), exercising visibility rules, policy enforcement and quotas
    against fake DB/store/policy/notifier backends."""
    def setUp(self):
        super(TestImageMembersController, self).setUp()
        self.db = unit_test_utils.FakeDB()
        self.store = unit_test_utils.FakeStoreAPI()
        self.policy = unit_test_utils.FakePolicyEnforcer()
        self.notifier = unit_test_utils.FakeNotifier()
        self._create_images()
        self._create_image_members()
        self.controller = glance.api.v2.image_members\
            .ImageMembersController(self.db,
                                    self.policy,
                                    self.notifier,
                                    self.store)
        glance_store.create_stores()
    def _create_images(self):
        # UUID1 is public; UUID2/UUID5 private to TENANT1; UUID3 to TENANT3;
        # UUID4 to TENANT4
        self.db.reset()
        self.images = [
            _db_fixture(UUID1, owner=TENANT1, name='1', size=256,
                        is_public=True,
                        locations=[{'url': '%s/%s' % (BASE_URI, UUID1),
                                    'metadata': {}, 'status': 'active'}]),
            _db_fixture(UUID2, owner=TENANT1, name='2', size=512),
            _db_fixture(UUID3, owner=TENANT3, name='3', size=512),
            _db_fixture(UUID4, owner=TENANT4, name='4', size=1024),
            _db_fixture(UUID5, owner=TENANT1, name='5', size=1024),
        ]
        [self.db.image_create(None, image) for image in self.images]
        self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
    def _create_image_members(self):
        # membership fixtures: UUID2 shared with TENANT4, UUID3 with
        # TENANT4 and TENANT2, UUID4 with TENANT1 — all 'pending'
        self.image_members = [
            _db_image_member_fixture(UUID2, TENANT4),
            _db_image_member_fixture(UUID3, TENANT4),
            _db_image_member_fixture(UUID3, TENANT2),
            _db_image_member_fixture(UUID4, TENANT1),
        ]
        [self.db.image_member_create(None, image_member)
            for image_member in self.image_members]
    def test_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(actual, expected)
    def test_index_no_members(self):
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID5)
        self.assertEqual(0, len(output['members']))
        self.assertEqual({'members': []}, output)
    def test_index_member_view(self):
        # UUID3 is a private image owned by TENANT3
        # UUID3 has members TENANT2 and TENANT4
        # When TENANT4 lists members for UUID3, should not see TENANT2
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.index(request, UUID3)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(actual, expected)
    def test_index_private_image(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
                          request, UUID5)
    def test_index_public_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.index,
                          request, UUID1)
    def test_index_private_image_visible_members_admin(self):
        request = unit_test_utils.get_fake_request(is_admin=True)
        output = self.controller.index(request, UUID4)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT1])
        self.assertEqual(actual, expected)
    def test_index_allowed_by_get_members_policy(self):
        rules = {"get_members": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
    def test_index_forbidden_by_get_members_policy(self):
        rules = {"get_members": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.index,
                          request, image_id=UUID2)
    def test_show(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        output = self.controller.show(request, UUID2, TENANT4)
        expected = self.image_members[0]
        self.assertEqual(output.image_id, expected['image_id'])
        self.assertEqual(output.member_id, expected['member'])
        self.assertEqual(output.status, expected['status'])
    def test_show_by_member(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.show(request, UUID2, TENANT4)
        expected = self.image_members[0]
        self.assertEqual(output.image_id, expected['image_id'])
        self.assertEqual(output.member_id, expected['member'])
        self.assertEqual(output.status, expected['status'])
    def test_show_forbidden(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          request, UUID2, TENANT4)
    def test_show_not_found(self):
        # one member should not be able to view status of another member
        # of the same image
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          request, UUID3, TENANT4)
    def test_create(self):
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_create_allowed_by_add_policy(self):
        rules = {"add_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        output = self.controller.create(request, image_id=UUID2,
                                        member_id=TENANT3)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_create_forbidden_by_add_policy(self):
        rules = {"add_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, image_id=UUID2, member_id=TENANT3)
    def test_create_duplicate_member(self):
        # a second create for the same (image, member) pair must conflict
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
        self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                          request, image_id=image_id, member_id=member_id)
    def test_create_overlimit(self):
        # quota of 0 means any member addition is over the limit
        self.config(image_member_quota=0)
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, request,
                          image_id=image_id, member_id=member_id)
    def test_create_unlimited(self):
        # quota of -1 disables the member limit entirely
        self.config(image_member_quota=-1)
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_update_done_by_member(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        image_id = UUID2
        member_id = TENANT4
        output = self.controller.update(request, image_id=image_id,
                                        member_id=member_id,
                                        status='accepted')
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT4, output.member_id)
        self.assertEqual('accepted', output.status)
    def test_update_done_by_member_forbidden_by_policy(self):
        rules = {"modify_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          request, image_id=UUID2, member_id=TENANT4,
                          status='accepted')
    def test_update_done_by_member_allowed_by_policy(self):
        rules = {"modify_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.update(request, image_id=UUID2,
                                        member_id=TENANT4,
                                        status='accepted')
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT4, output.member_id)
        self.assertEqual('accepted', output.status)
    def test_update_done_by_owner(self):
        # only the member may change its own membership status
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          request, UUID2, TENANT4, status='accepted')
    def test_update_non_existent_image(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          request, '123', TENANT4, status='accepted')
    def test_update_invalid_status(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          request, UUID2, TENANT4, status='accept')
    def test_create_private_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, UUID4, TENANT2)
    def test_create_public_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, UUID1, TENANT2)
    def test_create_image_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        image_id = 'fake-image-id'
        member_id = TENANT3
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          request, image_id=image_id, member_id=member_id)
    def test_delete(self):
        request = unit_test_utils.get_fake_request()
        member_id = TENANT4
        image_id = UUID2
        res = self.controller.delete(request, image_id, member_id)
        self.assertEqual(res.body, '')
        self.assertEqual(res.status_code, 204)
        found_member = self.db.image_member_find(
            request.context, image_id=image_id, member=member_id)
        self.assertEqual(found_member, [])
    def test_delete_by_member(self):
        # a member cannot delete its own membership; the record survives
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(actual, expected)
    def test_delete_allowed_by_policies(self):
        rules = {"get_member": True, "delete_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        output = self.controller.delete(request, image_id=UUID2,
                                        member_id=TENANT4)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(0, len(output['members']))
    def test_delete_forbidden_by_get_member_policy(self):
        rules = {"get_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
    def test_delete_forbidden_by_delete_member_policy(self):
        rules = {"delete_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
    def test_delete_private_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID4, TENANT1)
    def test_delete_public_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID1, TENANT1)
    def test_delete_image_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        member_id = TENANT2
        image_id = 'fake-image-id'
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          request, image_id, member_id)
    def test_delete_member_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        member_id = 'fake-member-id'
        image_id = UUID2
        found_member = self.db.image_member_find(
            request.context, image_id=image_id, member=member_id)
        self.assertEqual(found_member, [])
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          request, image_id, member_id)
class TestImageMembersSerializer(test_utils.BaseTestCase):
    """Tests that the v2 image-members ResponseSerializer renders domain
    membership objects as the documented JSON payloads."""
    def setUp(self):
        super(TestImageMembersSerializer, self).setUp()
        self.serializer = glance.api.v2.image_members.ResponseSerializer()
        # two memberships of UUID2 in different states, sharing a fixed
        # timestamp so the ISO rendering is predictable
        self.fixtures = [
            _domain_fixture(id='1', image_id=UUID2, member_id=TENANT1,
                            status='accepted',
                            created_at=DATETIME, updated_at=DATETIME),
            _domain_fixture(id='2', image_id=UUID2, member_id=TENANT2,
                            status='pending',
                            created_at=DATETIME, updated_at=DATETIME),
        ]
    def test_index(self):
        expected = {
            'members': [
                {
                    'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME,
                    'schema': '/v2/schemas/member',
                },
                {
                    'image_id': UUID2,
                    'member_id': TENANT2,
                    'status': 'pending',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME,
                    'schema': '/v2/schemas/member',
                },
            ],
            'schema': '/v2/schemas/members',
        }
        request = webob.Request.blank('/v2/images/%s/members' % UUID2)
        response = webob.Response(request=request)
        result = {'members': self.fixtures}
        self.serializer.index(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_show(self):
        expected = {
            'image_id': UUID2,
            'member_id': TENANT1,
            'status': 'accepted',
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'schema': '/v2/schemas/member',
        }
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.show(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_create(self):
        expected = {'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'schema': '/v2/schemas/member',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME}
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.create(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_update(self):
        expected = {'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'schema': '/v2/schemas/member',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME}
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.update(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
class TestImagesDeserializer(test_utils.BaseTestCase):
    """Tests that the image-members RequestDeserializer validates and
    extracts the member/status fields from request bodies."""
    def setUp(self):
        super(TestImagesDeserializer, self).setUp()
        self.deserializer = glance.api.v2.image_members.RequestDeserializer()
    def test_create(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dumps({'member': TENANT1})
        output = self.deserializer.create(request)
        expected = {'member_id': TENANT1}
        self.assertEqual(expected, output)
    def test_create_invalid(self):
        # unknown body key is rejected
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dumps({'mem': TENANT1})
        self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create,
                          request)
    def test_create_no_body(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create,
                          request)
    def test_create_member_empty(self):
        # an empty member id is rejected
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dumps({'member': ''})
        self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create,
                          request)
    def test_update(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dumps({'status': 'accepted'})
        output = self.deserializer.update(request)
        expected = {'status': 'accepted'}
        self.assertEqual(expected, output)
    def test_update_invalid(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dumps({'mem': TENANT1})
        self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update,
                          request)
    def test_update_no_body(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update,
                          request)
| |
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import os
import collections
import codegen
import encutil
import genutil
import actions
import verbosity
max_in_byte = 256 #max unsigned int per byte
# name prefixes for the generated C emit/bind pattern functions
emit_function_prefix = 'xed_encode_instruction_emit_pattern'
bind_function_prefix = 'xed_encode_instruction_fb_pattern'
# C helper invoked by generated code to read an operand's start field value
get_field_value = 'xed_encoder_get_start_field_value'
def key_field_binding_lower(x):
    ''' sort key: lower-cased field name of a field-binding action '''
    return x.field_name.lower()
def sort_field_bindings(a,b):
    ''' cmp-style comparator ordering emit actions by lower-cased field name '''
    left = a.field_name.lower()
    right = b.field_name.lower()
    # standard (x > y) - (x < y) idiom yields 1 / -1 / 0
    return (left > right) - (left < right)
def key_iform_by_bind_ptrn(x):
    ''' sort key: the iform's bind pattern string '''
    return x.bind_ptrn
# priority assigned during initial read (see read_encfile.py)
def key_priority(x):
    ''' sort key: iform priority assigned at read time (see read_encfile.py) '''
    return x.priority
def key_rule_length(x):
    ''' sort key: total count of emit actions plus nonterminals in the rule '''
    # summing the two lengths avoids building the concatenated list
    return len(x.rule.get_all_emits()) + len(x.rule.get_all_nts())
class instructions_group_t(object):
''' each encoding iform has:
1. conditions ( e.g.REG1=GPRv_r() )
2. actions, one type of the actions is a nonterminal (nt)
the conditions and the nt's are called "bind patterns".
if two different iclasses have the same bind patterns
for all of their iforms,
we can put those iclasses in the same group.
this is what we are doing in _join_iclasses_to_groups
'''
def __init__(self,iarray,log_dir):
self.iclass2group = {}
self.log_name = 'groups_log.txt'
self.groups = self._join_iclasses_to_groups(iarray,log_dir)
def _group_good_for_iclass(self,group,iforms):
''' Check if the incoming group represents the list of iforms.
A group represents a list of iforms if:
1. it has same number of iforms.
2. for each iform there is an iform in the group that has the same
bind pattern
@param group: ins_group_t object
@param iforms: a list of iform_t
@return: True if group represents the the ifoms list '''
if len(group.iforms) != len(iforms):
return False
for iform,group_iform in zip(iforms,group.iforms):
if iform.bind_ptrn != group_iform.bind_ptrn:
return False
return True
def _put_iclass_in_group(self,groups,iclass,instruction_iforms):
''' tries to find a group that represents the incoming iclass.
'represents' means that all the iforms have exactly the same
bind patterns.
if no group was found, then create new group for the iclass.
@param groups: a list of ins_group_t object
@param iclass: the iclass name
@param instruction_iforms: a list of iform_t, the
iforms of the iclass
@return: function_object_t '''
for group in groups:
# check if group represents the iclass
if self._group_good_for_iclass(group,instruction_iforms):
group.add_iclass(iclass,instruction_iforms)
return
#no matching groups
#create new one
group = ins_group_t()
group.add_iclass(iclass,instruction_iforms)
groups.append(group)
return
def _join_iclasses_to_groups(self,iarray,log_dir):
'''
1. dividing the iclasses into groups.
2. creating a mapping from iclass to its group Id.
3. generating a log
iarray is a dict[iclass] = [iform_t, ...]
return: a list of ins_group_t objects '''
groups = []
#1. generate the groups
for iclass,iforms in list(iarray.items()):
iforms.sort(key=key_iform_by_bind_ptrn)
self._put_iclass_in_group(groups,iclass,iforms)
# 2. generate the iclass to group Id mapping
self.iclass2group = {}
for i,group in enumerate(groups):
for iclass in group.get_iclasses():
self.iclass2group[iclass] = i
# 3. print the log
if verbosity.vencode():
log_file = os.path.join(log_dir,self.log_name)
df = open(log_file,'w') #debug file
df.write("number of iclasses: %d\n" % len(iarray))
df.write("number of groups: %d\n" % len(groups))
for i,group in enumerate(groups):
df.write("GROUP Id: %d\n" % i)
df.write("ICLASSES: %s\n" % str(group.get_iclasses()))
for iform in group.iforms:
df.write("%s: %s\n" % ('BIND PATTERN: ', iform.bind_ptrn ))
df.write("\n\n")
df.close()
return groups
def get_groups(self):
''' return the groups list '''
return self.groups
    def num_groups(self):
        '''Return the number of instruction groups.'''
        return len(self.groups)
    def get_iclass2group(self):
        '''Return the dict mapping each iclass name to its group id.'''
        return self.iclass2group
def get_iclass2index_in_group(self):
''' return a dictionary of iclass to its index in the group'''
d = {}
for group in self.groups:
iclasses = sorted(group.get_iclasses())
for i,iclass in enumerate(iclasses):
d[iclass] = i
return d
def iforms_sort(lst):
    '''Sort a list of iforms in place: priority first, then rule length,
    then bind pattern (each later key only breaks ties in the earlier ones).'''
    # A single sort on a composite key is equivalent to the original chain
    # of three stable sorts applied in reverse order of significance.
    lst.sort(key=lambda iform: (key_priority(iform),
                                key_rule_length(iform),
                                key_iform_by_bind_ptrn(iform)))
class ins_group_t(object):
    '''A single encoder group: the set of iclasses whose iforms all share
    identical bind patterns.'''
    def __init__(self):
        # map: iclass name -> list of iform_t
        self.iclass2iforms = {}
        # the iforms of the first iclass added; representative of the group
        self.iforms = []
        # sorted iclass list; populated by sort()
        self.iclasses = None

    def add_iclass(self, iclass, iforms):
        '''Register an iclass and its iform list with this group.'''
        self.iclass2iforms[iclass] = iforms
        if not self.iforms:
            self.iforms = iforms

    def get_iclasses(self):
        '''Return the list of iclass names currently in the group.'''
        return list(self.iclass2iforms.keys())

    def get_ith_iforms(self, i):
        '''Return the i-th iform of every iclass in the group.'''
        return [iform_list[i] for iform_list in self.iclass2iforms.values()]

    def get_ith_field(self, i, field):
        '''Return attribute *field* of the i-th iform of every iclass.'''
        return [getattr(iform_list[i], field)
                for iform_list in self.iclass2iforms.values()]

    def sort(self):
        '''Fix a canonical ordering; call before generating any code.'''
        self.iclasses = sorted(self.get_iclasses())
        for iclass in self.iclasses:
            iforms_sort(self.iclass2iforms[iclass])
        # self.iforms aliases one of the lists sorted above, so it is
        # already ordered; no extra pass is needed.

    def gen_iform_ids_table(self):
        '''Build the 2D C table of iform ids: one row per iclass, one
        column per iform id.'''
        table = []
        for iclass in self.iclasses:
            cells = ['{:4}'.format(iform.rule.iform_id)
                     for iform in self.iclass2iforms[iclass]]
            table.append("/*{:14}*/ {{{}}},".format(iclass, ",".join(cells)))
        return table

    def gen_iform_isa_set_table(self, isa_set_db_for_chip):
        '''Build the 2D C table of isa-set membership: one row per iclass,
        each column 1/0 for whether that iform's isa_set is in the chip.

        all_ones / all_zeros let the caller elide the table entirely,
        reducing the amount of generated encoder code.
        '''
        table = []
        all_ones = True
        all_zeros = True
        for iclass in self.iclasses:
            cells = []
            for iform in self.iclass2iforms[iclass]:
                flag = '1' if iform.isa_set.upper() in isa_set_db_for_chip else '0'
                if flag == '0':
                    all_ones = False
                else:
                    all_zeros = False
                cells.append(flag)
            table.append("/*{:14}*/ {{{}}},".format(iclass, ",".join(cells)))
        return table, all_ones, all_zeros
class instruction_codegen_t(object):
    '''Generates the encoder's per-instruction artifacts: emit-pattern
    functions, field-binding (bind) pattern functions, the linear table of
    field-binding values, and the instruction groups.  work() is the main
    entry point; get_values() exports the results onto an encoder config.'''
    def __init__(self,iform_list,iarray,logs_dir, amd_enabled=True):
        '''@param iform_list: flat list of iform_t objects
        @param iarray: dict mapping iclass -> [ iform_t, ... ]
        @param logs_dir: directory for debug log files
        @param amd_enabled: enable AMD 3DNow map handling when True'''
        self.amd_enabled = amd_enabled
        self.iform_list = iform_list
        self.iarray = iarray # dictionary by iclass of [ iform_t ]
        self.logs_dir = logs_dir #directory for the log file
        #list of field binding function_object_t
        self.fb_ptrs_fo_list = None
        #list of emit patterns function_object_t
        self.emit_ptrs_fo_list = None
        # number of field binding patterns
        self.max_fb_ptrns = None
        # number of emit patterns
        self.max_emit_ptrns = None
        # a list of all values been set to fields, ordered sequentially
        self.fb_values_list = None
        # the length of fb_values_list
        self.fb_values_table_size = None
        # instructions_group_t object, built by work()
        self.instruction_groups = None
def get_values(self,encoder_config):
''' copy the necessary fields to encoder_confing object '''
encoder_config.fb_values_list = self.fb_values_list
encoder_config.fb_values_table_size = self.fb_values_table_size
encoder_config.emit_ptrs_fo_list = self.emit_ptrs_fo_list
encoder_config.max_emit_ptrns = self.max_emit_ptrns
encoder_config.fb_ptrs_fo_list = self.fb_ptrs_fo_list
encoder_config.max_fb_ptrns = self.max_fb_ptrns
encoder_config.ins_groups = self.instruction_groups
def _emit_legacy_map(self, fo, iform):
# obj_str is the function parameters for the emit function
def _xemit(bits, v):
fo.add_code_eol('xed_encoder_request_emit_bytes({},{},0x{:02x})'.format(
encutil.enc_strings['obj_str'], bits, v))
if iform.legacy_map.legacy_escape != 'N/A':
bits = 8
_xemit(bits, iform.legacy_map.legacy_escape_int)
if iform.legacy_map.legacy_opcode != 'N/A':
_xemit(bits, iform.legacy_map.legacy_opcode_int)
    def _make_emit_fo(self, iform, i):
        ''' create the function object for this emit pattern
        @param iform: iform_t object
        @param i: index of the pattern function
        @return: function_object_t
        '''
        fname = "%s_%d" % (emit_function_prefix,i)
        fo = codegen.function_object_t(fname,
                                       return_type='void')
        # obj_str is the function parameters for the emit function
        obj_str = encutil.enc_strings['obj_str']
        enc_arg = "%s* %s" % (encutil.enc_strings['obj_type'],
                              obj_str)
        fo.add_arg(enc_arg)
        for action in iform.rule.actions:
            # MASSIVE HACK: we store the legacy_map as MAP0 in
            # xed_encode_iform_db[] (obj/xed-encoder-iforms-init.c)
            # for VEX/EVEX/XOP instr (see
            # _identify_map_and_nominal_opcode() ) to avoid emitting
            # any escape/map bytes at runtime.
            if action.field_name == 'MAP':
                # a MAP action is only legal for VEX/EVEX/XOP encodings
                if iform.encspace == 0: # legacy
                    genutil.die("Should not see MAP here: {}".format(iform.iclass))
                pass
            elif action.field_name and action.field_name in ['LEGACY_MAP1',
                                                             'LEGACY_MAP2',
                                                             'LEGACY_MAP3',
                                                             'LEGACY_MAP3DNOW']:
                # placeholder actions installed by
                # _identify_map_and_nominal_opcode(); legacy space only
                if iform.encspace != 0: # legacy
                    genutil.die("This should only occur for legacy instr")
                self._emit_legacy_map(fo, iform)
            elif action.field_name and action.field_name == 'NOM_OPCODE':
                # emit the nominal opcode read back from the encode request
                code = ''
                get_opcode = 'xed_encoder_get_nominal_opcode(%s)' % obj_str
                if action.nbits == 8:
                    emit_func = 'xed_encoder_request_emit_bytes'
                else:
                    emit_func = 'xed_encoder_request_encode_emit'
                code = ' '*4
                code += '%s(%s,%d,%s)' % (emit_func,obj_str,
                                          action.nbits,get_opcode)
                fo.add_code_eol(code)
            else:
                # generic action: let the action render its own EMIT code
                code = action.emit_code('EMIT')
                for c in code:
                    fo.add_code(c)
        return fo
def _make_fb_setter_fo(self, iform, i):
''' create the function object for pattern of fields bindings
@param iform: iform_t object
@param i: index of the pattern function
@return: function_object_t
'''
fname = "%s_%d" % (bind_function_prefix,i)
fo = codegen.function_object_t(fname,
return_type='void')
obj_name = encutil.enc_strings['obj_str']
enc_arg = "%s* %s" % (encutil.enc_strings['obj_type'],
obj_name)
fo.add_arg(enc_arg)
if not iform.fbs:
#no field binding we need to set, pacify the compiler
fo.add_code_eol('(void)%s' % obj_name)
return fo
fo.add_code_eol(' const xed_uint8_t* val')
fo.add_code_eol(' val = %s(%s)' % (get_field_value, obj_name))
for i,fb_action in enumerate(iform.fbs):
value_from_lu_table = '*(val+%d)' % i
operand_setter = "%s_set_%s" % (encutil.enc_strings['op_accessor'],
fb_action.field_name.lower())
code = ' %s(%s,%s);' % (operand_setter,
obj_name, value_from_lu_table)
fo.add_code(code)
return fo
def _study_emit_patterns(self): # FIXME 2019-10-04 unused. was for learning how to fix code
bins = collections.defaultdict(list)
bins_alt = collections.defaultdict(list)
for iform in self.iform_list:
bins[iform.emit_actions].append(iform)
bins_alt[iform.emit_actions_alt].append(iform)
print("emit actions bins conventional {}".format(len(bins)))
print("emit actions bins alternative {}".format(len(bins_alt)))
for k in bins.keys():
bin_content = bins[k]
if len(bin_content)>1:
alt_set = set()
for iform in bin_content:
alt_set.add(iform.emit_actions_alt)
if len(alt_set) > 1:
print("EXPANDED {}".format(k))
for v in alt_set:
print("\t {}".format(v))
def _verify_naked_bits_in_unique_pattern(self): # FIXME 2019-10-04 unused, no longer relevant
''' calculate how many references we have per each full
instruction emit pattern.
naked bits are bits in the pattern without a field name
like 0x0F or 0b110. earlier functions decorated
opcode/legacy map.
If the naked bits just show up once, then we can hardcode
those bits in the emit function. This is a test for that.
Current design relies on the naked bits being the same in
similar instruction patterns. If two patterns differ in
any naked bits, they cannot share emit functions and we die.
The workaround would be to capture the bits in some field to
allow the emit function to be shared & generic.
The current inputs to XED have no such conflicts.
'''
refs_per_ptrn = collections.defaultdict(int)
for iform in self.iform_list:
refs_per_ptrn[iform.emit_actions] += 1
if refs_per_ptrn[iform.emit_actions] >= 2:
if iform.rule.has_naked_bit_action():
# this assumes that the naked bits are going to be different.
# if the naked bits were the same, we could share the emit action.
genutil.die('emit pattern has more than one reference use of naked bits is not allowed: {}\n{}'.format(iform.emit_actions,iform))
def _make_emit_pattern_fos(self):
''' collect all the different patterns for emit phase.
for each pattern create a function representing it.
adds to each rule in iform_t the index of the pattern function
@return: list of emit pattern function name to function object
'''
emit_patterns = {}
fo_list = []
i = 0
for iform in self.iform_list:
if iform.emit_actions not in emit_patterns:
fo = self._make_emit_fo(iform,i)
emit_patterns[iform.emit_actions] = (fo,i)
fo_list.append(fo)
iform.emit_func_index = i
i += 1
else:
fo, index = emit_patterns[iform.emit_actions]
iform.emit_func_index = index
return fo_list
def _make_fb_pattern_fos(self):
''' collect all the different patterns for bind phase.
for each pattern create a function representing it.
adds to each rule in iform_t the index of the pattern function
@return: list of emit pattern function name to function object
'''
bind_ptterns = {}
fo_list = []
i = 0
for iform in self.iform_list:
if iform.fb_ptrn not in bind_ptterns:
fo = self._make_fb_setter_fo(iform,i)
bind_ptterns[iform.fb_ptrn] = (fo,i)
fo_list.append(fo)
iform.bind_func_index = i
i += 1
else:
fo,index = bind_ptterns[iform.fb_ptrn]
iform.bind_func_index = index
return fo_list
    def _identify_map_and_nominal_opcode(self,iform):
        ''' scan the list of actions and identify the nominal opcode and
        the legacy map.
        replace the actions that describe the bytes of the nom opcode
        and map with dummy action as place holders.

        Side effects on iform: sets nominal_opcode and nom_opcode_bits,
        and rewrites/removes entries of iform.rule.actions in place.
        '''
        #list of all prefixes for a sanity check
        prefixes = [0x66,0x67,0xf2,0xf3,0xf0,0x64,0x65,0x2e,0x3e,0x26,0x36]
        vv = 0 # vex valid value, or 0 if not vex/evex/xop
        first_naked_bits_index = None
        # i is used as an index further down below
        for i,action in enumerate(iform.rule.actions):
            if action.is_field_binding() and action.field_name == 'VEXVALID':
                vv = action.int_value # we are in vex valid 1/2/3
                if vv==0:
                    genutil.die("zero-valued vexvalid. this should not happen.")
            if action.naked_bits():
                if vv == 0 and action.int_value in prefixes:
                    genutil.die("LEGACY SPACE PREFIX BYTE SHOULD NOT BE PRESENT: {}".format(iform))
                    # we are in legacy space and this byte is a
                    # prefix. prefixes should be encoded with operand
                    # deciders, not explicitly.
                    continue
                else:
                    #this byte represents the nominal opcode or the legacy map
                    first_naked_bits_index = i
                    break
        if first_naked_bits_index == None:
            err = "did not find nominal opcode for iform: %s" % str(iform)
            genutil.die(err)
        last_index = len(iform.rule.actions) - 1
        # FIXME: i is the same as first_naked_bits_index and they are both used below
        if i != first_naked_bits_index:
            genutil.die("This should not happen")
        first = iform.rule.actions[first_naked_bits_index]
        ### FIXME:2020-04-17 rewrite the rest of this to be generic
        ### and use dyanmic map information to guide execution.
        if vv:
            # all VEX/EVEX/XOP instr have an explicit map
            #this action represents the opcode
            iform.nominal_opcode = first.int_value
            iform.nom_opcode_bits = first.nbits
            iform.rule.actions[i] = actions.dummy_emit(first,'NOM_OPCODE') # replace opcode
        elif first.int_value != 0x0F: # map 0
            #this action represents the opcode
            iform.nominal_opcode = first.int_value
            iform.nom_opcode_bits = first.nbits
            iform.rule.actions[i] = actions.dummy_emit(first,'NOM_OPCODE') # replace opcode
        else: #first byte == 0x0F and we are legacy space
            #check that we have at least one more byte to read
            if first_naked_bits_index+1 > last_index:
                genutil.die("not enough actions")
            second = iform.rule.actions[first_naked_bits_index+1]
            if not second.naked_bits():
                genutil.die("expecting map/nominal opcode after 0x0F byte")
            if self.amd_enabled and second.int_value == 0x0F: #3DNow
                # the nominal opcode in 3DNow is in the last action.
                # FIXME: it is best to not reference directly the last action
                # but rather add a meaningful field name to the action
                amd3dnow_opcode_action = iform.rule.actions[-1]
                iform.nominal_opcode = amd3dnow_opcode_action.int_value
                iform.nom_opcode_bits = 8
                iform.rule.actions[-1] = actions.dummy_emit(amd3dnow_opcode_action,
                                                            'NOM_OPCODE')
                iform.rule.actions[i] = actions.dummy_emit(first,'LEGACY_MAP3DNOW') # replace first 0xF
                # the second 0x0F byte that describes the map is not needed, remove it
                iform.rule.actions.remove(second)
            elif second.int_value == 0x38 or second.int_value == 0x3A:
                # legacy map2 (0x0F 0x38) or map3 (0x0F 0x3A)
                #check that we have at least one more byte to read
                if first_naked_bits_index+2 > last_index:
                    genutil.die("not enough actions")
                third = iform.rule.actions[first_naked_bits_index+2]
                if not third.naked_bits():
                    genutil.die("expecting map/nominal opcode after 0x0F byte")
                iform.nominal_opcode = third.int_value
                iform.nom_opcode_bits = third.nbits
                if second.int_value==0x38:
                    xmap = 'LEGACY_MAP2'
                else:
                    xmap = 'LEGACY_MAP3'
                iform.rule.actions[i+1] = actions.dummy_emit(second,xmap) # replace the 0x38 or 0x3A
                iform.rule.actions[i+2] = actions.dummy_emit(third,
                                                             'NOM_OPCODE') # replace opcode
                iform.rule.actions.remove(first) # remove the 0x0F
            else: # legacy map1 0f prefix only, 2nd byte is opcode
                iform.nominal_opcode = second.int_value
                iform.nom_opcode_bits = second.nbits
                iform.rule.actions[i] = actions.dummy_emit(first,'LEGACY_MAP1') # replace 0x0F
                iform.rule.actions[i+1] = actions.dummy_emit(second, # replace opcode
                                                             'NOM_OPCODE')
def _find_sub_list(self,all_fbs_values, fbs_values):
''' find the the sub list: fbs_values
in the list: all_fbs_values.
if not found return -1
if found return the fist index of the recurrence '''
elems = len(fbs_values)
indices_to_scan = len(all_fbs_values) - elems + 1
for i in range(indices_to_scan):
if fbs_values == all_fbs_values[i:i+elems]:
return i
return -1
def _find_fb_occurrence(self,all_fbs_values, fbs_values):
''' find the the sub list: fbs_values
in the list: all_fbs_values.
if fbs_values is not a sub list to all_fbs_values
concatenate it.
return: the first index of fbs_values occurrence
in all_fbs_values.
'''
if not fbs_values:
return 0
if not all_fbs_values:
all_fbs_values.extend(fbs_values)
return 0
index = self._find_sub_list(all_fbs_values,fbs_values)
if index >= 0:
# found sub list
return index
# did not found sub list concatenate to the end
last_index = len(all_fbs_values)
all_fbs_values.extend(fbs_values)
return last_index
def _make_fb_values_list(self):
''' generate a list of the values being set by the FB actions.
for each iform find the start index of the values list.
All the field bindings get put in to a linear array.
This is finds the index in to that array.
This is a quick compression technique for sharing trailing
subsequences.
e.g.: iform1 sets the values: 0 1 2 (3 fields)
iform2 sets the values: 3 4 (2 fields)
iform3 sets the values: 1 2
iform4 sets the values: 2 3
the ordered list of unique sequence values across
all iforms is: 0 1 2 3 4.
start index of iform1: 0 (which picks up 0, 1 2)
start index of iform2: 3 (which picks up 3, 4)
start index of iform3: 1 (which picks up 1, 2)
start index of iform4: 2 (which picks up 2, 3)
Note: because of ordering, if iform3 happens to show
up before iform1, they won't share iform1's
subsequence 1,2.
'''
fbs_list = []
for iform in self.iform_list:
# collect all the actions that set fields
iform.fbs = iform.rule.get_all_fbs()
iform.fbs.sort(key=key_field_binding_lower)
# create a list of int values
fbs_values = [ x.int_value for x in iform.fbs]
#find the start index of this list of values in the general list
#and update the general list as needed
iform.fb_index = self._find_fb_occurrence(fbs_list, fbs_values)
fbs_list = [ str(x) for x in fbs_list]
return fbs_list
def _make_field_bindings_pattern(self,iform):
''' create the string that represents the field bindings pattern. '''
bind_actions = []
for action in iform.rule.actions:
if action.type == 'nt':
pass
elif action.type == 'FB':
bind_actions.append(action.field_name)
elif action.type == 'emit':
if action.emit_type == 'numeric' and action.field_name:
bind_actions.append(action.field_name)
else:
pass
else:
genutil.die("unexpected action type: %s" % action.type)
fb_ptrn = ''
if bind_actions:
fb_ptrn = ', '.join(sorted(bind_actions))
iform.fb_ptrn = fb_ptrn
    def _make_emit_pattern(self,iform):
        '''Compute and cache iform.emit_actions: the string used to
        classify this iform's emit-phase actions into shared patterns.'''
        iform.emit_actions = self._make_emit_pattern_low(iform)
def _make_emit_pattern_low(self,iform):
emit_pattern = []
for action in iform.rule.actions:
if action.type == 'emit':
# if no field_name, then we must differentiate the
# emit patterns using the value to avoid collisions.
if action.field_name == None:
emit_pattern.append("emit {} nbits={} intval={}".format(
action.field_name,
action.nbits,
action.int_value))
else:
emit_pattern.append("emit {} nbits={}".format(
action.field_name,
action.nbits))
elif action.type == 'nt':
emit_pattern.append(str(action))
elif action.type == 'FB':
# FB are not used in emit phase so we do not factor them
# in to the string that represents the pattern
pass
else:
genutil.die("unexpected action type: %s" % action.type)
emit_actions_str = ', '.join(emit_pattern)
return emit_actions_str
def _make_bind_pattern(self,iform):
''' create the string that represents the field bindings pattern. '''
bind_ptrn = [ str(iform.rule.conditions) ]
for action in iform.rule.actions:
if action.type == 'nt':
bind_ptrn.append(str(action))
iform.bind_ptrn = ''
if bind_ptrn:
iform.bind_ptrn = ', '.join(bind_ptrn)
def _print_log(self):
print("---- encoder log ----")
for i,iform in enumerate(self.iform_list):
print("%d\n" % i)
print("IFORM: %s" % str(iform))
print("iform index: %d" % iform.rule.iform_id)
bind_index = iform.bind_func_index
bind_fo = self.fb_ptrs_fo_list[bind_index]
print("BIND function: %d, %s" % (bind_index,
bind_fo.function_name))
emit_index = iform.emit_func_index
emit_fo = self.emit_ptrs_fo_list[emit_index]
print("EMIT function: %d, %s" % (emit_index,
emit_fo.function_name))
print("NOM_OPCODE: %d" % iform.nominal_opcode)
fbs_values = [ x.int_value for x in iform.fbs]
print("FB values: %s" % fbs_values)
print("\n\n")
print("-"*20)
    def work(self): # main entry point
        '''
        Each instruction has
        1) conditions (iclass, user registers, user inputs) and
        2) actions. 3 types:
           2a) field bindings,
           2b) nonterminals,
           2c) bit-emit of operand fields
               (hard-coded or from NT output)).
        fos = function output object (plural)
        generate the following:
        1) list of emit patterns fos (2c)
        2) list of field bindings patterns fos (2a)
        3) list of all field bindings values (values from prev step)
        4) max number of emit patterns
        5) max number of field binding patterns
        6) max number of field bindings values
        7) list of groups fos (see explanation in instructions_group_t)
        '''
        # per-iform preprocessing: identify opcode/map, then compute the
        # pattern strings used to classify iforms into shared functions
        for iform in self.iform_list:
            self._identify_map_and_nominal_opcode(iform)
            self._make_field_bindings_pattern(iform)
            self._make_emit_pattern(iform)
            #see explanation about bind patterns in instructions_group_t
            self._make_bind_pattern(iform)
        #self._study_emit_patterns()
        #self._verify_naked_bits_in_unique_pattern()
        self.fb_values_list = self._make_fb_values_list() # step 3
        self.fb_values_table_size = len(self.fb_values_list)
        self.emit_ptrs_fo_list = self._make_emit_pattern_fos()
        self.max_emit_ptrns = len(self.emit_ptrs_fo_list)
        if self.max_emit_ptrns > max_in_byte:
            # pattern indices are stored in a uint8, so the pattern count
            # must not exceed what 8 bits can hold
            error = "total number of emit patterns(%d) exceeds 8 bits"
            genutil.die(error % self.max_emit_ptrns)
        self.instruction_groups = instructions_group_t(self.iarray,
                                                       self.logs_dir)
        self.fb_ptrs_fo_list = self._make_fb_pattern_fos()
        self.max_fb_ptrns = len(self.fb_ptrs_fo_list)
        if self.max_fb_ptrns > max_in_byte:
            # pattern indices are stored in a uint8, so the pattern count
            # must not exceed what 8 bits can hold
            error = "total number of field binding patterns(%d) exceeds 8 bits"
            genutil.die(error % self.max_fb_ptrns)
        if verbosity.vencode():
            self._print_log()
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from django.core.exceptions import SuspiciousOperation
"""
staging.py
.. moduleauthor:: Russell Sim <russell.sim@monash.edu.au>
"""
import logging
import shutil
from urllib2 import build_opener
from os import path, makedirs, listdir, rmdir
import posixpath
from django.conf import settings
logger = logging.getLogger(__name__)
def get_dataset_path(dataset):
    """Return the dataset's store-relative path:
    <first experiment id>/<dataset id>."""
    experiment_id = str(dataset.get_first_experiment().id)
    return path.join(experiment_id, str(dataset.id))
def staging_traverse(staging=settings.STAGING_PATH):
    """Recurse through directories and form HTML list tree for jtree

    :param staging: the path to begin traversing
    :type staging: string
    :rtype: string
    """
    entries = []
    try:
        entries = sorted(listdir(staging))
    except OSError:
        # a missing staging directory is logged, not fatal
        logger.error('staging directory doesnt exist' + str(staging))
    pieces = ['<ul><li id="phtml_1"><a>My Files</a><ul>']
    pieces.extend(traverse(path.join(staging, name), staging)
                  for name in entries)
    pieces.append('</ul></li></ul>')
    return ''.join(pieces)
def traverse(pathname, dirname=settings.STAGING_PATH):
    """Traverse a path and return an alphabetically by filename
    sorted nested group of unordered (<ul>) list HTML tags::

        <ul>
          <li id="dir2/file2"><a>file2</a></li>
          <li id="dir2/subdir"><a>subdir</a>
            <ul><li id="dir2/subdir/file4"><a>file4</a></li></ul>
          </li>
        </ul>

    :param pathname: the directory to traverse
    :type pathname: string
    :param dirname: the root directory of the traversal
    :type dirname: string
    :rtype: string
    """
    # hidden entries (leading dot) are omitted entirely
    if posixpath.basename(pathname).startswith('.'):
        return ''
    node_id = path.relpath(pathname, dirname)
    label = path.basename(pathname)
    if path.isfile(pathname):
        return '<li class="fileicon" id="%s"><a>%s</a>' % (node_id, label) \
            + '</li>'
    if path.isdir(pathname):
        children = ''.join(
            traverse(path.join(pathname, name), dirname)
            for name in sorted(listdir(pathname)))
        return '<li id="%s"><a>%s</a>' % (node_id, label) \
            + '<ul>' + children + '</ul></li>'
    # neither file nor directory (e.g. broken symlink): contribute nothing
    return ''
class StagingHook():
    __name__ = 'StagingHook'

    def __init__(self, staging=None, store=None):
        # Default to the configured staging / file-store locations.
        self.staging = staging or settings.STAGING_PATH
        self.store = store or settings.FILE_STORE_PATH

    def __call__(self, sender, **kwargs):
        """
        post save callback

        sender
            The model class.
        instance
            The actual instance being saved.
        created
            A boolean; True if a new record was created.
        """
        datafile = kwargs.get('instance')
        # Only act on newly created records (never on edits) ...
        if not kwargs.get('created'):
            return
        # ... that were deposited via the staging protocol.
        if datafile.protocol != "staging":
            return
        stage_file(datafile)
def stage_file(datafile):
    """Ingest a staged datafile into the permanent file store.

    The file is pulled through a Django TemporaryUploadedFile; only when
    ``datafile.verify()`` succeeds against that temporary copy is it
    written into the dataset directory, after which the datafile record
    is repointed at the stored copy, its protocol cleared, and saved.

    :param datafile: the datafile model instance to ingest
    """
    from django.core.files.uploadedfile import TemporaryUploadedFile
    # NOTE(review): TemporaryUploadedFile(name, None, None, None) relies on
    # the positional signature of the deployed Django version — confirm.
    with TemporaryUploadedFile(datafile.filename, None, None, None) as tf:
        if datafile.verify(tempfile=tf.file):
            tf.file.flush()
            datafile.url = write_uploaded_file_to_dataset(datafile.dataset, tf)
            datafile.protocol = ''
            datafile.save()
def get_sync_root(prefix=''):
    """Create and return a fresh, unique directory under SYNC_TEMP_PATH.

    Candidate names are ``prefix + uuid4()``; candidates are generated
    until one does not already exist, and that directory is created.

    :param prefix: optional name prefix for the new directory
    :rtype: string (the created directory path)
    """
    from uuid import uuid4 as uuid

    def get_candidate_path():
        return path.join(settings.SYNC_TEMP_PATH, prefix + str(uuid()))

    # BUG FIX: the old code called the Python-2-only generator method
    # `.next()`; the builtin next() (available since 2.6 and in Python 3)
    # is the portable spelling.  iter(callable, '') yields candidates
    # until a non-existent path is found.
    root = next(p for p in iter(get_candidate_path, '') if not path.exists(p))
    makedirs(root)
    return root
def get_sync_url_and_protocol(sync_path, filepath):
    """Return ``(url, protocol)`` for *filepath*.

    A bare path is anchored under *sync_path* and returned as a
    ``file://`` URL with an empty protocol; anything that already has a
    URL scheme is passed through unchanged with its scheme as protocol.
    """
    from urlparse import urlparse
    from django.utils import _os
    parsed = urlparse(filepath)
    if parsed.scheme != '':
        return (filepath, parsed.scheme)
    return ('file://' + _os.safe_join(sync_path, filepath), '')
def get_staging_url_and_size(username, filepath):
    '''
    Returns a file:// URL and the size of the file.
    '''
    from os.path import getsize
    from django.utils import _os
    # safe_join raises if filepath tries to escape the user's staging area
    full_path = _os.safe_join(get_full_staging_path(username), filepath)
    return ('file://' + full_path, getsize(full_path))
def get_staging_path():
    """
    return the path to the staging directory
    (the configured ``settings.STAGING_PATH``)
    """
    return settings.STAGING_PATH
def write_uploaded_file_to_dataset(dataset, uploaded_file_post):
    """
    Writes file POST data to the dataset directory in the file store

    :param dataset: dataset whose directory to be written to
    :type dataset: models.Model
    :param uploaded_file_post: uploaded file (either UploadedFile or File)
    :type uploaded_file_post: types.FileType
    :rtype: the path of the file written to (may differ from the
        requested path if the storage backend renames on collision)
    """
    filename = uploaded_file_post.name
    from django.core.files.storage import default_storage
    # Path on disk can contain subdirectories - but if the request gets tricky with "../" or "/var" or something
    # we strip them out..
    try:
        # default_storage.path() validates that the target stays inside
        # the store, raising SuspiciousOperation/ValueError otherwise.
        copyto = path.join(get_dataset_path(dataset), filename)
        default_storage.path(copyto)
    except (SuspiciousOperation, ValueError):
        # fall back to the bare basename, dropping any directory parts
        copyto = path.join(get_dataset_path(dataset), path.basename(filename))
    logger.debug("Writing uploaded file %s" % copyto)
    copyto = default_storage.save(copyto, uploaded_file_post)
    return copyto
def get_full_staging_path(username):
    """Return the user's staging directory, or None.

    None is returned when the user is not authenticated via the
    deployment's staging protocol, or when the directory does not exist.

    :param username: the username whose staging area to locate
    """
    # check if the user is authenticated using the deployment's staging
    # protocol (the query result itself is not needed, only its success)
    try:
        from tardis.tardis_portal.models import UserAuthentication
        UserAuthentication.objects.get(
            userProfile__user__username=username,
            authenticationMethod=settings.STAGING_PROTOCOL)
    except UserAuthentication.DoesNotExist:
        return None
    # note: the redundant function-level `from os import path` was removed;
    # `path` is already imported at module scope.
    staging_path = path.join(settings.STAGING_PATH, username)
    logger.debug('full staging path returned as ' + str(staging_path))
    if not path.exists(staging_path):
        return None
    return staging_path
| |
# Lint as: python3
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CoordinatedCheckpointManager."""
import os
import random
import re
import signal
import sys
import time
from absl.testing import parameterized
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute.failure_handling import failure_handling
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.module import module
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import util as tracking_util
# Number of worker tasks in the multi-worker test cluster.
CLUSTER_SIZE = 4
# Training schedule driven by worker_fn: EPOCHS_TO_RUN epochs of
# STEPS_PER_EPOCH steps each.
EPOCHS_TO_RUN = 15
STEPS_PER_EPOCH = 15
def _is_oss():
"""Returns whether the test is run under OSS."""
return len(sys.argv) >= 1 and 'bazel' in sys.argv[0]
def _enable_coordination_service(cluster_spec):
  """Configure a standalone coordination service, once per process."""
  ctx = context.context()
  if ctx.coordination_service is None:
    ctx.configure_coordination_service(
        service_type='standalone',
        service_leader=multi_worker_util.coordination_leader(cluster_spec),
        coordinated_jobs=['chief', 'worker'])
class PreemptionCheckpointTest(test.TestCase, parameterized.TestCase):
"""Integration test for CoordinatedCheckpointManager."""
def _mwms_write_checkpoint_dir(self, checkpoint_dir, cluster_spec, task_type,
task_id):
dirpath = os.path.dirname(checkpoint_dir)
base = os.path.basename(checkpoint_dir)
if not multi_worker_util.is_chief(
cluster_spec=cluster_spec, task_type=task_type, task_id=task_id):
base_dirpath = 'workertemp_' + str(task_id)
dirpath = os.path.join(dirpath, base_dirpath)
gfile.MakeDirs(dirpath)
return os.path.join(dirpath, base)
def _maybe_trigger_a_preemption(self, training_started_event,
trigger_it=False):
if not training_started_event:
return
clear_events = [
event for event in training_started_event if not event.is_set()
]
if clear_events:
if trigger_it:
clear_events[0].set()
elif random.randrange(0, 9) > 6:
clear_events[0].set()
  def worker_fn(self,
                checkpoint_dir,
                cluster_spec,
                training_started_event=None,
                raise_app_error_on_worker=None):
    """Per-worker training loop driven by CoordinatedCheckpointManager.

    Runs EPOCHS_TO_RUN x STEPS_PER_EPOCH steps under a
    CollectiveAllReduceStrategy, resuming from failure_handler.total_runs
    after a restart, and finally asserts the variable reached the full
    expected count.

    Args:
      checkpoint_dir: directory managed by CoordinatedCheckpointManager.
      cluster_spec: cluster spec for the coordination service.
      training_started_event: optional list of events; one may be set per
        epoch to let the main process trigger a preemption.
      raise_app_error_on_worker: task id that should raise a (non-preemption)
        application error inside its train step, or None.
    """
    _enable_coordination_service(cluster_spec)
    strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()

    class Model(module.Module):

      def __init__(self):
        # SUM aggregation: each replica's +1 contributes to the total.
        self.v = variables_lib.Variable(
            0.,
            synchronization=variables_lib.VariableSynchronization.ON_WRITE,
            aggregation=variables_lib.VariableAggregation.SUM)

      @def_function.function(input_signature=[])
      def __call__(self):
        return self.v.read_value()

    with strategy.scope():
      model = Model()
      # Named it fh_ckpt because it'd be better that the user have their regular
      # checkpoint separate from the checkpoint for
      # CoordinatedCheckpointManager, since we will create CheckpointManager to
      # manage the checkpoint and only one CheckpointManager should be active in
      # a particular directory at a time..
      fh_ckpt = tracking_util.Checkpoint(model=model)

      failure_handler = failure_handling.CoordinatedCheckpointManager(
          strategy.cluster_resolver, fh_ckpt, checkpoint_dir)

    def distributed_train_step(current_epoch, current_step):

      @def_function.function
      def train_step():
        # Optionally simulate an application (non-preemption) failure on a
        # designated worker.
        if distribution_strategy_context.get_distribution_strategy(
        ).cluster_resolver.task_id == raise_app_error_on_worker:
          raise errors_impl.ResourceExhaustedError(
              node_def=None, op=None, message='Running out of resources')

        model.v.assign_add(constant_op.constant(1.))

      strategy.run(train_step)

      if current_step == STEPS_PER_EPOCH - 1:
        logging.info('epoch %d finished', current_epoch)

    logging.info('Restored training at %d', failure_handler.total_runs)
    # Resume from the epoch/step recorded by the failure handler.
    for epoch in range(failure_handler.total_runs // STEPS_PER_EPOCH,
                       EPOCHS_TO_RUN):

      for step in range(failure_handler.total_runs % STEPS_PER_EPOCH,
                        STEPS_PER_EPOCH):
        failure_handler.run(distributed_train_step, epoch, step)
      # Add some randomness to when preemption actually happens. We should
      # trigger it for sure if the training is coming to an end and it hasn't
      # been triggered yet.
      if epoch >= EPOCHS_TO_RUN - 2:
        trigger_it = True
      else:
        trigger_it = False

      self._maybe_trigger_a_preemption(training_started_event, trigger_it)

    self.assertEqual(
        model.v.numpy(),
        strategy.num_replicas_in_sync * EPOCHS_TO_RUN * STEPS_PER_EPOCH)
def test_preemption_checkpointing(self):
has_chief = False
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=CLUSTER_SIZE)
training_started_event = multi_process_runner.manager().Event()
checkpoint_dir = os.path.join(self.get_temp_dir(), 'fh_ckpt')
if _is_oss():
rpc_layer = 'grpc'
else:
rpc_layer = 'grpc+loas'
mpr = multi_process_runner.MultiProcessRunner(
self.worker_fn,
cluster_spec,
args=(checkpoint_dir, cluster_spec, [training_started_event]),
rpc_layer=rpc_layer,
return_output=True,
dependence_on_chief=has_chief)
logging.info('Cluster starting.')
mpr.start()
while not training_started_event.is_set():
time.sleep(1)
logging.info('sending sigterm')
killed_worker = random.randrange(0, CLUSTER_SIZE)
os.kill(mpr.get_process_id('worker', killed_worker), signal.SIGTERM)
logging.info('sigterm sent')
time.sleep(5)
logging.info('restarting workers')
for worker_id in range(CLUSTER_SIZE):
mpr.start_single_process('worker', worker_id, cluster_spec)
logging.info('workers restarted')
stdout = mpr.join().stdout
all_start_point = []
for msg in stdout:
matched_group = re.search(r'.*Restored training at (\d+)', msg)
if matched_group:
all_start_point.append(int(matched_group.group(1)))
# remove duplicate logs created due to presence of multiple workers
start_points = all_start_point[::CLUSTER_SIZE]
# assert that after restarting, we don't repeat previous training steps
self.assertNotEqual(start_points[-1], 0)
def test_error_propagation(self):
error_worker = random.randint(0, CLUSTER_SIZE)
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=False, num_workers=CLUSTER_SIZE)
checkpoint_dir = self.get_temp_dir()
def assert_raise_error():
# Asserts that an error raised during a training step on one of the worker
# is caught on all workers.
with self.assertRaises(errors_impl.ResourceExhaustedError) as error:
self.worker_fn(
checkpoint_dir,
cluster_spec,
raise_app_error_on_worker=error_worker)
self.assertIn('Running out of resources', str(error.exception))
if _is_oss():
rpc_layer = 'grpc'
else:
rpc_layer = 'grpc+loas'
mpr = multi_process_runner.MultiProcessRunner(
assert_raise_error,
cluster_spec,
rpc_layer=rpc_layer,
return_output=True,
dependence_on_chief=False)
logging.info('Cluster starting.')
mpr.start()
mpr.join()
if __name__ == '__main__':
  # Run the tests when the file is executed directly.
  test_util.main()
| |
"""MediaPlayer platform for Roon integration."""
import logging
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.util import convert
from homeassistant.util.dt import utcnow
from .const import DOMAIN
# Bitmask of media-player features this integration implements; exposed to
# Home Assistant via RoonDevice.supported_features.
SUPPORT_ROON = (
    SUPPORT_PAUSE
    | SUPPORT_VOLUME_SET
    | SUPPORT_STOP
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_SHUFFLE_SET
    | SUPPORT_SEEK
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_VOLUME_STEP
)

_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Roon MediaPlayer from Config Entry."""
    roon_server = hass.data[DOMAIN][config_entry.entry_id]
    known_ids = set()

    @callback
    def async_update_media_player(player_data):
        """Create a new entity for an unseen player, or push an update to it."""
        dev_id = player_data["dev_id"]
        if dev_id in known_ids:
            # Already have an entity for this player: forward the fresh data.
            async_dispatcher_send(
                hass, f"room_media_player_update_{dev_id}", player_data
            )
        else:
            # First time we see this player: register an entity for it.
            known_ids.add(dev_id)
            async_add_entities([RoonDevice(roon_server, player_data)])

    # Subscribe to player add/change notifications from the server component.
    async_dispatcher_connect(hass, "roon_media_player", async_update_media_player)
class RoonDevice(MediaPlayerEntity):
    """Representation of a Roon device (media player entity)."""

    def __init__(self, server, player_data):
        """Initialize Roon device object."""
        self._remove_signal_status = None
        self._server = server
        self._available = True
        # Timestamp of the last position update; feeds
        # media_position_updated_at so the frontend can extrapolate progress.
        self._last_position_update = None
        self._supports_standby = False
        self._state = STATE_IDLE
        self._last_playlist = None
        self._last_media = None
        self._unique_id = None
        self._zone_id = None
        self._output_id = None
        self._name = DEVICE_DEFAULT_NAME
        self._media_title = None
        self._media_album_name = None
        self._media_artist = None
        self._media_position = 0
        self._media_duration = 0
        self._is_volume_muted = False
        self._volume_step = 0
        self._shuffle = False
        self._media_image_url = None
        self._volume_level = 0
        # Populate all of the above from the initial server push.
        self.update_data(player_data)

    async def async_added_to_hass(self):
        """Register callback for server-pushed player updates."""
        # NOTE(review): the signal prefix reads "room" rather than "roon". It
        # matches the sender in async_setup_entry, so updates do arrive, but
        # if this is a typo both ends must be renamed together.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"room_media_player_update_{self.unique_id}",
                self.async_update_callback,
            )
        )

    @callback
    def async_update_callback(self, player_data):
        """Handle device updates."""
        self.update_data(player_data)
        self.async_write_ha_state()

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_ROON

    @property
    def device_info(self):
        """Return the device info."""
        dev_model = "player"
        # If the output exposes source controls, report the first control's
        # display name as the model.
        if self.player_data.get("source_controls"):
            dev_model = self.player_data["source_controls"][0].get("display_name")
        return {
            "identifiers": {(DOMAIN, self.unique_id)},
            "name": self.name,
            "manufacturer": "RoonLabs",
            "model": dev_model,
            "via_hub": (DOMAIN, self._server.host),
        }

    def update_data(self, player_data=None):
        """Update session object from the given (or stored) player data."""
        if player_data:
            self.player_data = player_data
        if not self.player_data["is_available"]:
            # this player was removed
            self._available = False
            self._state = STATE_OFF
        else:
            self._available = True
            # determine player state
            self.update_state()
            if self.state == STATE_PLAYING:
                self._last_position_update = utcnow()

    def update_state(self):
        """Update the power state and player state."""
        new_state = ""
        # power state from source control (if supported)
        if "source_controls" in self.player_data:
            for source in self.player_data["source_controls"]:
                if source["supports_standby"] and source["status"] != "indeterminate":
                    self._supports_standby = True
                    if source["status"] in ["standby", "deselected"]:
                        new_state = STATE_OFF
                    break
        # determine player state
        if not new_state:
            if self.player_data["state"] == "playing":
                new_state = STATE_PLAYING
            elif self.player_data["state"] == "loading":
                # treat "loading" (buffering) as playing
                new_state = STATE_PLAYING
            elif self.player_data["state"] == "stopped":
                new_state = STATE_IDLE
            elif self.player_data["state"] == "paused":
                new_state = STATE_PAUSED
            else:
                new_state = STATE_IDLE
        self._state = new_state
        self._unique_id = self.player_data["dev_id"]
        self._zone_id = self.player_data["zone_id"]
        self._output_id = self.player_data["output_id"]
        self._name = self.player_data["display_name"]
        self._is_volume_muted = self.player_data["volume"]["is_muted"]
        self._volume_step = convert(self.player_data["volume"]["step"], int, 0)
        self._shuffle = self.player_data["settings"]["shuffle"]
        if self.player_data["volume"]["type"] == "db":
            # dB-type volumes are mapped onto a 0..100 scale before
            # normalising to HA's 0..1 range below.
            volume = (
                convert(self.player_data["volume"]["value"], float, 0.0) / 80 * 100
                + 100
            )
        else:
            volume = convert(self.player_data["volume"]["value"], float, 0.0)
        self._volume_level = convert(volume, int, 0) / 100
        try:
            # now_playing may be absent entirely (KeyError caught below).
            self._media_title = self.player_data["now_playing"]["three_line"]["line1"]
            self._media_artist = self.player_data["now_playing"]["three_line"]["line2"]
            self._media_album_name = self.player_data["now_playing"]["three_line"][
                "line3"
            ]
            self._media_position = convert(
                self.player_data["now_playing"]["seek_position"], int, 0
            )
            self._media_duration = convert(
                self.player_data["now_playing"]["length"], int, 0
            )
            try:
                # image_key may be missing even when now_playing is present.
                image_id = self.player_data["now_playing"]["image_key"]
                self._media_image_url = self._server.roonapi.get_image(image_id)
            except KeyError:
                self._media_image_url = None
        except KeyError:
            # Nothing playing: clear all track metadata.
            self._media_title = None
            self._media_album_name = None
            self._media_artist = None
            self._media_position = 0
            self._media_duration = 0
            self._media_image_url = None

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        # Returns value from homeassistant.util.dt.utcnow().
        return self._last_position_update

    @property
    def unique_id(self):
        """Return the id of this roon client."""
        return self._unique_id

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        # All updates are pushed by the server component via the dispatcher.
        return False

    @property
    def zone_id(self):
        """Return current zone Id."""
        return self._zone_id

    @property
    def output_id(self):
        """Return current output Id."""
        return self._output_id

    @property
    def name(self):
        """Return device name."""
        return self._name

    @property
    def media_title(self):
        """Return title currently playing."""
        return self._media_title

    @property
    def media_album_name(self):
        """Album name of current playing media (Music track only)."""
        return self._media_album_name

    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self._media_artist

    @property
    def media_album_artist(self):
        """Album artist of current playing media (Music track only)."""
        return self._media_artist

    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        return self._last_playlist

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._media_image_url

    @property
    def media_position(self):
        """Return position currently playing."""
        return self._media_position

    @property
    def media_duration(self):
        """Return total runtime length."""
        return self._media_duration

    @property
    def volume_level(self):
        """Return current volume level."""
        return self._volume_level

    @property
    def is_volume_muted(self):
        """Return mute state."""
        return self._is_volume_muted

    @property
    def volume_step(self):
        """Return volume step size."""
        return self._volume_step

    @property
    def supports_standby(self):
        """Return power state of source controls."""
        return self._supports_standby

    @property
    def state(self):
        """Return current playstate of the device."""
        return self._state

    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return self._shuffle

    def media_play(self):
        """Send play command to device."""
        self._server.roonapi.playback_control(self.output_id, "play")

    def media_pause(self):
        """Send pause command to device."""
        self._server.roonapi.playback_control(self.output_id, "pause")

    def media_play_pause(self):
        """Toggle play command to device."""
        self._server.roonapi.playback_control(self.output_id, "playpause")

    def media_stop(self):
        """Send stop command to device."""
        self._server.roonapi.playback_control(self.output_id, "stop")

    def media_next_track(self):
        """Send next track command to device."""
        self._server.roonapi.playback_control(self.output_id, "next")

    def media_previous_track(self):
        """Send previous track command to device."""
        self._server.roonapi.playback_control(self.output_id, "previous")

    def media_seek(self, position):
        """Send seek command to device."""
        self._server.roonapi.seek(self.output_id, position)
        # Seek doesn't cause an async update - so force one
        self._media_position = position
        self.schedule_update_ha_state()

    def set_volume_level(self, volume):
        """Send new volume_level to device."""
        # HA uses 0..1; the Roon API call takes 0..100.
        volume = int(volume * 100)
        self._server.roonapi.change_volume(self.output_id, volume)

    def mute_volume(self, mute=True):
        """Send mute/unmute to device."""
        self._server.roonapi.mute(self.output_id, mute)

    def volume_up(self):
        """Send volume up (3 relative steps) to device."""
        self._server.roonapi.change_volume(self.output_id, 3, "relative")

    def volume_down(self):
        """Send volume down (3 relative steps) to device."""
        self._server.roonapi.change_volume(self.output_id, -3, "relative")

    def turn_on(self):
        """Turn on device (if supported)."""
        if not (self.supports_standby and "source_controls" in self.player_data):
            # No standby support: best effort is to start playback.
            self.media_play()
            return
        for source in self.player_data["source_controls"]:
            if source["supports_standby"] and source["status"] != "indeterminate":
                self._server.roonapi.convenience_switch(
                    self.output_id, source["control_key"]
                )
                return

    def turn_off(self):
        """Turn off device (if supported)."""
        if not (self.supports_standby and "source_controls" in self.player_data):
            # No standby support: best effort is to stop playback.
            self.media_stop()
            return
        for source in self.player_data["source_controls"]:
            # (equivalent to != "indeterminate")
            if source["supports_standby"] and not source["status"] == "indeterminate":
                self._server.roonapi.standby(self.output_id, source["control_key"])
                return

    def set_shuffle(self, shuffle):
        """Set shuffle state."""
        self._server.roonapi.shuffle(self.output_id, shuffle)

    def play_media(self, media_type, media_id, **kwargs):
        """Send the play_media command to the media player."""
        # Roon itself doesn't support playback of media by filename/url so this a bit of a workaround.
        media_type = media_type.lower()
        if media_type == "radio":
            if self._server.roonapi.play_radio(self.zone_id, media_id):
                self._last_playlist = media_id
                self._last_media = media_id
        elif media_type == "playlist":
            if self._server.roonapi.play_playlist(
                self.zone_id, media_id, shuffle=False
            ):
                self._last_playlist = media_id
        elif media_type == "shuffleplaylist":
            if self._server.roonapi.play_playlist(self.zone_id, media_id, shuffle=True):
                self._last_playlist = media_id
        elif media_type == "queueplaylist":
            self._server.roonapi.queue_playlist(self.zone_id, media_id)
        elif media_type == "genre":
            self._server.roonapi.play_genre(self.zone_id, media_id)
        else:
            _LOGGER.error(
                "Playback requested of unsupported type: %s --> %s",
                media_type,
                media_id,
            )
| |
#!/usr/bin/env python3
import os
import sys
import argparse
import pyfastaq
import pymummer
import logging
from operator import itemgetter
# coords = list of tuples [(x1, y1), (x2, y2) ...]
def svg_polygon(coords, fill_colour, border_colour, border_width = 1, opacity=-1):
    """Return an SVG <polygon> element string with the given corners and style.

    opacity == -1 (the default) means "do not emit a fill-opacity attribute".
    """
    points = ' '.join(f'{c[0]},{c[1]}' for c in coords)
    attributes = [f'<polygon points="{points}"', f'fill="{fill_colour}"']
    if opacity != -1:
        attributes.append(f'fill-opacity="{opacity}"')
    attributes.append(f'stroke="{border_colour}"')
    attributes.append(f'stroke-width="{border_width}"')
    attributes.append('/>')
    return ' '.join(attributes)
class Appearance:
    """Drawing options (colours, spacing, match filters) taken from parsed CLI options."""

    def __init__(self, opts):
        # Contigs and matches are drawn borderless: the border colour always
        # mirrors the fill colour and the border width is zero.
        self.contig_x_space = opts.contig_x_space
        self.contig_fill_colour = self.contig_border_colour = opts.seq_col
        self.contig_border_width = 0
        self.match_fwd_fill_colour = self.match_fwd_border_colour = opts.fwd_match_col
        self.match_fwd_border_width = 0
        self.match_rev_fill_colour = self.match_rev_border_colour = opts.rev_match_col
        self.match_rev_border_width = 0
        self.match_opacity = opts.match_opacity
        # Filters deciding whether a nucmer match is long enough to draw.
        self.match_min_length_bases = opts.match_min_len_bases
        self.match_min_length_ratio = opts.match_min_len_ratio
class Assembly:
    """Names and lengths of the sequences in one FASTA file, plus SVG drawing of its contigs."""

    def __init__(self, fasta_file):
        # Keep contigs in file order so they are drawn left to right as input.
        self.names = []
        self.lengths = {}
        for seq in pyfastaq.sequences.file_reader(fasta_file):
            self.names.append(seq.id)
            self.lengths[seq.id] = len(seq)

    def x_width(self, contig_x_space):
        """Total drawn width in bases: all contig lengths plus the gaps between them."""
        gap_count = len(self.lengths) - 1
        return sum(self.lengths.values()) + gap_count * contig_x_space

    def contigs_svg(self, scale_factor, y_top, y_bottom, appearance):
        """Return SVG polygons for every contig, recording each contig's x extent
        in self.contig_coords for later match drawing."""
        polygons = []
        self.contig_coords = {}
        x_in_bases = 0
        for name in self.names:
            end_in_bases = x_in_bases + self.lengths[name]
            x_left = x_in_bases * scale_factor
            x_right = end_in_bases * scale_factor
            self.contig_coords[name] = (x_left, x_right)
            polygons.append(
                svg_polygon(
                    [(x_left, y_top), (x_right, y_top), (x_right, y_bottom), (x_left, y_bottom)],
                    appearance.contig_fill_colour,
                    appearance.contig_border_colour,
                    border_width=appearance.contig_border_width,
                )
            )
            x_in_bases = end_in_bases + appearance.contig_x_space
        return '\n'.join(polygons)
class Assemblies:
    """Compares consecutive pairs of FASTA files with nucmer and draws the whole
    comparison as one SVG figure (files top to bottom in input order)."""

    def __init__(self, fasta_files, appearance, outprefix, nucmer_min_id=98, nucmer_min_length=250, simplify=True, verbose=False):
        self.fasta_files = fasta_files
        self.appearance = appearance
        self.outprefix = outprefix
        # Check all input files up front; report every missing one, then bail.
        ok = True
        for filename in fasta_files:
            if not os.path.exists(filename):
                print('Could not find file:', filename, file=sys.stderr)
                ok = False
        if not ok:
            sys.exit(1)
        # NOTE(review): nucmer_min_length, simplify and verbose are stored but
        # never read again -- _make_nucmer_files hard-codes simplify=True and
        # derives verbosity from the logging level, and no minimum length is
        # passed to the Runner. Confirm whether they should be forwarded.
        self.nucmer_min_id = nucmer_min_id
        self.nucmer_min_length = nucmer_min_length
        self.simplify = simplify
        self.verbose = verbose
        self.assemblies = {filename: Assembly(filename) for filename in fasta_files}

    @staticmethod
    def _get_x_max(assemblies, contig_x_space):
        """Widest assembly (in bases, including inter-contig gaps)."""
        return max([a.x_width(contig_x_space) for a in assemblies.values()])

    def _make_nucmer_files(self, outprefix):
        """Run nucmer between each consecutive pair of assemblies and load the
        matches into self.nucmer_matches, reusing an existing coords file when
        one with the expected name is already on disk."""
        self.nucmer_matches = []
        self.nucmer_files = []
        for i in range(len(self.assemblies) - 1):
            logging.info(''.join([str(x) for x in ['Comparing assembly ', i, ' (', self.fasta_files[i], ') against ', i+1, ' (', self.fasta_files[i+1], ')']]))
            nucmer_file = '.'.join([outprefix, str(i), str(i+1), 'coords'])
            if os.path.exists(nucmer_file):
                logging.info('Found nucmer coords file ' + nucmer_file + ' so no need to run nucmer')
            else:
                logging.info('Running nucmer. Coords file will be called: ' + nucmer_file)
                # reference = file i+1, query = file i: in each stored match
                # the query is the assembly drawn above the reference.
                n = pymummer.nucmer.Runner(
                    self.fasta_files[i+1],
                    self.fasta_files[i],
                    nucmer_file,
                    min_id=self.nucmer_min_id,
                    breaklen=500,
                    maxmatch=True,
                    simplify=True,
                    verbose=logging.getLogger().isEnabledFor(logging.INFO),
                )
                n.run()
            self.nucmer_matches.append([x for x in pymummer.coords_file.reader(nucmer_file)])

    @staticmethod
    def _write_svg_header(filehandle, width, height):
        """Write the XML declaration, DOCTYPE and opening <svg> tag."""
        print(r'''<?xml version="1.0" standalone="no"?>''', file=filehandle)
        print(r'''<!DOCTYPE svg PUBLIC " -//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">''', file=filehandle)
        print(r'<svg width="' + str(width) + '" height="' + str(height) + '">', file=filehandle)

    def _write_svg_contigs(self, filehandle):
        """Draw each assembly's contigs as one horizontal row per input file."""
        y_top = 0
        for filename in self.fasta_files:
            y_bottom = y_top + self.contig_height
            print(self.assemblies[filename].contigs_svg(self.x_scale_factor, y_top, y_bottom, self.appearance), file=filehandle)
            # Next row starts below this row's contigs plus the match band.
            y_top += self.contig_height + 2 * self.y_space + self.match_height

    def _write_svg_matches_between_two_assemblies(self, nucmer_matches, top_assembly, bottom_assembly, y_top):
        # top assembly = the query in nucmer matches
        # bottom assembly = the reference in nucmer matches
        lines = []
        y_bottom = y_top + self.match_height
        for match in nucmer_matches:
            # Skip matches shorter than both the absolute and relative cutoffs.
            if match.hit_length_qry < min(self.appearance.match_min_length_bases, self.appearance.match_min_length_ratio * min(match.qry_length, match.ref_length)):
                continue
            # Map match coordinates (bases) onto the drawn contig's x extent.
            top_contig_start, top_contig_end = top_assembly.contig_coords[match.qry_name]
            top_start = top_contig_start + (match.qry_start / match.qry_length) * (top_contig_end - top_contig_start)
            top_end = top_contig_start + (match.qry_end / match.qry_length) * (top_contig_end - top_contig_start)
            bottom_contig_start, bottom_contig_end = bottom_assembly.contig_coords[match.ref_name]
            bottom_start = bottom_contig_start + (match.ref_start / match.ref_length) * (bottom_contig_end - bottom_contig_start)
            bottom_end = bottom_contig_start + (match.ref_end / match.ref_length) * (bottom_contig_end - bottom_contig_start)
            coords = [(top_start, y_top), (top_end, y_top), (bottom_end, y_bottom), (bottom_start, y_bottom)]
            # Tuples are (strand flag, drawn width, polygon) so they can be
            # sorted below: forward before reverse, narrower before wider.
            if match.on_same_strand():
                lines.append((0, abs(top_start - top_end), svg_polygon(coords, self.appearance.match_fwd_fill_colour, self.appearance.match_fwd_border_colour, opacity=self.appearance.match_opacity, border_width=self.appearance.match_fwd_border_width)))
            else:
                lines.append((1, abs(top_start - top_end), svg_polygon(coords, self.appearance.match_rev_fill_colour, self.appearance.match_rev_border_colour, opacity=self.appearance.match_opacity, border_width=self.appearance.match_rev_border_width)))
        # Later SVG elements paint on top, so reverse-strand and wider
        # matches end up drawn over forward/narrower ones.
        lines.sort(key=itemgetter(0, 1))
        return '\n'.join([x[-1] for x in lines])

    def _write_all_svg_matches(self, filehandle):
        """Draw the match bands between every consecutive pair of assemblies."""
        y_top = self.contig_height + self.y_space
        for i in range(len(self.fasta_files) - 1):
            top_assembly = self.assemblies[self.fasta_files[i]]
            bottom_assembly = self.assemblies[self.fasta_files[i+1]]
            print(self._write_svg_matches_between_two_assemblies(self.nucmer_matches[i], top_assembly, bottom_assembly, y_top), file=filehandle)
            y_top += self.contig_height + 2 * self.y_space + self.match_height

    def run(self, outprefix):
        """Make the nucmer files (if needed) and write outprefix + '.svg'."""
        self._make_nucmer_files(outprefix)
        # Layout constants, in SVG user units.
        self.contig_height = 1.5
        self.match_height = 30
        self.y_space = 1
        self.svg_height = (len(self.assemblies) - 1) * (self.contig_height + 2 * self.y_space + self.match_height) + self.y_space + self.contig_height
        self.svg_width = 400
        self.total_width_in_bases = self._get_x_max(self.assemblies, self.appearance.contig_x_space)
        self.x_scale_factor = self.svg_width / self.total_width_in_bases
        svg_file = outprefix + '.svg'
        logging.info('Writing SVG file: ' + svg_file)
        svg_fh = pyfastaq.utils.open_file_write(svg_file)
        self._write_svg_header(svg_fh, self.svg_width, self.svg_height)
        self._write_svg_contigs(svg_fh)
        self._write_all_svg_matches(svg_fh)
        print('</svg>', file=svg_fh)
        pyfastaq.utils.close(svg_fh)
        logging.info('Finished writing SVG file: ' + svg_file)
# ----- command-line interface and script entry point ------------------------
parser = argparse.ArgumentParser(
    description = 'Makes cartoon ACT-style figure comparing at least two FASTA files. Files are shown from top to bottom in the same order as listed on the command line when calling this script.',
    usage = '%(prog)s [options] <outprefix> <file1.fa> <file2.fa> [more fasta files ...]')
parser.add_argument('--contig_x_space', type=int, help='Space between each contig, in bases [%(default)s]', default=20000, metavar='INT')
parser.add_argument('--seq_col', help='Colour of sequences [%(default)s]', default='black', metavar='STRING')
parser.add_argument('--fwd_match_col', help='Colour of match on same strands [%(default)s]', default='lightseagreen', metavar='STRING')
parser.add_argument('--rev_match_col', help='Colour of match on opposite strands [%(default)s]', default='peru', metavar='STRING')
# BUG FIX: was type=int, which truncated any user-supplied opacity to 0 or 1
# even though the default (0.8) and metavar show a float in [0,1] is intended.
# Also fixed the 'Opactiy' typo in the help text.
parser.add_argument('--match_opacity', type=float, help='Opacity of matches between 0 and 1. Higher means less transparent [%(default)s]', default=0.8, metavar='FLOAT in [0,1]')
parser.add_argument('--match_min_len_bases', type=int, help='Minimum match length to show. Whether or not a match is shown also depends on the sequence lengths. See --match_min_len_ratio [%(default)s]', default=10000, metavar='INT')
parser.add_argument('--match_min_len_ratio', type=float, help='Minimum match length to show, as proportion of sequence lengths. Using "--match_min_len_bases X --match_min_len_ratio Y" means that a match of length L is shown if L >= min(X, Y * S), where S is max(length of seq1, length of seq2) [%(default)s]', default=0.5, metavar='FLOAT')
# BUG FIX: metavar and help advertise FLOAT, so accept fractional identities
# (e.g. 97.5) instead of parsing as int.
parser.add_argument('--nucmer_min_id', type=float, help='Minimum identity when running nucmer [%(default)s]', default=90, metavar='FLOAT')
parser.add_argument('-v', '--verbose', action='store_true', help='Be verbose')
parser.add_argument('outprefix', help='Prefix of output files')
parser.add_argument('fa_list', help='List of at least 2 fasta files', nargs=argparse.REMAINDER)
options = parser.parse_args()

# Comparisons are pairwise between consecutive files, so at least two needed.
if len(options.fa_list) < 2:
    print('Must have at least two input fasta files! Cannot continue', file=sys.stderr)
    sys.exit(1)

if options.verbose:
    # INFO-level messages with no extra formatting (messages are free text).
    logging.basicConfig(level=logging.INFO, format='')

a = Assemblies(
    options.fa_list,
    Appearance(options),
    options.outprefix,
    nucmer_min_id=options.nucmer_min_id,
    simplify=True,
    verbose=options.verbose
)
a.run(options.outprefix)
| |
"""
Created on 15 Nov 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Firmware report:
OPC-N3 Iss1.1 FirmwareVer=1.17a...........................BS
"""
import time
from scs_dfe.particulate.alphasense_opc import AlphasenseOPC
from scs_dfe.particulate.opc_n3.opc_firmware_conf import OPCFirmwareConf
from scs_dfe.particulate.opc_n3.opc_n3_datum import OPCN3Datum
from scs_dfe.particulate.opc_n3.opc_status import OPCStatus
# --------------------------------------------------------------------------------------------------------------------
class OPCN3(AlphasenseOPC):
"""
classdocs
"""
MIN_SAMPLE_PERIOD = 5.0 # seconds
MAX_SAMPLE_PERIOD = 10.0 # seconds
DEFAULT_SAMPLE_PERIOD = 10.0 # seconds
DEFAULT_BUSY_TIMEOUT = 5.0 # seconds
# ----------------------------------------------------------------------------------------------------------------
__BOOT_TIME = 4.0 # seconds
__POWER_CYCLE_TIME = 10.0 # seconds
__LASER_START_TIME = 1.0 # seconds
__FAN_START_TIME = 5.0 # seconds
__FAN_STOP_TIME = 2.0 # seconds
__MAX_PERMITTED_ZERO_READINGS = 4
__CMD_POWER = 0x03
__CMD_LASER_ON = 0x07
__CMD_LASER_OFF = 0x06
__CMD_FAN_ON = 0x03
__CMD_FAN_OFF = 0x02
__CMD_READ_HISTOGRAM = 0x30
__CMD_GET_FIRMWARE = 0x3f
__CMD_GET_VERSION = 0x12
__CMD_GET_SERIAL = 0x10
__CMD_GET_STATUS = 0x13
__CMD_GET_CONF = 0x3c
__CMD_SET_CONF = 0x3a
__CMD_SET_BIN_WEIGHTING_INDEX = 0x05
__CMD_SAVE_CONF = 0x43
__CMD_SAVE_CONF_SEQUENCE = (0x3F, 0x3c, 0x3f, 0x3c, 0x43)
__CMD_CHECK = 0xcf
__CMD_RESET = 0x06
__RESPONSE_BUSY = 0x31
__RESPONSE_NOT_BUSY = (0x00, 0xff, 0xf3)
__SPI_CLOCK = 326000 # Minimum speed for OPCube
__SPI_MODE = 1
__DELAY_TRANSFER = 0.020 # 0.001
__DELAY_CMD = 0.020 # 0.010
__DELAY_BUSY = 0.100
__LOCK_TIMEOUT = 20.0
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def source(cls):
return OPCN3Datum.SOURCE
@classmethod
def lock_timeout(cls):
return cls.__LOCK_TIMEOUT
@classmethod
def boot_time(cls):
return cls.__BOOT_TIME
@classmethod
def power_cycle_time(cls):
return cls.__POWER_CYCLE_TIME
@classmethod
def max_permitted_zero_readings(cls):
return cls.__MAX_PERMITTED_ZERO_READINGS
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, interface, spi_bus, spi_device):
"""
Constructor
"""
super().__init__(interface, spi_bus, spi_device, self.__SPI_MODE, self.__SPI_CLOCK)
# ----------------------------------------------------------------------------------------------------------------
def operations_on(self):
try:
self.obtain_lock()
# fan...
self.__cmd_power(self.__CMD_FAN_ON)
time.sleep(self.__FAN_START_TIME)
# laser...
self.__cmd_power(self.__CMD_LASER_ON)
finally:
self.release_lock()
def operations_off(self):
try:
self.obtain_lock()
# laser...
self.__cmd_power(self.__CMD_LASER_OFF)
# fan...
self.__cmd_power(self.__CMD_FAN_OFF)
time.sleep(self.__FAN_STOP_TIME)
finally:
self.release_lock()
def reset(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_RESET)
time.sleep(self.__DELAY_TRANSFER)
finally:
self._spi.close()
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def sample(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_READ_HISTOGRAM)
chars = self.__read_bytes(OPCN3Datum.CHARS)
# report...
return OPCN3Datum.construct(chars)
finally:
self._spi.close()
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def serial_no(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_GET_SERIAL)
chars = self.__read_bytes(60)
# report...
report = ''.join(chr(byte) for byte in chars)
pieces = report.split(' ')
if len(pieces) < 2:
return None
return pieces[1]
finally:
self._spi.close()
self.release_lock()
def version(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_GET_VERSION)
# report...
major = int(self.__read_byte())
minor = int(self.__read_byte())
return major, minor
finally:
self._spi.close()
self.release_lock()
def status(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_GET_STATUS)
chars = self.__read_bytes(OPCStatus.CHARS)
# report...
status = OPCStatus.construct(chars)
return status
finally:
self._spi.close()
self.release_lock()
def firmware(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_GET_FIRMWARE)
chars = self.__read_bytes(60)
# report...
report = ''.join(chr(byte) for byte in chars)
return report.strip('\0\xff') # \0 - Raspberry Pi, \xff - BeagleBone
finally:
self._spi.close()
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def get_firmware_conf(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_GET_CONF)
chars = self.__read_bytes(OPCFirmwareConf.CHARS)
# report...
conf = OPCFirmwareConf.construct(chars)
return conf
finally:
self._spi.close()
self.release_lock()
def set_firmware_conf(self, jdict):
conf = OPCFirmwareConf.construct_from_jdict(jdict)
chars = conf.as_chars()
try:
self.obtain_lock()
self._spi.open()
# set conf...
self.__wait_while_busy()
self.__cmd(self.__CMD_SET_CONF)
self.__write_bytes(chars)
# set bin_weighting_index...
self.__wait_while_busy()
self.__cmd(self.__CMD_SET_BIN_WEIGHTING_INDEX)
self.__write_byte(conf.bin_weighting_index)
finally:
self._spi.close()
self.release_lock()
def commit_firmware_conf(self):
try:
self.obtain_lock()
self._spi.open()
# command...
self.__wait_while_busy()
self.__cmd(self.__CMD_SAVE_CONF)
self.__write_bytes(self.__CMD_SAVE_CONF_SEQUENCE)
finally:
self._spi.close()
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def __wait_while_busy(self, specified_timeout=None):
timeout = self.DEFAULT_BUSY_TIMEOUT if specified_timeout is None else specified_timeout
timeout_time = time.time() + timeout
self.__cmd(self.__CMD_CHECK)
while self.__read_byte() == self.__RESPONSE_BUSY:
if time.time() > timeout_time:
raise TimeoutError()
time.sleep(self.__DELAY_BUSY)
def __cmd_power(self, cmd):
try:
self._spi.open()
while True:
response = self._spi.xfer([self.__CMD_POWER])
time.sleep(self.__DELAY_CMD)
# print(["0x%02x" % char for char in response], file=sys.stderr)
# sys.stderr.flush()
if response[0] in self.__RESPONSE_NOT_BUSY:
break
self._spi.xfer([cmd])
time.sleep(self.__DELAY_TRANSFER)
finally:
self._spi.close()
def __cmd(self, cmd):
self._spi.xfer([cmd])
time.sleep(self.__DELAY_CMD)
self._spi.xfer([cmd])
time.sleep(self.__DELAY_TRANSFER)
def __read_bytes(self, count):
response = [self.__read_byte() for _ in range(count)]
# print(["0x%02x" % char for char in response], file=sys.stderr)
# sys.stderr.flush()
return response
def __read_byte(self):
chars = self._spi.read_bytes(1)
time.sleep(self.__DELAY_TRANSFER)
return chars[0]
def __write_bytes(self, chars):
for char in chars:
self.__write_byte(char)
    def __write_byte(self, char):
        """Write one byte to the SPI bus, then pause for the command delay."""
        self._spi.xfer([char])
        time.sleep(self.__DELAY_CMD)
| |
"""Nest Media Source implementation.
The Nest MediaSource implementation provides a directory tree of devices and
events and associated media (e.g. an image or clip). Camera device events
publish an event message, received by the subscriber library. Media for an
event, such as camera image or clip, may be fetched from the cloud during a
short time window after the event happens.
The actual management of associating events to devices, fetching media for
events, caching, and the overall lifetime of recent events are managed outside
of the Nest MediaSource.
Users may also record clips to local storage, unrelated to this MediaSource.
For additional background on Nest Camera events see:
https://developers.google.com/nest/device-access/api/camera#handle_camera_events
"""
from __future__ import annotations
from collections.abc import Mapping
from dataclasses import dataclass
import logging
import os
from google_nest_sdm.camera_traits import CameraClipPreviewTrait, CameraEventImageTrait
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventImageType, ImageEventBase
from google_nest_sdm.event_media import EventMediaStore
from google_nest_sdm.google_nest_subscriber import GoogleNestSubscriber
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_IMAGE,
MEDIA_CLASS_VIDEO,
MEDIA_TYPE_IMAGE,
MEDIA_TYPE_VIDEO,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.media_source.error import Unresolvable
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.storage import Store
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.util import dt as dt_util, raise_if_invalid_filename
from .const import DATA_SUBSCRIBER, DOMAIN
from .device_info import NestDeviceInfo
from .events import EVENT_NAME_MAP, MEDIA_SOURCE_EVENT_TITLE_MAP
_LOGGER = logging.getLogger(__name__)
MEDIA_SOURCE_TITLE = "Nest"
DEVICE_TITLE_FORMAT = "{device_name}: Recent Events"
CLIP_TITLE_FORMAT = "{event_name} @ {event_time}"
EVENT_MEDIA_API_URL_FORMAT = "/api/nest/event_media/{device_id}/{event_id}"
STORAGE_KEY = "nest.event_media"
STORAGE_VERSION = 1
# Buffer writes every few minutes (plus guaranteed to be written at shutdown)
STORAGE_SAVE_DELAY_SECONDS = 120
# Path under config directory
MEDIA_PATH = f"{DOMAIN}/event_media"
# Size of small in-memory disk cache to avoid excessive disk reads
DISK_READ_LRU_MAX_SIZE = 32
async def async_get_media_event_store(
    hass: HomeAssistant, subscriber: GoogleNestSubscriber
) -> EventMediaStore:
    """Create the disk backed EventMediaStore."""
    media_path = hass.config.path(MEDIA_PATH)

    def _ensure_media_dir() -> None:
        os.makedirs(media_path, exist_ok=True)

    # Directory creation is blocking I/O; keep it off the event loop.
    await hass.async_add_executor_job(_ensure_media_dir)
    return NestEventMediaStore(
        hass,
        subscriber,
        Store(hass, STORAGE_VERSION, STORAGE_KEY, private=True),
        media_path,
    )
class NestEventMediaStore(EventMediaStore):
    """Storage hook to locally persist nest media for events.
    This interface is meant to provide two storage features:
    - media storage of events (jpgs, mp4s)
    - metadata about events (e.g. motion, person), filename of the media, etc.
    The default implementation in nest is in memory, and this allows the data
    to be backed by disk.
    The nest event media manager internal to the subscriber manages the lifetime
    of individual objects stored here (e.g. purging when going over storage
    limits). This store manages the addition/deletion once instructed.
    """
    def __init__(
        self,
        hass: HomeAssistant,
        subscriber: GoogleNestSubscriber,
        store: Store,
        media_path: str,
    ) -> None:
        """Initialize NestEventMediaStore."""
        self._hass = hass
        self._subscriber = subscriber
        self._store = store
        self._media_path = media_path
        # Event metadata; lazily populated by async_load() on first use.
        self._data: dict | None = None
        # Nest device name -> home assistant device id; filled by _get_devices().
        self._devices: Mapping[str, str] | None = {}
    async def async_load(self) -> dict | None:
        """Load data."""
        if self._data is None:
            self._devices = await self._get_devices()
            data = await self._store.async_load()
            if data is None:
                _LOGGER.debug("Loaded empty event store")
                self._data = {}
            elif isinstance(data, dict):
                _LOGGER.debug("Loaded event store with %d records", len(data))
                self._data = data
            else:
                # Unknown/corrupt payload: fail loudly rather than guess.
                raise ValueError(
                    "Unexpected data in storage version={}, key={}".format(
                        STORAGE_VERSION, STORAGE_KEY
                    )
                )
        return self._data
    async def async_save(self, data: dict) -> None:
        """Save data."""
        self._data = data
        def provide_data() -> dict:
            return data
        # Debounced write (see STORAGE_SAVE_DELAY_SECONDS); flushed at shutdown.
        self._store.async_delay_save(provide_data, STORAGE_SAVE_DELAY_SECONDS)
    def get_media_key(self, device_id: str, event: ImageEventBase) -> str:
        """Return the filename to use for a new event."""
        # Convert a nest device id to a home assistant device id
        device_id_str = (
            self._devices.get(device_id, f"{device_id}-unknown_device")
            if self._devices
            else "unknown_device"
        )
        event_id_str = event.event_session_id
        try:
            # Guard against path separators etc. in the session id; an invalid
            # id is dropped from the filename rather than rejected outright.
            raise_if_invalid_filename(event_id_str)
        except ValueError:
            event_id_str = ""
        time_str = str(int(event.timestamp.timestamp()))
        event_type_str = EVENT_NAME_MAP.get(event.event_type, "event")
        suffix = "jpg" if event.event_image_type == EventImageType.IMAGE else "mp4"
        return f"{device_id_str}/{time_str}-{event_id_str}-{event_type_str}.{suffix}"
    def get_media_filename(self, media_key: str) -> str:
        """Return the filename in storage for a media key."""
        return f"{self._media_path}/{media_key}"
    async def async_load_media(self, media_key: str) -> bytes | None:
        """Load media content."""
        filename = self.get_media_filename(media_key)
        def load_media(filename: str) -> bytes | None:
            # Missing file is not an error: media may have been purged.
            if not os.path.exists(filename):
                return None
            _LOGGER.debug("Reading event media from disk store: %s", filename)
            with open(filename, "rb") as media:
                return media.read()
        try:
            # Blocking file I/O runs in the executor, off the event loop.
            return await self._hass.async_add_executor_job(load_media, filename)
        except OSError as err:
            _LOGGER.error("Unable to read media file: %s %s", filename, err)
            return None
    async def async_save_media(self, media_key: str, content: bytes) -> None:
        """Write media content."""
        filename = self.get_media_filename(media_key)
        def save_media(filename: str, content: bytes) -> None:
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            if os.path.exists(filename):
                # Media is treated as immutable once written; first write wins.
                _LOGGER.debug(
                    "Event media already exists, not overwriting: %s", filename
                )
                return
            _LOGGER.debug("Saving event media to disk store: %s", filename)
            with open(filename, "wb") as media:
                media.write(content)
        try:
            # Write failures are logged, not raised: media is best-effort.
            await self._hass.async_add_executor_job(save_media, filename, content)
        except OSError as err:
            _LOGGER.error("Unable to write media file: %s %s", filename, err)
    async def async_remove_media(self, media_key: str) -> None:
        """Remove media content."""
        filename = self.get_media_filename(media_key)
        def remove_media(filename: str) -> None:
            if not os.path.exists(filename):
                return None
            _LOGGER.debug("Removing event media from disk store: %s", filename)
            os.remove(filename)
        try:
            await self._hass.async_add_executor_job(remove_media, filename)
        except OSError as err:
            _LOGGER.error("Unable to remove media file: %s %s", filename, err)
    async def _get_devices(self) -> Mapping[str, str]:
        """Return a mapping of nest device id to home assistant device id."""
        device_registry = dr.async_get(self._hass)
        device_manager = await self._subscriber.async_get_device_manager()
        devices = {}
        for device in device_manager.devices.values():
            # Only devices already registered with home assistant are mapped.
            if device_entry := device_registry.async_get_device(
                {(DOMAIN, device.name)}
            ):
                devices[device.name] = device_entry.id
        return devices
async def async_get_media_source(hass: HomeAssistant) -> MediaSource:
    """Set up Nest media source.
    Factory returning a new NestMediaSource bound to *hass*.
    """
    return NestMediaSource(hass)
async def get_media_source_devices(hass: HomeAssistant) -> Mapping[str, Device]:
    """Return a mapping of device id to eligible Nest event media devices."""
    if DATA_SUBSCRIBER not in hass.data[DOMAIN]:
        # Integration unloaded, or is legacy nest integration
        return {}
    subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER]
    device_manager = await subscriber.async_get_device_manager()
    device_registry = dr.async_get(hass)
    devices: dict[str, Device] = {}
    for device in device_manager.devices.values():
        # Only cameras that can produce event media (image or clip) qualify.
        supports_event_media = (
            CameraEventImageTrait.NAME in device.traits
            or CameraClipPreviewTrait.NAME in device.traits
        )
        if not supports_event_media:
            continue
        device_entry = device_registry.async_get_device({(DOMAIN, device.name)})
        if device_entry:
            devices[device_entry.id] = device
    return devices
@dataclass
class MediaId:
    """Media identifier for a node in the Media Browse tree.
    A MediaId can refer to either a device, or a specific event for a device
    that is associated with media (e.g. image or video clip).
    """

    device_id: str
    event_id: str | None = None

    @property
    def identifier(self) -> str:
        """Media identifier represented as a string."""
        return (
            f"{self.device_id}/{self.event_id}" if self.event_id else self.device_id
        )
def parse_media_id(identifier: str | None = None) -> MediaId | None:
    """Parse the identifier path string into a MediaId."""
    if not identifier:
        return None
    segments = identifier.split("/")
    device_id = segments[0]
    # Anything beyond the second path segment is ignored.
    event_id = segments[1] if len(segments) > 1 else None
    return MediaId(device_id, event_id)
class NestMediaSource(MediaSource):
    """Provide Nest Media Sources for Nest Cameras.
    The media source generates a directory tree of devices and media associated
    with events for each device (e.g. motion, person, etc). Each node in the
    tree has a unique MediaId.
    The lifecycle for event media is handled outside of NestMediaSource, and
    instead it just asks the device for all events it knows about.
    """
    name: str = MEDIA_SOURCE_TITLE
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize NestMediaSource."""
        super().__init__(DOMAIN)
        self.hass = hass
    async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
        """Resolve media identifier to a url.
        Raises Unresolvable when the identifier does not name a known
        device/event pair.
        """
        media_id: MediaId | None = parse_media_id(item.identifier)
        if not media_id:
            raise Unresolvable("No identifier specified for MediaSourceItem")
        if not media_id.event_id:
            raise Unresolvable("Identifier missing an event_id: %s" % item.identifier)
        devices = await self.devices()
        if not (device := devices.get(media_id.device_id)):
            raise Unresolvable(
                "Unable to find device with identifier: %s" % item.identifier
            )
        events = await _get_events(device)
        if media_id.event_id not in events:
            raise Unresolvable(
                "Unable to find event with identifier: %s" % item.identifier
            )
        event = events[media_id.event_id]
        # The media bytes themselves are served by the nest event media view.
        return PlayMedia(
            EVENT_MEDIA_API_URL_FORMAT.format(
                device_id=media_id.device_id, event_id=media_id.event_id
            ),
            event.event_image_type.content_type,
        )
    async def async_browse_media(self, item: MediaSourceItem) -> BrowseMediaSource:
        """Return media for the specified level of the directory tree.
        The top level is the root that contains devices. Inside each device are
        media for events for that device.
        """
        media_id: MediaId | None = parse_media_id(item.identifier)
        _LOGGER.debug(
            "Browsing media for identifier=%s, media_id=%s", item.identifier, media_id
        )
        devices = await self.devices()
        if media_id is None:
            # Browse the root and return child devices
            browse_root = _browse_root()
            browse_root.children = []
            for device_id, child_device in devices.items():
                browse_root.children.append(
                    _browse_device(MediaId(device_id), child_device)
                )
            return browse_root
        # Browse either a device or events within a device
        if not (device := devices.get(media_id.device_id)):
            # Fixed typo in this error message ("identiifer" -> "identifier").
            raise BrowseError(
                "Unable to find device with identifier: %s" % item.identifier
            )
        if media_id.event_id is None:
            # Browse a specific device and return child events
            browse_device = _browse_device(media_id, device)
            browse_device.children = []
            events = await _get_events(device)
            for child_event in events.values():
                event_id = MediaId(media_id.device_id, child_event.event_session_id)
                browse_device.children.append(
                    _browse_event(event_id, device, child_event)
                )
            return browse_device
        # Browse a specific event
        events = await _get_events(device)
        if not (event := events.get(media_id.event_id)):
            # Fixed typo in this error message ("identiifer" -> "identifier").
            raise BrowseError(
                "Unable to find event with identifier: %s" % item.identifier
            )
        return _browse_event(media_id, device, event)
    async def devices(self) -> Mapping[str, Device]:
        """Return all event media related devices."""
        return await get_media_source_devices(self.hass)
async def _get_events(device: Device) -> Mapping[str, ImageEventBase]:
    """Return relevant events for the specified device, keyed by session id."""
    result: dict[str, ImageEventBase] = {}
    for event in await device.event_media_manager.async_events():
        result[event.event_session_id] = event
    return result
def _browse_root() -> BrowseMediaSource:
    """Return devices in the root."""
    return BrowseMediaSource(
        domain=DOMAIN,
        # Empty identifier marks the root node.
        identifier="",
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_type=MEDIA_TYPE_VIDEO,
        # Children are device directories holding video/image events.
        children_media_class=MEDIA_CLASS_VIDEO,
        title=MEDIA_SOURCE_TITLE,
        can_play=False,
        can_expand=True,
        thumbnail=None,
        children=[],
    )
def _browse_device(device_id: MediaId, device: Device) -> BrowseMediaSource:
    """Return details for the specified device."""
    device_info = NestDeviceInfo(device)
    return BrowseMediaSource(
        domain=DOMAIN,
        identifier=device_id.identifier,
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_type=MEDIA_TYPE_VIDEO,
        children_media_class=MEDIA_CLASS_VIDEO,
        title=DEVICE_TITLE_FORMAT.format(device_name=device_info.device_name),
        can_play=False,
        # Expandable: children (events) are filled in by the caller.
        can_expand=True,
        thumbnail=None,
        children=[],
    )
def _browse_event(
    event_id: MediaId, device: Device, event: ImageEventBase
) -> BrowseMediaSource:
    """Build a BrowseMediaSource for a specific event."""
    return BrowseMediaSource(
        domain=DOMAIN,
        identifier=event_id.identifier,
        # NOTE(review): clip-preview events (mp4) are also labeled with the
        # image media class/content type here -- confirm this is intended.
        media_class=MEDIA_CLASS_IMAGE,
        media_content_type=MEDIA_TYPE_IMAGE,
        title=CLIP_TITLE_FORMAT.format(
            event_name=MEDIA_SOURCE_EVENT_TITLE_MAP.get(event.event_type, "Event"),
            event_time=dt_util.as_local(event.timestamp).strftime(DATE_STR_FORMAT),
        ),
        # Only clip previews are playable; snapshot images are view-only.
        can_play=(event.event_image_type == EventImageType.CLIP_PREVIEW),
        can_expand=False,
        thumbnail=None,
        children=[],
    )
| |
"""
"""
__author__ = "unkonwn"
import bisect
import itertools
import operator
class _BNode(object):
    """A single BTree node: sorted ``contents`` plus child pointers.
    A leaf has an empty ``children`` list; a branch keeps exactly one more
    child than it has items (children bracket the items).
    """
    __slots__ = ["tree", "contents", "children"]
    def __init__(self, tree, contents=None, children=None):
        self.tree = tree
        self.contents = contents or []
        self.children = children or []
        if self.children:
            assert len(self.contents) + 1 == len(self.children), \
                "one more child than data item required"
    def __repr__(self):
        # old-style and/or ternary: a non-empty children list means Branch
        name = getattr(self, "children", 0) and "Branch" or "Leaf"
        return "<%s %s>" % (name, ", ".join(map(str, self.contents)))
    def lateral(self, parent, parent_index, dest, dest_index):
        """Rotate one item (and, for branches, one child) into a sibling
        through the separating item held by ``parent``."""
        if parent_index > dest_index:
            # dest is the left sibling: rotate our smallest item leftward
            dest.contents.append(parent.contents[dest_index])
            parent.contents[dest_index] = self.contents.pop(0)
            if self.children:
                dest.children.append(self.children.pop(0))
        else:
            # dest is the right sibling: rotate our largest item rightward
            dest.contents.insert(0, parent.contents[parent_index])
            parent.contents[parent_index] = self.contents.pop()
            if self.children:
                dest.children.insert(0, self.children.pop())
    def shrink(self, ancestors):
        """Fix an overfull node: lend an item to a sibling, or split and
        push the median up to the parent (possibly recursing upward)."""
        parent = None
        if ancestors:
            parent, parent_index = ancestors.pop()
            # try to lend to the left neighboring sibling
            if parent_index:
                left_sib = parent.children[parent_index - 1]
                if len(left_sib.contents) < self.tree.order:
                    self.lateral(
                        parent, parent_index, left_sib, parent_index - 1)
                    return
            # try the right neighbor
            if parent_index + 1 < len(parent.children):
                right_sib = parent.children[parent_index + 1]
                if len(right_sib.contents) < self.tree.order:
                    self.lateral(
                        parent, parent_index, right_sib, parent_index + 1)
                    return
        center = len(self.contents) // 2  # NOTE(review): unused; split() recomputes
        sibling, push = self.split()
        if not parent:
            # splitting the root: the tree grows one level taller
            parent, parent_index = self.tree.BRANCH(
                self.tree, children=[self]), 0
            self.tree._root = parent
        # pass the median up to the parent
        parent.contents.insert(parent_index, push)
        parent.children.insert(parent_index + 1, sibling)
        if len(parent.contents) > parent.tree.order:
            parent.shrink(ancestors)
    def grow(self, ancestors):
        """Fix an underfull node: borrow from a sibling if it can spare an
        item, otherwise merge with a sibling (possibly recursing upward)."""
        parent, parent_index = ancestors.pop()
        minimum = self.tree.order // 2
        left_sib = right_sib = None
        # try to borrow from the right sibling
        if parent_index + 1 < len(parent.children):
            right_sib = parent.children[parent_index + 1]
            if len(right_sib.contents) > minimum:
                right_sib.lateral(parent, parent_index + 1, self, parent_index)
                return
        # try to borrow from the left sibling
        if parent_index:
            left_sib = parent.children[parent_index - 1]
            if len(left_sib.contents) > minimum:
                left_sib.lateral(parent, parent_index - 1, self, parent_index)
                return
        # consolidate with a sibling - try left first
        if left_sib:
            left_sib.contents.append(parent.contents[parent_index - 1])
            left_sib.contents.extend(self.contents)
            if self.children:
                left_sib.children.extend(self.children)
            parent.contents.pop(parent_index - 1)
            parent.children.pop(parent_index)
        else:
            self.contents.append(parent.contents[parent_index])
            self.contents.extend(right_sib.contents)
            if self.children:
                self.children.extend(right_sib.children)
            parent.contents.pop(parent_index)
            parent.children.pop(parent_index + 1)
        if len(parent.contents) < minimum:
            if ancestors:
                # parent is not the root
                parent.grow(ancestors)
            elif not parent.contents:
                # parent is root, and its now empty
                self.tree._root = left_sib or self
    def split(self):
        """Split around the median; return (new right sibling, median item)."""
        center = len(self.contents) // 2
        median = self.contents[center]
        sibling = type(self)(
            self.tree,
            self.contents[center + 1:],
            self.children[center + 1:])
        self.contents = self.contents[:center]
        self.children = self.children[:center + 1]
        return sibling, median
    def insert(self, index, item, ancestors):
        """Insert ``item`` at ``index``; rebalance upward if overfull."""
        self.contents.insert(index, item)
        if len(self.contents) > self.tree.order:
            self.shrink(ancestors)
    def remove(self, index, ancestors):
        """Remove the item at ``index``; rebalance upward if underfull."""
        minimum = self.tree.order // 2
        if self.children:
            # find the smallest in the right subtree, exchange the value with the current node
            # then delete the smallest one, just like the idea in the binary search tree.
            # Note: only if len(descendent.contents) > minimum, we do this way in order to avoid 'grow' operation.
            # Or we will inspect the left tree and do it any way
            # all internal nodes have both left and right subtree.
            additional_ancestors = [(self, index + 1)]
            descendent = self.children[index + 1]
            while descendent.children:
                additional_ancestors.append((descendent, 0))
                descendent = descendent.children[0]
            if len(descendent.contents) > minimum:
                ancestors.extend(additional_ancestors)
                self.contents[index] = descendent.contents[0]
                descendent.remove(0, ancestors)
                return
            # fall back to the left child, and exchange with the biggest, then delete the biggest anyway.
            additional_ancestors = [(self, index)]
            descendent = self.children[index]
            while descendent.children:
                additional_ancestors.append(
                    (descendent, len(descendent.children) - 1))
                descendent = descendent.children[-1]
            ancestors.extend(additional_ancestors)
            self.contents[index] = descendent.contents[-1]
            # NOTE(review): descendent is a leaf here, so this index is -1,
            # which pops the last item -- probably meant len(contents) - 1;
            # behavior happens to be identical. Confirm intent.
            descendent.remove(len(descendent.children) - 1, ancestors)
        else:
            self.contents.pop(index)
            if len(self.contents) < minimum and ancestors:
                self.grow(ancestors)
class _BPlusLeaf(_BNode):
    """B+ tree leaf: parallel ``contents`` (keys) and ``data`` lists, plus a
    ``next`` pointer forming a singly linked chain across all leaves."""
    __slots__ = ["tree", "contents", "data", "next"]
    def __init__(self, tree, contents=None, data=None, next=None):
        self.tree = tree
        self.contents = contents or []
        self.data = data or []
        self.next = next
        assert len(self.contents) == len(self.data), "one data per key"
    def insert(self, index, key, data, ancestors):
        """Insert a key/datum pair at ``index``; split upward if overfull."""
        self.contents.insert(index, key)
        self.data.insert(index, data)
        if len(self.contents) > self.tree.order:
            self.shrink(ancestors)
    def lateral(self, parent, parent_index, dest, dest_index):
        """Rotate one key/datum pair into a sibling leaf, updating the
        parent's separator key to the new first key of the right leaf."""
        if parent_index > dest_index:
            dest.contents.append(self.contents.pop(0))
            dest.data.append(self.data.pop(0))
            parent.contents[dest_index] = self.contents[0]
        else:
            dest.contents.insert(0, self.contents.pop())
            dest.data.insert(0, self.data.pop())
            parent.contents[parent_index] = dest.contents[0]
    def split(self):
        """Split this leaf; the returned separator is a copy of the new
        sibling's first key (B+ style), and the leaf chain stays linked."""
        center = len(self.contents) // 2
        median = self.contents[center - 1]  # NOTE(review): unused
        sibling = type(self)(
            self.tree,
            self.contents[center:],
            self.data[center:],
            self.next)
        self.contents = self.contents[:center]
        self.data = self.data[:center]
        # keep the leaf chain intact
        self.next = sibling
        return sibling, sibling.contents[0]
    def remove(self, index, ancestors):
        """Remove the entry at ``index`` (walking into the next chained leaf
        when ``index`` is past this leaf's end)."""
        minimum = self.tree.order // 2
        if index >= len(self.contents):
            self, index = self.next, 0
        key = self.contents[index]
        # if any leaf that could accept the key can do so
        # without any rebalancing necessary, then go that route
        current = self
        # NOTE(review): the loop only runs while the key is the FIRST entry of
        # a leaf; when the key sits at index > 0 nothing is popped and grow()
        # is called without a removal -- looks suspicious, confirm with tests.
        while current is not None and current.contents[0] == key:
            if len(current.contents) > minimum:
                if current.contents[0] == key:
                    index = 0
                else:
                    # NOTE(review): unreachable given the loop condition above
                    index = bisect.bisect_left(current.contents, key)
                current.contents.pop(index)
                current.data.pop(index)
                return
            current = current.next
        self.grow(ancestors)
    def grow(self, ancestors):
        """Rebalance an underfull leaf: borrow from, or merge with, a sibling."""
        minimum = self.tree.order // 2
        parent, parent_index = ancestors.pop()
        left_sib = right_sib = None
        # try borrowing from a neighbor - try right first
        if parent_index + 1 < len(parent.children):
            right_sib = parent.children[parent_index + 1]
            if len(right_sib.contents) > minimum:
                right_sib.lateral(parent, parent_index + 1, self, parent_index)
                return
        # fallback to left
        if parent_index:
            left_sib = parent.children[parent_index - 1]
            if len(left_sib.contents) > minimum:
                left_sib.lateral(parent, parent_index - 1, self, parent_index)
                return
        # join with a neighbor - try left first
        if left_sib:
            left_sib.contents.extend(self.contents)
            left_sib.data.extend(self.data)
            parent.remove(parent_index - 1, ancestors)
            return
        # fallback to right
        self.contents.extend(right_sib.contents)
        self.data.extend(right_sib.data)
        parent.remove(parent_index, ancestors)
class BTree(object):
    """An in-memory B-tree of orderable items.
    ``order`` is the maximum number of items a node may hold before it must
    split. Branch and leaf nodes are both ``_BNode`` instances.
    """
    BRANCH = LEAF = _BNode
    def __init__(self, order):
        self.order = order
        # a new tree is a single empty leaf that is both root and bottom
        self._root = self._bottom = self.LEAF(self)
    def _path_to(self, item):
        """Return the (node, index) path from the root toward ``item``.
        Descent stops early when a branch node already holds ``item``;
        otherwise it continues to the leaf where ``item`` would be inserted.
        """
        current = self._root
        ancestry = []
        while getattr(current, "children", None):
            index = bisect.bisect_left(current.contents, item)
            ancestry.append((current, index))
            if index < len(current.contents) \
                    and current.contents[index] == item:
                return ancestry
            current = current.children[index]
        index = bisect.bisect_left(current.contents, item)
        ancestry.append((current, index))
        return ancestry
    def _present(self, item, ancestors):
        """Return True when the path computed by _path_to ends at ``item``."""
        last, index = ancestors[-1]
        return index < len(last.contents) and last.contents[index] == item
    def insert(self, item):
        """Insert ``item``, splitting nodes upward as needed."""
        ancestors = self._path_to(item)
        node, index = ancestors[-1]
        # if the path stopped at a branch (item already present there),
        # keep descending so the insertion happens at a leaf
        while getattr(node, "children", None):
            node = node.children[index]
            index = bisect.bisect_left(node.contents, item)
            ancestors.append((node, index))
        node, index = ancestors.pop()
        node.insert(index, item, ancestors)
    def remove(self, item):
        """Remove one occurrence of ``item``.
        Raises:
            ValueError: if ``item`` is not in the tree.
        """
        ancestors = self._path_to(item)
        if self._present(item, ancestors):
            node, index = ancestors.pop()
            node.remove(index, ancestors)
        else:
            raise ValueError("%r not in %s" % (item, self.__class__.__name__))
    def __contains__(self, item):
        return self._present(item, self._path_to(item))
    def __iter__(self):
        """Yield all items in sorted order via an in-order traversal."""
        def _recurse(node):
            if node.children:
                for child, item in zip(node.children, node.contents):
                    for child_item in _recurse(child):
                        yield child_item
                    yield item
                for child_item in _recurse(node.children[-1]):
                    yield child_item
            else:
                for item in node.contents:
                    yield item
        for item in _recurse(self._root):
            yield item
    def __repr__(self):
        def recurse(node, accum, depth):
            accum.append((" " * depth) + repr(node))
            for node in getattr(node, "children", []):
                recurse(node, accum, depth + 1)
        accum = []
        recurse(self._root, accum, 0)
        return "\n".join(accum)
    @classmethod
    def bulkload(cls, items, order):
        """Build a tree from already-sorted ``items`` without rebalancing."""
        tree = object.__new__(cls)
        tree.order = order
        leaves = tree._build_bulkloaded_leaves(items)
        tree._build_bulkloaded_branches(leaves)
        return tree
    def _build_bulkloaded_leaves(self, items):
        """Pack ``items`` into leaf nodes; return (leaves, separator items)."""
        minimum = self.order // 2
        leaves, seps = [[]], []
        for item in items:
            if len(leaves[-1]) < self.order:
                leaves[-1].append(item)
            else:
                seps.append(item)
                leaves.append([])
        # rebalance the last two leaves if the final leaf ended up underfull
        if len(leaves[-1]) < minimum and seps:
            last_two = leaves[-2] + [seps.pop()] + leaves[-1]
            leaves[-2] = last_two[:minimum]
            leaves[-1] = last_two[minimum + 1:]
            seps.append(last_two[minimum])
        return [self.LEAF(self, contents=node) for node in leaves], seps
    def _build_bulkloaded_branches(self, leaves_and_seps):
        """Build the branch levels above pre-built leaves.
        Accepts the (leaves, separators) pair from _build_bulkloaded_leaves.
        The original declaration used a Python 2-only tuple parameter
        ``(leaves, seps)``; unpacking inside the body keeps the same call
        signature while remaining valid under Python 3.
        """
        leaves, seps = leaves_and_seps
        minimum = self.order // 2
        levels = [leaves]
        while len(seps) > self.order + 1:
            items, nodes, seps = seps, [[]], []
            for item in items:
                if len(nodes[-1]) < self.order:
                    nodes[-1].append(item)
                else:
                    seps.append(item)
                    nodes.append([])
            if len(nodes[-1]) < minimum and seps:
                last_two = nodes[-2] + [seps.pop()] + nodes[-1]
                nodes[-2] = last_two[:minimum]
                nodes[-1] = last_two[minimum + 1:]
                seps.append(last_two[minimum])
            offset = 0
            for i, node in enumerate(nodes):
                children = levels[-1][offset:offset + len(node) + 1]
                nodes[i] = self.BRANCH(self, contents=node, children=children)
                offset += len(node) + 1
            levels.append(nodes)
        self._root = self.BRANCH(self, contents=seps, children=levels[-1])
class BPlusTree(BTree):
    """B+ tree: all data lives in chained leaves; branches hold separators.
    Python 2-only constructs from the original (``generator.next()``,
    ``itertools.izip``/``imap``, ``xrange``) have been replaced with forms
    valid on both Python 2 and Python 3; behavior and interface are unchanged.
    """
    LEAF = _BPlusLeaf
    def _get(self, key):
        """Yield every datum stored under ``key``, in stored order."""
        node, index = self._path_to(key)[-1]
        if index == len(node.contents):
            if node.next:
                node, index = node.next, 0
            else:
                return
        # duplicates may spill across the leaf chain; follow ``next``
        while node.contents[index] == key:
            yield node.data[index]
            index += 1
            if index == len(node.contents):
                if node.next:
                    node, index = node.next, 0
                else:
                    return
    def _path_to(self, item):
        """Like BTree._path_to, but always continue down to a leaf."""
        path = super(BPlusTree, self)._path_to(item)
        node, index = path[-1]
        while hasattr(node, "children"):
            node = node.children[index]
            index = bisect.bisect_left(node.contents, item)
            path.append((node, index))
        return path
    def get(self, key, default=None):
        """Return the first datum under ``key``, or ``default`` if absent.
        Note: also bound as __getitem__, so a missing key yields ``default``
        (None) rather than raising KeyError.
        """
        try:
            # next() builtin instead of generator.next() (py2-only method)
            return next(self._get(key))
        except StopIteration:
            return default
    def getlist(self, key):
        """Return every datum stored under ``key`` as a list."""
        return list(self._get(key))
    def insert(self, key, data):
        """Insert a key/datum pair; duplicates are allowed."""
        path = self._path_to(key)
        node, index = path.pop()
        node.insert(index, key, data, path)
    def remove(self, key):
        """Remove one entry stored under ``key``."""
        path = self._path_to(key)
        node, index = path.pop()
        node.remove(index, path)
    __getitem__ = get
    __setitem__ = insert
    __delitem__ = remove
    def __contains__(self, key):
        for item in self._get(key):
            return True
        return False
    def iteritems(self):
        """Yield (key, datum) pairs in key order by walking the leaf chain."""
        node = self._root
        while hasattr(node, "children"):
            node = node.children[0]
        while node:
            # zip() works on both py2 and py3 (itertools.izip is py2-only)
            for pair in zip(node.contents, node.data):
                yield pair
            node = node.next
    def iterkeys(self):
        """Yield keys in order (generator expression replaces itertools.imap)."""
        return (pair[0] for pair in self.iteritems())
    def itervalues(self):
        """Yield data in key order."""
        return (pair[1] for pair in self.iteritems())
    __iter__ = iterkeys
    def items(self):
        return list(self.iteritems())
    def keys(self):
        return list(self.iterkeys())
    def values(self):
        return list(self.itervalues())
    def _build_bulkloaded_leaves(self, items):
        """Pack sorted (key, datum) pairs into linked leaf nodes."""
        minimum = self.order // 2
        leaves, seps = [[]], []
        for item in items:
            if len(leaves[-1]) >= self.order:
                seps.append(item)
                leaves.append([])
            leaves[-1].append(item)
        # rebalance the last two leaves if the final leaf ended up underfull
        if len(leaves[-1]) < minimum and seps:
            last_two = leaves[-2] + leaves[-1]
            leaves[-2] = last_two[:minimum]
            leaves[-1] = last_two[minimum:]
            seps.append(last_two[minimum])
        leaves = [self.LEAF(
            self,
            contents=[p[0] for p in pairs],
            data=[p[1] for p in pairs])
            for pairs in leaves]
        # range() replaces xrange (py2-only)
        for i in range(len(leaves) - 1):
            leaves[i].next = leaves[i + 1]
        return leaves, [s[0] for s in seps]
#import random
#import unittest
#
#
#class BTreeTests(unittest.TestCase):
# def test_additions(self):
# bt = BTree(20)
# l = range(2000)
# for i, item in enumerate(l):
# bt.insert(item)
# self.assertEqual(list(bt), l[:i + 1])
#
# def test_bulkloads(self):
# bt = BTree.bulkload(range(2000), 20)
# self.assertEqual(list(bt), range(2000))
#
# def test_removals(self):
# bt = BTree(20)
# l = range(2000)
# map(bt.insert, l)
# rand = l[:]
# random.shuffle(rand)
# while l:
# self.assertEqual(list(bt), l)
# rem = rand.pop()
# l.remove(rem)
# bt.remove(rem)
# self.assertEqual(list(bt), l)
#
# def test_insert_regression(self):
# bt = BTree.bulkload(range(2000), 50)
#
# for i in xrange(100000):
# bt.insert(random.randrange(2000))
#
#
#class BPlusTreeTests(unittest.TestCase):
# def test_additions_sorted(self):
# bt = BPlusTree(20)
# l = range(2000)
#
# for item in l:
# bt.insert(item, str(item))
#
# for item in l:
# self.assertEqual(str(item), bt[item])
#
# self.assertEqual(l, list(bt))
#
# def test_additions_random(self):
# bt = BPlusTree(20)
# l = range(2000)
# random.shuffle(l)
#
# for item in l:
# bt.insert(item, str(item))
#
# for item in l:
# self.assertEqual(str(item), bt[item])
#
# self.assertEqual(range(2000), list(bt))
#
# def test_bulkload(self):
# bt = BPlusTree.bulkload(zip(range(2000), map(str, range(2000))), 20)
#
# self.assertEqual(list(bt), range(2000))
#
# self.assertEqual(
# list(bt.iteritems()),
# zip(range(2000), map(str, range(2000))))
def main():
    """Ad-hoc demo: build a tiny BTree and insert a handful of keys."""
    tree = BTree(2)
    for key in (8, 20, 9, 11, 15):
        tree.insert(key)
# Script entry point: runs the ad-hoc demo (unit tests are commented out above).
if __name__ == '__main__':
    #unittest.main()
    main()
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests transmission of tickets across gRPC-on-the-wire."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.framework.interfaces.links import links
from grpc_test import test_common
from grpc_test._links import _proto_scenarios
from grpc_test.framework.common import test_constants
from grpc_test.framework.interfaces.links import test_cases
from grpc_test.framework.interfaces.links import test_utilities
_IDENTITY = lambda x: x
class TransmissionTest(test_cases.TransmissionTest, unittest.TestCase):
    """Concrete transmission test wired onto the invocation/service links
    backed by the intermediary-low adapter."""
    def create_transmitting_links(self):
        """Start a service link on an OS-assigned port plus an invocation
        link connected to it; return (invocation_link, service_link)."""
        service_link = service.service_link(
            {self.group_and_method(): self.deserialize_request},
            {self.group_and_method(): self.serialize_response})
        # port 0 requests any free port from the runtime
        port = service_link.add_port(0, None)
        service_link.start()
        channel = _intermediary_low.Channel('localhost:%d' % port, None)
        invocation_link = invocation.invocation_link(
            channel, 'localhost',
            {self.group_and_method(): self.serialize_request},
            {self.group_and_method(): self.deserialize_response})
        invocation_link.start()
        return invocation_link, service_link
    def destroy_transmitting_links(self, invocation_side_link, service_side_link):
        """Stop both links; the service side shuts down gracefully."""
        invocation_side_link.stop()
        service_side_link.stop_gracefully()
    def create_invocation_initial_metadata(self):
        """Invocation-side initial metadata; keys ending "-bin" carry binary values."""
        return (
            ('first invocation initial metadata key', 'just a string value'),
            ('second invocation initial metadata key', '0123456789'),
            ('third invocation initial metadata key-bin', '\x00\x57' * 100),
        )
    def create_invocation_terminal_metadata(self):
        """The invocation side sends no terminal metadata."""
        return None
    def create_service_initial_metadata(self):
        """Service-side initial metadata; keys ending "-bin" carry binary values."""
        return (
            ('first service initial metadata key', 'just another string value'),
            ('second service initial metadata key', '9876543210'),
            ('third service initial metadata key-bin', '\x00\x59\x02' * 100),
        )
    def create_service_terminal_metadata(self):
        """Service-side trailing metadata."""
        return (
            ('first service terminal metadata key', 'yet another string value'),
            ('second service terminal metadata key', 'abcdefghij'),
            ('third service terminal metadata key-bin', '\x00\x37' * 100),
        )
    def create_invocation_completion(self):
        """The invocation side supplies no status code or details."""
        return None, None
    def create_service_completion(self):
        """The service side completes with OK and a details string."""
        return _intermediary_low.Code.OK, 'An exuberant test "details" message!'
    def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
        """Assert the transmitted metadata is equivalent to what was sent."""
        self.assertTrue(
            test_common.metadata_transmitted(
                original_metadata, transmitted_metadata),
            '%s erroneously transmitted as %s' % (
                original_metadata, transmitted_metadata))
class RoundTripTest(unittest.TestCase):
  """End-to-end round trips of tickets through paired invocation/service
  links connected over a real local channel."""

  def testZeroMessageRoundTrip(self):
    # Completes an operation that carries no payloads in either direction,
    # using identity (de)serializers on both sides.
    test_operation_id = object()
    test_group = 'test package.Test Group'
    test_method = 'test method'
    identity_transformation = {(test_group, test_method): _IDENTITY}
    test_code = _intermediary_low.Code.OK
    test_message = 'a test message'
    service_link = service.service_link(
        identity_transformation, identity_transformation)
    service_mate = test_utilities.RecordingLink()
    service_link.join_link(service_mate)
    port = service_link.add_port(0, None)
    service_link.start()
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_link = invocation.invocation_link(
        channel, 'localhost', identity_transformation, identity_transformation)
    invocation_mate = test_utilities.RecordingLink()
    invocation_link.join_link(invocation_mate)
    invocation_link.start()
    # A single ticket both opens and (COMPLETION) terminates the invocation.
    invocation_ticket = links.Ticket(
        test_operation_id, 0, test_group, test_method,
        links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
        None, None, None, None, links.Ticket.Termination.COMPLETION)
    invocation_link.accept_ticket(invocation_ticket)
    service_mate.block_until_tickets_satisfy(test_cases.terminated)
    # Answer with a single terminal ticket carrying the status code/message.
    service_ticket = links.Ticket(
        service_mate.tickets()[-1].operation_id, 0, None, None, None, None,
        None, None, None, None, test_code, test_message,
        links.Ticket.Termination.COMPLETION)
    service_link.accept_ticket(service_ticket)
    invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
    invocation_link.stop()
    service_link.stop_gracefully()
    # Both sides must observe normal COMPLETION, not an error termination.
    self.assertIs(
        service_mate.tickets()[-1].termination,
        links.Ticket.Termination.COMPLETION)
    self.assertIs(
        invocation_mate.tickets()[-1].termination,
        links.Ticket.Termination.COMPLETION)

  def _perform_scenario_test(self, scenario):
    # Drives one request/response scenario: streams each request in its own
    # ticket (sequence numbers must increase by one), echoes one response per
    # request, completes both sides, then verifies the payloads that crossed.
    test_operation_id = object()
    test_group, test_method = scenario.group_and_method()
    test_code = _intermediary_low.Code.OK
    test_message = 'a scenario test message'
    service_link = service.service_link(
        {(test_group, test_method): scenario.deserialize_request},
        {(test_group, test_method): scenario.serialize_response})
    service_mate = test_utilities.RecordingLink()
    service_link.join_link(service_mate)
    port = service_link.add_port(0, None)
    service_link.start()
    channel = _intermediary_low.Channel('localhost:%d' % port, None)
    invocation_link = invocation.invocation_link(
        channel, 'localhost',
        {(test_group, test_method): scenario.serialize_request},
        {(test_group, test_method): scenario.deserialize_response})
    invocation_mate = test_utilities.RecordingLink()
    invocation_link.join_link(invocation_mate)
    invocation_link.start()
    # Sequence number 0: open the operation without terminating it.
    invocation_ticket = links.Ticket(
        test_operation_id, 0, test_group, test_method,
        links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
        None, None, None, None, None)
    invocation_link.accept_ticket(invocation_ticket)
    requests = scenario.requests()
    for request_index, request in enumerate(requests):
      # Sequence numbers 1..n carry the request payloads (allowance 1 each).
      request_ticket = links.Ticket(
          test_operation_id, 1 + request_index, None, None, None, None, 1, None,
          request, None, None, None, None)
      invocation_link.accept_ticket(request_ticket)
      service_mate.block_until_tickets_satisfy(
          test_cases.at_least_n_payloads_received_predicate(1 + request_index))
      response_ticket = links.Ticket(
          service_mate.tickets()[0].operation_id, request_index, None, None,
          None, None, 1, None, scenario.response_for_request(request), None,
          None, None, None)
      service_link.accept_ticket(response_ticket)
      invocation_mate.block_until_tickets_satisfy(
          test_cases.at_least_n_payloads_received_predicate(1 + request_index))
    request_count = len(requests)
    # Terminal tickets: invocation side at sequence request_count + 1,
    # service side at request_count (its payloads started at 0).
    invocation_completion_ticket = links.Ticket(
        test_operation_id, request_count + 1, None, None, None, None, None,
        None, None, None, None, None, links.Ticket.Termination.COMPLETION)
    invocation_link.accept_ticket(invocation_completion_ticket)
    service_mate.block_until_tickets_satisfy(test_cases.terminated)
    service_completion_ticket = links.Ticket(
        service_mate.tickets()[0].operation_id, request_count, None, None, None,
        None, None, None, None, None, test_code, test_message,
        links.Ticket.Termination.COMPLETION)
    service_link.accept_ticket(service_completion_ticket)
    invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
    invocation_link.stop()
    service_link.stop_gracefully()
    # Collect only payload-bearing tickets for scenario verification.
    observed_requests = tuple(
        ticket.payload for ticket in service_mate.tickets()
        if ticket.payload is not None)
    observed_responses = tuple(
        ticket.payload for ticket in invocation_mate.tickets()
        if ticket.payload is not None)
    self.assertTrue(scenario.verify_requests(observed_requests))
    self.assertTrue(scenario.verify_responses(observed_responses))

  def testEmptyScenario(self):
    self._perform_scenario_test(_proto_scenarios.EmptyScenario())

  def testBidirectionallyUnaryScenario(self):
    self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())

  def testBidirectionallyStreamingScenario(self):
    self._perform_scenario_test(
        _proto_scenarios.BidirectionallyStreamingScenario())
# Script entry point: run every test case in this module with per-test output.
if __name__ == '__main__':
  unittest.main(verbosity=2)
| |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import logging
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro import handlers, infer
from numpyro.contrib.control_flow import scan
import numpyro.distributions as dist
from numpyro.distributions.util import is_identically_one
# put all funsor-related imports here, so test collection works without funsor
try:
import funsor
from numpyro.contrib.funsor import config_enumerate, infer_discrete, markov
funsor.set_backend("jax")
except ImportError:
pytestmark = pytest.mark.skip(reason="funsor is not installed")
logger = logging.getLogger(__name__)
def log_prob_sum(trace):
    """Return the total log joint density of all sample sites in *trace*.

    Per-site log probabilities are scaled by the site's ``scale`` (when it
    is set and not identically one), summed over all elements, and
    accumulated across sites.
    """
    total = jnp.zeros(())
    for site in trace.values():
        if site["type"] != "sample":
            continue
        value = site["value"]
        intermediates = site["intermediates"]
        # Sites with cached intermediates use the two-argument log_prob form.
        if intermediates:
            site_log_prob = site["fn"].log_prob(value, intermediates)
        else:
            site_log_prob = site["fn"].log_prob(value)
        scale = site["scale"]
        if (scale is not None) and (not is_identically_one(scale)):
            site_log_prob = scale * site_log_prob
        total = total + jnp.sum(site_log_prob)
    return total
@pytest.mark.parametrize("length", [1, 2, 10])
@pytest.mark.parametrize("temperature", [0, 1])
def test_hmm_smoke(length, temperature):
    """Smoke-test infer_discrete on a Python-loop HMM.

    temperature=0 decodes MAP states, temperature=1 samples the posterior.
    """
    # This should match the example in the infer_discrete docstring.
    def hmm(data, hidden_dim=10):
        transition = 0.3 / hidden_dim + 0.7 * jnp.eye(hidden_dim)
        means = jnp.arange(float(hidden_dim))
        states = [0]
        for t in markov(range(len(data))):
            states.append(
                numpyro.sample(
                    "states_{}".format(t), dist.Categorical(transition[states[-1]])
                )
            )
            data[t] = numpyro.sample(
                "obs_{}".format(t), dist.Normal(means[states[-1]], 1.0), obs=data[t]
            )
        return states, data
    # Generate data from the model itself (obs=None), then decode it.
    true_states, data = handlers.seed(hmm, 0)([None] * length)
    assert len(data) == length
    assert len(true_states) == 1 + len(data)
    decoder = infer_discrete(
        config_enumerate(hmm), temperature=temperature, rng_key=random.PRNGKey(1)
    )
    inferred_states, _ = decoder(data)
    assert len(inferred_states) == len(true_states)
    logger.info("true states: {}".format(list(map(int, true_states))))
    logger.info("inferred states: {}".format(list(map(int, inferred_states))))
@pytest.mark.parametrize(
    "length",
    [
        1,
        2,
        pytest.param(
            10,
            marks=pytest.mark.xfail(
                reason="adjoint does not work with markov sum product yet."
            ),
        ),
    ],
)
@pytest.mark.parametrize("temperature", [0, 1])
def test_scan_hmm_smoke(length, temperature):
    """Smoke-test infer_discrete on the same HMM written with scan."""
    # This should match the example in the infer_discrete docstring.
    def hmm(data, hidden_dim=10):
        transition = 0.3 / hidden_dim + 0.7 * jnp.eye(hidden_dim)
        means = jnp.arange(float(hidden_dim))
        def transition_fn(state, y):
            state = numpyro.sample("states", dist.Categorical(transition[state]))
            y = numpyro.sample("obs", dist.Normal(means[state], 1.0), obs=y)
            return state, (state, y)
        _, (states, data) = scan(transition_fn, 0, data, length=length)
        # Prepend the fixed initial state so lengths match the loop version.
        return [0] + [s for s in states], data
    true_states, data = handlers.seed(hmm, 0)(None)
    assert len(data) == length
    assert len(true_states) == 1 + len(data)
    decoder = infer_discrete(
        config_enumerate(hmm), temperature=temperature, rng_key=random.PRNGKey(1)
    )
    inferred_states, _ = decoder(data)
    assert len(inferred_states) == len(true_states)
    logger.info("true states: {}".format(list(map(int, true_states))))
    logger.info("inferred states: {}".format(list(map(int, inferred_states))))
def vectorize_model(model, size, dim):
    """Wrap *model* so each call runs under a "particles" plate of the
    given size at plate dimension *dim* (used to draw many samples at once)."""
    def vectorized(*args, **kwargs):
        with numpyro.plate("particles", size=size, dim=dim):
            return model(*args, **kwargs)
    return vectorized
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_1(temperature):
    """Posterior (temperature=1) or MAP (temperature=0) over one latent z."""
    # +-------+
    # z --|--> x |
    # +-------+
    num_particles = 10000
    data = np.array([1.0, 2.0, 3.0])
    @config_enumerate
    def model(z=None):
        p = numpyro.param("p", np.array([0.75, 0.25]))
        iz = numpyro.sample("z", dist.Categorical(p), obs=z)
        # Map the categorical index onto the Normal's mean values {0.0, 1.0}.
        z = jnp.array([0.0, 1.0])[iz]
        logger.info("z.shape = {}".format(z.shape))
        with numpyro.plate("data", 3):
            numpyro.sample("x", dist.Normal(z, 1.0), obs=data)
    first_available_dim = -3
    # temperature=1 needs many parallel particles to estimate the posterior
    # mean; temperature=0 is deterministic so a single run suffices.
    vectorized_model = (
        model if temperature == 0 else vectorize_model(model, num_particles, dim=-2)
    )
    sampled_model = infer_discrete(
        vectorized_model, first_available_dim, temperature, rng_key=random.PRNGKey(1)
    )
    sampled_trace = handlers.trace(sampled_model).get_trace()
    # Exact reference: condition on each value of z and score the trace.
    conditioned_traces = {
        z: handlers.trace(model).get_trace(z=np.array(z)) for z in [0, 1]
    }
    # Check posterior over z.
    actual_z_mean = sampled_trace["z"]["value"].astype(float).mean()
    if temperature:
        expected_z_mean = 1 / (
            1
            + np.exp(
                log_prob_sum(conditioned_traces[0])
                - log_prob_sum(conditioned_traces[1])
            )
        )
    else:
        expected_z_mean = (
            log_prob_sum(conditioned_traces[1]) > log_prob_sum(conditioned_traces[0])
        ).astype(float)
        expected_max = max(log_prob_sum(t) for t in conditioned_traces.values())
        actual_max = log_prob_sum(sampled_trace)
        assert_allclose(expected_max, actual_max, atol=1e-5)
    assert_allclose(actual_z_mean, expected_z_mean, atol=1e-2 if temperature else 1e-5)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_2(temperature):
    """Joint posterior/MAP over a dependent chain z1 -> z2."""
    # +--------+
    # z1 --|--> x1 |
    # | | |
    # V | |
    # z2 --|--> x2 |
    # +--------+
    num_particles = 10000
    data = np.array([[-1.0, -1.0, 0.0], [-1.0, 1.0, 1.0]])
    @config_enumerate
    def model(z1=None, z2=None):
        p = numpyro.param("p", jnp.array([[0.25, 0.75], [0.1, 0.9]]))
        loc = numpyro.param("loc", jnp.array([-1.0, 1.0]))
        z1 = numpyro.sample("z1", dist.Categorical(p[0]), obs=z1)
        # z2's distribution depends on the sampled value of z1.
        z2 = numpyro.sample("z2", dist.Categorical(p[z1]), obs=z2)
        logger.info("z1.shape = {}".format(z1.shape))
        logger.info("z2.shape = {}".format(z2.shape))
        with numpyro.plate("data", 3):
            numpyro.sample("x1", dist.Normal(loc[z1], 1.0), obs=data[0])
            numpyro.sample("x2", dist.Normal(loc[z2], 1.0), obs=data[1])
    first_available_dim = -3
    vectorized_model = (
        model if temperature == 0 else vectorize_model(model, num_particles, dim=-2)
    )
    sampled_model = infer_discrete(
        vectorized_model, first_available_dim, temperature, rng_key=random.PRNGKey(1)
    )
    sampled_trace = handlers.trace(sampled_model).get_trace()
    # Exact reference: enumerate all four (z1, z2) assignments.
    conditioned_traces = {
        (z1, z2): handlers.trace(model).get_trace(z1=np.array(z1), z2=np.array(z2))
        for z1 in [0, 1]
        for z2 in [0, 1]
    }
    # Check joint posterior over (z1, z2).
    actual_probs = np.zeros((2, 2))
    expected_probs = np.zeros((2, 2))
    for (z1, z2), tr in conditioned_traces.items():
        expected_probs[z1, z2] = np.exp(log_prob_sum(tr))
        actual_probs[z1, z2] = (
            (
                (sampled_trace["z1"]["value"] == z1)
                & (sampled_trace["z2"]["value"] == z2)
            )
            .astype(float)
            .mean()
        )
    if temperature:
        expected_probs = expected_probs / expected_probs.sum()
    else:
        # MAP mode: all mass must land on the single highest-probability cell.
        argmax = np.argmax(expected_probs.reshape(-1))
        expected_max = expected_probs.reshape(-1)[argmax]
        actual_max = log_prob_sum(sampled_trace)
        assert_allclose(np.log(expected_max), actual_max, atol=1e-5)
        expected_probs[:] = 0
        expected_probs.reshape(-1)[argmax] = 1
    assert_allclose(expected_probs, actual_probs, atol=1e-2 if temperature else 1e-5)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_3_simple(temperature):
    """Joint posterior/MAP over a Categorical latent inside a plate of 2."""
    # +---------------+
    # | z2 ---> x2 |
    # | 2 |
    # +---------------+
    num_particles = 10000
    data = np.array([-1.0, 1.0])
    @config_enumerate
    def model(z2=None):
        p = numpyro.param("p", np.array([0.25, 0.75]))
        loc = numpyro.param("loc", jnp.array([-1.0, 1.0]))
        with numpyro.plate("data", 2):
            z2 = numpyro.sample("z2", dist.Categorical(p), obs=z2)
            numpyro.sample("x2", dist.Normal(loc[z2], 1.0), obs=data)
    first_available_dim = -3
    vectorized_model = (
        model if temperature == 0 else vectorize_model(model, num_particles, dim=-2)
    )
    # NOTE(review): rng_key is passed positionally here, unlike the keyword
    # form used in the sibling tests — confirm it lands on the rng_key slot.
    sampled_model = infer_discrete(
        vectorized_model, first_available_dim, temperature, random.PRNGKey(1)
    )
    sampled_trace = handlers.trace(sampled_model).get_trace()
    # Exact reference: enumerate all four (z2[0], z2[1]) assignments.
    conditioned_traces = {
        (z20, z21): handlers.trace(model).get_trace(z2=np.array([z20, z21]))
        for z20 in [0, 1]
        for z21 in [0, 1]
    }
    # Check joint posterior over (z2[0], z2[1]).
    actual_probs = np.zeros((2, 2))
    expected_probs = np.zeros((2, 2))
    for (z20, z21), tr in conditioned_traces.items():
        expected_probs[z20, z21] = np.exp(log_prob_sum(tr))
        actual_probs[z20, z21] = (
            (
                (sampled_trace["z2"]["value"][..., :1] == z20)
                & (sampled_trace["z2"]["value"][..., 1:] == z21)
            )
            .astype(float)
            .mean()
        )
    if temperature:
        expected_probs = expected_probs / expected_probs.sum()
    else:
        argmax = np.argmax(expected_probs.reshape(-1))
        expected_max = expected_probs.reshape(-1)[argmax]
        actual_max = log_prob_sum(sampled_trace)
        assert_allclose(np.log(expected_max), actual_max, atol=1e-5)
        expected_probs[:] = 0
        expected_probs.reshape(-1)[argmax] = 1
    # NOTE(review): atol is 1e-2 for both temperatures here, while sibling
    # tests tighten to 1e-5 at temperature=0 — confirm this is intended.
    assert_allclose(expected_probs.reshape(-1), actual_probs.reshape(-1), atol=1e-2)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_3(temperature):
    """Joint posterior/MAP over one global latent z1 plus a plated z2."""
    # +---------+ +---------------+
    # z1 --|--> x1 | | z2 ---> x2 |
    # | 3 | | 2 |
    # +---------+ +---------------+
    num_particles = 10000
    data = [np.array([-1.0, -1.0, 0.0]), np.array([-1.0, 1.0])]
    @config_enumerate
    def model(z1=None, z2=None):
        p = numpyro.param("p", np.array([0.25, 0.75]))
        loc = numpyro.param("loc", jnp.array([-1.0, 1.0]))
        z1 = numpyro.sample("z1", dist.Categorical(p), obs=z1)
        with numpyro.plate("data[0]", 3):
            numpyro.sample("x1", dist.Normal(loc[z1], 1.0), obs=data[0])
        with numpyro.plate("data[1]", 2):
            z2 = numpyro.sample("z2", dist.Categorical(p), obs=z2)
            numpyro.sample("x2", dist.Normal(loc[z2], 1.0), obs=data[1])
    first_available_dim = -3
    vectorized_model = (
        model if temperature == 0 else vectorize_model(model, num_particles, dim=-2)
    )
    sampled_model = infer_discrete(
        vectorized_model, first_available_dim, temperature, rng_key=random.PRNGKey(1)
    )
    sampled_trace = handlers.trace(sampled_model).get_trace()
    # Exact reference: enumerate all eight (z1, z2[0], z2[1]) assignments.
    conditioned_traces = {
        (z1, z20, z21): handlers.trace(model).get_trace(
            z1=np.array(z1), z2=np.array([z20, z21])
        )
        for z1 in [0, 1]
        for z20 in [0, 1]
        for z21 in [0, 1]
    }
    # Check joint posterior over (z1, z2[0], z2[1]).
    actual_probs = np.zeros((2, 2, 2))
    expected_probs = np.zeros((2, 2, 2))
    for (z1, z20, z21), tr in conditioned_traces.items():
        expected_probs[z1, z20, z21] = jnp.exp(log_prob_sum(tr))
        actual_probs[z1, z20, z21] = (
            (
                (sampled_trace["z1"]["value"] == z1)
                & (sampled_trace["z2"]["value"][..., :1] == z20)
                & (sampled_trace["z2"]["value"][..., 1:] == z21)
            )
            .astype(float)
            .mean()
        )
    if temperature:
        expected_probs = expected_probs / expected_probs.sum()
    else:
        # MAP mode: compare in probability space (np.exp of the log joint).
        argmax = expected_probs.reshape(-1).argmax()
        expected_max = expected_probs.reshape(-1)[argmax]
        actual_max = np.exp(log_prob_sum(sampled_trace))
        assert_allclose(expected_max, actual_max, atol=1e-5)
        expected_probs[:] = 0
        expected_probs.reshape(-1)[argmax] = 1
    assert_allclose(expected_probs.reshape(-1), actual_probs.reshape(-1), atol=1e-2)
def model_zzxx():
    """Mixture model with continuous loc/scale latents plus a global
    discrete z1 and a plated discrete z2; observations are hard-coded."""
    # loc,scale
    # / \
    # +-------/-+ +--------\------+
    # z1 --|--> x1 | | z2 ---> x2 |
    # | 3 | | 2 |
    # +---------+ +---------------+
    data = [np.array([-1.0, -1.0, 0.0]), np.array([-1.0, 1.0])]
    p = numpyro.param("p", np.array([0.25, 0.75]))
    loc = numpyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1))
    # FIXME results in infinite loop in transformeddist_to_funsor.
    # scale = numpyro.sample("scale", dist.LogNormal(0, 1))
    # Workaround: sample in log space and exponentiate by hand.
    scale = jnp.exp(numpyro.sample("scale", dist.Normal(0, 1)))
    z1 = numpyro.sample("z1", dist.Categorical(p))
    with numpyro.plate("data[0]", 3):
        numpyro.sample("x1", dist.Normal(loc[z1], scale), obs=data[0])
    with numpyro.plate("data[1]", 2):
        z2 = numpyro.sample("z2", dist.Categorical(p))
        numpyro.sample("x2", dist.Normal(loc[z2], scale), obs=data[1])
def model2():
    """Variant of model_zzxx where the continuous scale depends on the
    discrete latent z1 (mixed discrete/continuous dependency)."""
    data = [np.array([-1.0, -1.0, 0.0]), np.array([-1.0, 1.0])]
    p = numpyro.param("p", np.array([0.25, 0.75]))
    loc = numpyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1))
    # FIXME results in infinite loop in transformeddist_to_funsor.
    # scale = numpyro.sample("scale", dist.LogNormal(0, 1))
    z1 = numpyro.sample("z1", dist.Categorical(p))
    # scale's LogNormal location is selected by the sampled value of z1.
    scale = numpyro.sample("scale", dist.LogNormal(jnp.array([0.0, 1.0])[z1], 1))
    with numpyro.plate("data[0]", 3):
        numpyro.sample("x1", dist.Normal(loc[z1], scale), obs=data[0])
    with numpyro.plate("data[1]", 2):
        z2 = numpyro.sample("z2", dist.Categorical(p))
        numpyro.sample("x2", dist.Normal(loc[z2], scale), obs=data[1])
@pytest.mark.parametrize("model", [model_zzxx, model2])
@pytest.mark.parametrize("temperature", [0, 1])
def test_mcmc_model_side_enumeration(model, temperature):
    """Run NUTS with enumerated discretes, then recover the discrete sites
    via infer_discrete conditioned on the sampled continuous latents."""
    # A single post-warmup sample is enough for this smoke test.
    mcmc = infer.MCMC(infer.NUTS(config_enumerate(model)), num_warmup=0, num_samples=1)
    mcmc.run(random.PRNGKey(0))
    # Keep only the continuous latents from the (single) MCMC sample.
    mcmc_data = {
        k: v[0] for k, v in mcmc.get_samples().items() if k in ["loc", "scale"]
    }
    # MAP estimate discretes, conditioned on posterior sampled continous latents.
    model = handlers.seed(model, rng_seed=1)
    actual_trace = handlers.trace(
        infer_discrete(
            # TODO support replayed sites in infer_discrete.
            # handlers.replay(config_enumerate(model), mcmc_trace),
            handlers.condition(config_enumerate(model), mcmc_data),
            temperature=temperature,
            rng_key=random.PRNGKey(1),
        )
    ).get_trace()
    # Check site names and shapes.
    expected_trace = handlers.trace(model).get_trace()
    assert set(actual_trace) == set(expected_trace)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_masked(temperature):
    """Like test_distribution_1 but with two of three observations masked
    out, so only data[0] contributes to the posterior over z."""
    # +-------+
    # z --|--> x |
    # +-------+
    num_particles = 10000
    data = np.array([1.0, 2.0, 3.0])
    mask = np.array([True, False, False])
    @config_enumerate
    def model(z=None):
        p = numpyro.param("p", np.array([0.75, 0.25]))
        z = numpyro.sample("z", dist.Categorical(p), obs=z)
        logger.info("z.shape = {}".format(z.shape))
        with numpyro.plate("data", 3), handlers.mask(mask=mask):
            numpyro.sample("x", dist.Normal(z, 1.0), obs=data)
    first_available_dim = -3
    vectorized_model = (
        model if temperature == 0 else vectorize_model(model, num_particles, dim=-2)
    )
    sampled_model = infer_discrete(
        vectorized_model, first_available_dim, temperature, rng_key=random.PRNGKey(1)
    )
    sampled_trace = handlers.trace(sampled_model).get_trace()
    # NOTE(review): keys are the floats 0.0/1.0 but are looked up below with
    # the ints 0/1 — works because equal numbers hash equally in Python.
    conditioned_traces = {
        z: handlers.trace(model).get_trace(z=np.array(z)) for z in [0.0, 1.0]
    }
    # Check posterior over z.
    actual_z_mean = sampled_trace["z"]["value"].astype(float).mean()
    if temperature:
        expected_z_mean = 1 / (
            1
            + jnp.exp(
                log_prob_sum(conditioned_traces[0])
                - log_prob_sum(conditioned_traces[1])
            )
        )
    else:
        expected_z_mean = (
            log_prob_sum(conditioned_traces[1]) > log_prob_sum(conditioned_traces[0])
        ).astype(float)
    assert_allclose(actual_z_mean, expected_z_mean, atol=1e-2)
| |
#!/usr/bin/env python
# Copyright 2009 Jerome Renard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
from xml.etree.ElementTree import ElementTree
def main():
    """Parse command-line options, load the XML module description and
    generate the Apache module C skeleton from it.

    Exits with status 2 when no description file is supplied.
    """
    parser = OptionParser()
    parser.add_option(
        "-f", "--file",
        action="store",
        type="string",
        dest="filename",
        help="write report to FILE",
        metavar="FILE")
    options, _args = parser.parse_args()
    # Guard clause: a description file is mandatory.
    if not options.filename:
        print("Please specify a filename for your module description")
        exit(2)
    print("Using file {0}".format(options.filename))
    tree = ElementTree()
    tree.parse(options.filename)
    ApacheModule(tree).generateModuleSkeleton()
# Module handling -------------
class ApacheModule:
    """Facade that turns a parsed XML module description into C skeleton code.

    The description tree is interrogated through the helper classes
    ApacheModuleName, ApacheModuleConfigurationDirective and ApacheModuleHook.
    """
    def __init__(self, descriptionTree):
        # Parsed xml.etree ElementTree of the module description file.
        self.descriptionTree = descriptionTree
    def getName(self):
        """Return the module name declared in the description."""
        apacheModuleName = ApacheModuleName(self.descriptionTree)
        return apacheModuleName.getName()
    def getConfiguration(self):
        """Return the list of configuration-directive dicts."""
        apacheModuleConfigurationDirective = ApacheModuleConfigurationDirective(self.descriptionTree)
        return apacheModuleConfigurationDirective.getDirectiveList()
    def getHooks(self):
        """Return the list of hook dicts (possibly empty)."""
        apacheModuleHook = ApacheModuleHook(self.descriptionTree)
        return apacheModuleHook.getHookList()
    def generateModuleSkeleton(self):
        """Generate and return the complete C skeleton as one string.

        BUG FIX: the three generated fragments were previously computed and
        silently discarded; they are now assembled (command table, then the
        register-hooks function, then the module declaration, which
        references the two preceding definitions) and returned.
        """
        name = self.getName()
        configurationDirectiveList = self.getConfiguration()
        hookList = self.getHooks()
        apModuleDeclareDataCode = self.generateModuleDeclarationCode(name)
        registerHooksCode = self.generateModuleRegisterHookCode(hookList, name)
        commandTableCode = self.generateConfigurationDirectiveTable(configurationDirectiveList)
        return "\n".join([commandTableCode, registerHooksCode, apModuleDeclareDataCode])
    def generateModuleDeclarationCode(self, moduleName):
        """Return the AP_MODULE_DECLARE_DATA struct for *moduleName*."""
        # Explicit mapping instead of the fragile `% locals()` idiom.
        return("""\
module AP_MODULE_DECLARE_DATA %(moduleName)s = {
    STANDARD20_MODULE_STUFF,
    config_dir_create, /* create per-dir config structures */
    config_dir_merge, /* merge per-dir config structures */
    config_server_create, /* create per-server config structures */
    config_server_merge, /* merge per-server config structures */
    command_table, /* table of config file commands */
    %(moduleName)s_register_hooks /* register hooks */
};
""" % {'moduleName': moduleName})
    def generateModuleRegisterHookCode(self, hookList, moduleName='url_alias'):
        """Return the <moduleName>_register_hooks() C function body.

        CONSISTENCY FIX: the function name was hard-coded to
        'url_alias_register_hooks' while generateModuleDeclarationCode emits
        '<moduleName>_register_hooks'; it is now derived from *moduleName*.
        The default preserves the historical output for existing callers.
        """
        linesOfCode = ['static void %s_register_hooks(apr_pool_t *p)' % moduleName, '{']
        for hook in hookList:
            linesOfCode.append(' ' + hook['type'] + '('
                               + hook['name'] + ', '
                               + hook['predecessor'] + ', '
                               + hook['successor'] + ', '
                               + hook['position'] + ');')
        linesOfCode.append('}')
        delimiter = "\n"
        return(delimiter.join(linesOfCode))
    def generateConfigurationDirectiveTable(self, configurationDirectiveList):
        """Return the command_rec command_table[] C array source."""
        linesOfCode = ['static const command_rec command_table[] = ', '{']
        for configurationDirective in configurationDirectiveList:
            # Each directive maps to a cmd_<name> handler function.
            linesOfCode.append(' ' + configurationDirective['type'] + '('
                               + '"' + configurationDirective['name'] + '", '
                               + 'cmd_' + configurationDirective['name'].lower() + ', '
                               + "NULL, "
                               + configurationDirective['scope'] + ', '
                               + '"' + configurationDirective['description'] + '"'
                               + '),')
        linesOfCode.append(' { NULL }')
        linesOfCode.append('};')
        delimiter = "\n"
        return(delimiter.join(linesOfCode))
class ApacheModuleName:
    """Extracts the module name (<name> element) from the description tree."""
    def __init__(self, descriptionTree):
        self.descriptionTree = descriptionTree
    def getName(self):
        # NOTE(review): the leading-slash path ('/name') relies on legacy
        # ElementTree XPath semantics; newer Python versions warn about
        # paths starting with '/' — confirm against the targeted runtime.
        return self.descriptionTree.find('/name').text
class ApacheModuleConfigurationDirective:
    """Extracts <directive> entries from the <configuration> section.

    Each directive is returned as a dict with the keys: name, type, scope,
    values and description.
    """
    # Scopes accepted in a directive's optional <scope> element.
    validScopeList = ['RSRC_CONF',
                      'ACCESS_CONF',
                      'OR_OPTIONS',
                      'OR_FILEINFO',
                      'OR_INDEXES',
                      'EXEC_ON_READ']
    def __init__(self, descriptionTree):
        self.descriptionTree = descriptionTree
    def getDirectiveList(self):
        """Return a directive dict for every <directive> element."""
        configurationTag = self.descriptionTree.find('/configuration')
        # Element.iter() replaces getiterator(), which was deprecated and
        # then removed in Python 3.9 (iter() exists since Python 2.7).
        directiveList = configurationTag.iter('directive')
        directives = []
        for configurationDirective in directiveList:
            directives.append(self.getDirective(configurationDirective))
        return directives
    def getDirective(self, configurationDirective):
        """Build the dict describing a single <directive> element.

        Raises ApacheModuleConfigurationInvalidScopeException when the
        declared scope is not in validScopeList.
        """
        name = configurationDirective.find('name').text
        directiveType = configurationDirective.find('type').text
        description = configurationDirective.find('description').text
        scope = "RSRC_CONF"  # default when no <scope> element is present
        scopeNode = configurationDirective.find('scope')
        if scopeNode is not None:
            scope = scopeNode.text
            if scope not in ApacheModuleConfigurationDirective.validScopeList:
                raise ApacheModuleConfigurationInvalidScopeException(scope)
        values = [value.text for value in configurationDirective.iter('value')]
        return {'name' : name,
                'type' : directiveType,
                'scope' : scope,
                'values' : values,
                'description':description}
class ApacheModuleHook:
    """Extracts <hook> entries from the optional <hooks> section."""
    def __init__(self, descriptionTree):
        self.descriptionTree = descriptionTree
    def getHookList(self):
        """Return a hook dict per <hook> element, or [] when the <hooks>
        section is absent."""
        hooksTag = self.descriptionTree.find('/hooks')
        if hooksTag is None:
            return []
        # Element.iter() replaces getiterator(), removed in Python 3.9.
        hooks = []
        for hook in hooksTag.iter('hook'):
            hooks.append(self.getHook(hook))
        return hooks
    def getHook(self, hook):
        """Build the dict describing a single <hook> element.

        predecessor/successor default to "NULL" and position to "MIDDLE"
        when the corresponding optional elements are absent.
        """
        name = hook.find('name').text
        hookType = hook.find('type').text
        predecessor = "NULL"
        successor = "NULL"
        position = "MIDDLE"
        predecessorNode = hook.find('predecessor')
        if predecessorNode is not None:
            predecessor = predecessorNode.text
        successorNode = hook.find('successor')
        if successorNode is not None:
            successor = successorNode.text
        positionNode = hook.find('position')
        if positionNode is not None:
            position = positionNode.text
        return {'name':name,
                'type':hookType,
                'predecessor':predecessor,
                'successor':successor,
                'position':position}
# Exceptions ------
class ApacheModuleConfigurationInvalidScopeException(Exception):
    """Raised when a directive declares a scope outside the allowed set.

    The offending scope string is kept on the instance as ``foundScope``.
    """
    def __init__(self, foundScope):
        super(ApacheModuleConfigurationInvalidScopeException, self).__init__()
        self.foundScope = foundScope
# Script entry point.
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify basic invocation of Microsoft Visual C/C++, including use
of a precompiled header with the $CCFLAGS variable.
"""
import sys
import time
import TestSCons
# Use regular-expression matching: MSVC output varies across versions.
test = TestSCons.TestSCons(match = TestSCons.match_re)
# Visual C/C++ only exists on Windows; skip this test everywhere else.
if sys.platform != 'win32':
    msg = "Skipping Visual C/C++ test on non-Windows platform '%s'\n" % sys.platform
    test.skip_test(msg)
#####
# Test the basics
test.write('SConstruct',"""
import os
# TODO: this is order-dependent (putting 'mssdk' second or third breaks),
# and ideally we shouldn't need to specify the tools= list anyway.
env = Environment(tools=['mssdk', 'msvc', 'mslink'])
env.Append(CPPPATH=os.environ.get('INCLUDE', ''),
LIBPATH=os.environ.get('LIB', ''),
CCFLAGS='/DPCHDEF')
env['PDB'] = File('test.pdb')
env['PCHSTOP'] = 'StdAfx.h'
env['PCH'] = env.PCH('StdAfx.cpp')[0]
env.Program('test', ['test.cpp', env.RES('test.rc')], LIBS=['user32'])
env.Object('fast', 'foo.cpp')
env.Object('slow', 'foo.cpp', PCH=0)
""")
test.write('test.cpp', '''
#include "StdAfx.h"
#include "resource.h"
int main(void)
{
char test[1024];
LoadString(GetModuleHandle(NULL), IDS_TEST, test, sizeof(test));
printf("%d %s\\n", IDS_TEST, test);
return 0;
}
''')
test.write('test.rc', '''
#include "resource.h"
STRINGTABLE DISCARDABLE
BEGIN
IDS_TEST "test 1"
END
''')
test.write('resource.h', '''
#define IDS_TEST 2001
''')
test.write('foo.cpp', '''
#include "StdAfx.h"
''')
test.write('StdAfx.h', '''
#include <windows.h>
#include <stdio.h>
#include "resource.h"
''')
test.write('StdAfx.cpp', '''
#include "StdAfx.h"
#ifndef PCHDEF
this line generates an error if PCHDEF is not defined!
#endif
''')
# Visual Studio 8 has deprecated the /Yd option and prints warnings
# about it, so ignore stderr when running SCons.
test.run(arguments='test.exe', stderr=None)
test.must_exist(test.workpath('test.exe'))
test.must_exist(test.workpath('test.res'))
test.must_exist(test.workpath('test.pdb'))
test.must_exist(test.workpath('StdAfx.pch'))
test.must_exist(test.workpath('StdAfx.obj'))
test.run(program=test.workpath('test.exe'), stdout='2001 test 1\n')
test.write('resource.h', '''
#define IDS_TEST 2002
''')
test.run(arguments='test.exe', stderr=None)
test.run(program=test.workpath('test.exe'), stdout='2002 test 1\n')
test.write('test.rc', '''
#include "resource.h"
STRINGTABLE DISCARDABLE
BEGIN
IDS_TEST "test 2"
END
''')
test.run(arguments='test.exe', stderr=None)
test.run(program=test.workpath('test.exe'), stdout='2002 test 2\n')
test.run(arguments='-c .')
test.must_not_exist(test.workpath('test.exe'))
test.must_not_exist(test.workpath('test.pdb'))
test.must_not_exist(test.workpath('test.res'))
test.must_not_exist(test.workpath('StdAfx.pch'))
test.must_not_exist(test.workpath('StdAfx.obj'))
test.run(arguments='test.exe', stderr=None)
test.must_exist(test.workpath('test.pdb'))
test.must_exist(test.workpath('StdAfx.pch'))
test.must_exist(test.workpath('StdAfx.obj'))
test.run(arguments='-c test.pdb')
test.must_not_exist(test.workpath('test.exe'))
test.must_not_exist(test.workpath('test.obj'))
test.must_not_exist(test.workpath('test.pdb'))
test.must_not_exist(test.workpath('StdAfx.pch'))
test.must_not_exist(test.workpath('StdAfx.obj'))
test.run(arguments='StdAfx.pch', stderr=None)
test.must_not_exist(test.workpath('test.pdb'))
test.must_exist(test.workpath('StdAfx.pch'))
test.must_exist(test.workpath('StdAfx.obj'))
test.run(arguments='-c test.exe')
test.must_not_exist(test.workpath('test.exe'))
test.must_not_exist(test.workpath('test.obj'))
test.must_not_exist(test.workpath('test.pdb'))
test.must_not_exist(test.workpath('StdAfx.pch'))
test.must_not_exist(test.workpath('StdAfx.obj'))
test.run(arguments='test.obj', stderr=None)
test.must_not_exist(test.workpath('test.pdb'))
test.must_exist(test.workpath('test.obj'))
# Time a PCH-enabled compile of foo.cpp against an identical compile
# without the precompiled header.
start = time.time()
test.run(arguments='fast.obj', stderr=None)
fast = time.time() - start
start = time.time()
test.run(arguments='slow.obj', stderr=None)
slow = time.time() - start
# using precompiled headers should be faster
limit = slow*0.90
if fast >= limit:
    # BUG FIX: these were Python-2-only `print` statements (a SyntaxError
    # under Python 3).  Single-argument parenthesized print() calls emit
    # identical output on both Python 2 and Python 3.
    print("Using precompiled headers was not fast enough:")
    print("slow.obj: %.3fs" % slow)
    print("fast.obj: %.3fs (expected less than %.3fs)" % (fast, limit))
    test.fail_test()
# Modifying resource.h should cause both the resource and precompiled header to be rebuilt:
test.write('resource.h', '''
#define IDS_TEST 2003
''')
test.not_up_to_date(arguments='test.res', stderr=None)
test.not_up_to_date(arguments='StdAfx.pch', stderr=None)
test.not_up_to_date(arguments='test.exe', stderr=None)
test.run(program=test.workpath('test.exe'), stdout='2003 test 2\n')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# The initial version of the directory_cache_t class was written
# by Matthias Baas (baas@ira.uka.de).
"""Directory cache implementation.
This module contains the implementation of a cache that uses individual
files stored in a dedicated cache directory to store the cached contents.
The cache class is L{directory_cache_t} which can be passed to the C{cache}
argument of the L{parse()} function.
"""
import os, os.path, gzip
try:
import hashlib as md5
except:
import md5
import cPickle
import declarations_cache
class index_entry_t:
    """One record of the directory-cache index table.

    Every cached header file (i.e. every *.cache file) owns one of these
    records; it is consulted to decide whether the cached declarations
    are still valid.  Helper class for directory_cache_t.
    """

    def __init__( self, filesigs, configsig ):
        """Store the dependency signatures for one cache entry.

        filesigs is a list of (fileid, sig) tuples.
        configsig is the signature of the configuration object.
        """
        self.filesigs = filesigs
        self.configsig = configsig

    def __getstate__(self):
        # Pickle as a plain tuple to keep the index file compact.
        return (self.filesigs, self.configsig)

    def __setstate__(self, state):
        self.filesigs, self.configsig = state
class directory_cache_t ( declarations_cache.cache_base_t ):
    """Cache class that stores its data as multiple files inside a directory.
    The cache stores one index file called "index.dat" which is always
    read by the cache when the cache object is created. Each header file
    will have its corresponding *.cache file that stores the declarations
    found in the header file. The index file is used to determine whether
    a *.cache file is still valid or not (by checking if one of the dependent
    files (i.e. the header file itself and all included files) have been
    modified since the last run).
    """
    def __init__( self, dir="cache", compression=False, md5_sigs=True ):
        """Constructor.
        dir is the cache directory (it is created if it does not exist).
        If compression is set to True the cache files will be compressed
        using gzip.
        md5_sigs determines whether file modifications is checked by
        computing a md5 digest or by checking the modification date.
        """
        declarations_cache.cache_base_t.__init__(self)
        # Cache directory
        self.__dir = os.path.abspath(dir)
        # Flag that determines whether the cache files will be compressed
        self.__compression = compression
        # Flag that determines whether the signature is a md5 digest or
        # the modification time
        # (this flag is passed to the filename_repository_t class)
        self.__md5_sigs = md5_sigs
        # Filename repository
        self.__filename_rep = filename_repository_t(self.__md5_sigs)
        # Index dictionary (Key is the value returned by _create_cache_key()
        # (which is based on the header file name) and value is an
        # index_entry_t object)
        self.__index = {}
        # Flag that indicates whether the index was modified
        self.__modified_flag = False
        # Check if dir refers to an existing file...
        if os.path.isfile(self.__dir):
            raise ValueError, "Cannot use %s as cache directory. There is already a file with that name."%self.__dir
        # Load the cache or create the cache directory...
        if os.path.isdir(self.__dir):
            self._load()
        else:
            # Create the cache directory...
            os.mkdir(self.__dir)
    def flush(self):
        """Save the index table to disk."""
        self._save()
        # self.__filename_rep._dump()
    def update(self, source_file, configuration, declarations, included_files):
        """Replace a cache entry by a new value.
        @param source_file: Header file name.
        @type source_file: str
        @param configuration: Configuration object.
        @type configuration: L{config_t}
        @param declarations: Declarations contained in the header file.
        @type declarations: picklable object
        @param included_files: Dependent files
        @type included_files: list of str
        """
        # Normlize all paths...
        source_file = os.path.normpath(source_file)
        included_files = map(lambda p: os.path.normpath(p), included_files)
        # Create the list of dependent files. This is the included_files list
        # + the source file. Duplicate names are removed.
        dependent_files = {}
        for name in [source_file]+included_files:
            dependent_files[name] = 1
        dependent_files = dependent_files.keys()
        key = self._create_cache_key(source_file)
        # Remove an existing entry (if there is one)
        # After calling this method, it is guaranteed that __index[key]
        # does not exist anymore.
        self._remove_entry(source_file, key)
        # Create a new entry...
        # Create the sigs of all dependent files...
        filesigs = []
        for filename in dependent_files:
            # acquire_filename also bumps the reference count, which is
            # balanced by release_filename() in _remove_entry().
            id_,sig = self.__filename_rep.acquire_filename(filename)
            filesigs.append((id_,sig))
        configsig = self._create_config_signature(configuration)
        entry = index_entry_t(filesigs, configsig)
        self.__index[key] = entry
        self.__modified_flag = True
        # Write the declarations into the cache file...
        cachefilename = self._create_cache_filename(source_file)
        self._write_file(cachefilename, declarations)
    def cached_value(self, source_file, configuration):
        """Return the cached declarations or None.
        @param source_file: Header file name
        @type source_file: str
        @param configuration: Configuration object
        @type configuration: L{config_t}
        @return: Cached declarations or None
        """
        # Check if the cache contains an entry for source_file
        key = self._create_cache_key(source_file)
        entry = self.__index.get(key)
        if entry==None:
            # print "CACHE: %s: Not cached"%source_file
            return None
        # Check if the entry is still valid. It is not valid if:
        #  - the source_file has been updated
        #  - the configuration object has changed (i.e. the header is parsed
        #    by gccxml with different settings which may influence the
        #    declarations)
        #  - the included files have been updated
        #    (this list is part of the cache entry as it cannot be known
        #    by the caller when cached_value() is called. It was instead
        #    passed to update())
        # Check if the config is different...
        configsig = self._create_config_signature(configuration)
        if configsig!=entry.configsig:
            # print "CACHE: %s: Config mismatch"%source_file
            return None
        # Check if any of the dependent files has been modified...
        for id_, sig in entry.filesigs:
            if self.__filename_rep.is_file_modified(id_, sig):
                # print "CACHE: %s: Entry not up to date"%source_file
                return None
        # Load and return the cached declarations
        cachefilename = self._create_cache_filename(source_file)
        decls = self._read_file(cachefilename)
        # print "CACHE: Using cached decls for",source_file
        return decls
    def _load(self):
        """Load the cache.
        Loads the file index.dat which contains the index table and
        the file name repository.
        This method is called by the constructor.
        """
        indexfilename = os.path.join(self.__dir, "index.dat")
        if os.path.exists(indexfilename):
            data = self._read_file(indexfilename)
            self.__index = data[0]
            self.__filename_rep = data[1]
            # The on-disk cache wins when the md5_sigs setting disagrees,
            # because the stored signatures were computed with it.
            if self.__filename_rep._md5_sigs!=self.__md5_sigs:
                print "CACHE: Warning: md5_sigs stored in the cache is set to %s."%self.__filename_rep._md5_sigs
                print "       Please remove the cache to change this setting."
                self.__md5_sigs = self.__filename_rep._md5_sigs
        else:
            # No index yet: start with an empty cache.
            self.__index = {}
            self.__filename_rep = filename_repository_t(self.__md5_sigs)
        self.__modified_flag = False
    def _save(self):
        """Save the cache index if it was modified.
        Saves the index table and the file name repository in the file
        index.dat.
        """
        if self.__modified_flag:
            self.__filename_rep.update_id_counter()
            indexfilename = os.path.join(self.__dir, "index.dat")
            self._write_file(indexfilename, (self.__index,self.__filename_rep))
            self.__modified_flag = False
    def _read_file(self, filename):
        """Read a Python object from a cache file.
        Reads a pickled object from disk and returns it.
        @param filename: Name of the file that should be read.
        @type filename: str
        @returns: Unpickled file contents
        """
        # Files are written either gzip-compressed or plain, depending on
        # the compression flag given at construction time.
        if self.__compression:
            f = gzip.GzipFile(filename, "rb")
        else:
            f = file(filename, "rb")
        res = cPickle.load(f)
        f.close()
        return res
    def _write_file(self, filename, data):
        """Write a data item into a file.
        The data object is written to a file using the pickle mechanism.
        @param filename: Output file name
        @type filename: str
        @param data: A Python object that will be pickled
        @type data: picklable object
        """
        if self.__compression:
            f = gzip.GzipFile(filename, "wb")
        else:
            f = file(filename, "wb")
        cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)
        f.close()
    def _remove_entry(self, source_file, key):
        """Remove an entry from the cache.
        source_file is the name of the header and key is its corresponding
        cache key (obtained by a call to L{_create_cache_key()}).
        The entry is removed from the index table, any referenced file
        name is released and the cache file is deleted.
        If key references a non-existing entry, the method returns
        immediately.
        @param source_file: Header file name
        @type source_file: str
        @param key: Key value for the specified header file
        @type key: hashable object
        """
        entry = self.__index.get(key)
        if entry==None:
            return
        # Release the referenced files...
        for id_, sig in entry.filesigs:
            self.__filename_rep.release_filename(id_)
        # Remove the cache entry...
        del self.__index[key]
        self.__modified_flag = True
        # Delete the corresponding cache file...
        cachefilename = self._create_cache_filename(source_file)
        try:
            os.remove(cachefilename)
        except OSError, e:
            # Best effort: a missing cache file is not fatal.
            print "Could not remove cache file (%s)"%e
    def _create_cache_key(self, source_file):
        """Return the cache key for a header file.
        @param source_file: Header file name
        @type source_file: str
        @returns: Key for the given header file
        @rtype: str
        """
        # Combine the base name with a hash of the directory so files with
        # the same name in different directories get distinct keys.
        path, name = os.path.split(source_file)
        return name+str(hash(path))
    def _create_cache_filename(self, source_file):
        """Return the cache file name for a header file.
        @param source_file: Header file name
        @type source_file: str
        @returns: Cache file name (*.cache)
        @rtype: str
        """
        res = self._create_cache_key(source_file)+".cache"
        return os.path.join(self.__dir, res)
    def _create_config_signature(self, config):
        """Return the signature for a config object.
        The signature is computed as md5 digest of the contents of
        working_directory, include_paths, define_symbols and
        undefine_symbols.
        @param config: Configuration object
        @type config: L{config_t}
        @returns: Signature
        @rtype: str
        """
        # NOTE(review): when 'hashlib' was imported under the name 'md5',
        # md5.new() requires an algorithm-name argument -- this call only
        # works with the legacy Python 2 'md5' module; verify on the
        # target runtime.
        m = md5.new()
        m.update(config.working_directory)
        map(lambda p: m.update(p), config.include_paths)
        map(lambda p: m.update(p), config.define_symbols)
        map(lambda p: m.update(p), config.undefine_symbols)
        map(lambda p: m.update(p), config.cflags)
        return m.digest()
class filename_entry_t:
    """Reference-counted record held by filename_repository_t.

    Bundles a file name with its reference count plus a transient cache
    of the file's signature.  Internal helper of the repository.
    """

    def __init__( self, filename ):
        """Create an entry for filename; the reference count starts at 0."""
        self.filename = filename
        self.refcount = 0
        # Transient signature cache: 'signature' is only meaningful while
        # 'sig_valid' is True.  Neither attribute is ever pickled; they
        # are re-initialized after unpickling.
        self.sig_valid = False
        self.signature = None

    def __getstate__(self):
        # Persist only the name and the reference count; the signature
        # cache must be rebuilt after loading.
        return (self.filename, self.refcount)

    def __setstate__(self, state):
        self.filename, self.refcount = state
        # Invalidate the signature cache on restore.
        self.sig_valid = False
        self.signature = None

    def inc_ref_count(self):
        """Increase the reference count by 1."""
        self.refcount += 1

    def dec_ref_count(self):
        """Decrease the reference count by 1 and return the new count."""
        self.refcount -= 1
        return self.refcount
class filename_repository_t:
    """File name repository.
    This class stores file names and can check whether a file has been
    modified or not since a previous call.
    A file name is stored by calling acquire_filename() which returns
    an ID and a signature of the file. The signature can later be used
    to check if the file was modified by calling is_file_modified().
    If the file name is no longer required release_filename() should be
    called so that the entry can be removed from the repository.
    """
    def __init__( self, md5_sigs ):
        """Constructor.
        """
        # Flag that determines whether the signature is a md5 digest or
        # the modification time
        # (this flag is passed to the filename_repository_t class)
        self._md5_sigs = md5_sigs
        # ID lookup table (key: filename / value: id_)
        self.__id_lut = {}
        # Entry dictionary (key: id_ / value: filename_entry_t)
        # This dictionary contains the actual data.
        # It must always hold that each entry in __entries has a corresponding
        # entry in __id_lut (i.e. the keys in __id_lut must be the names
        # stored in __entries)
        self.__entries = {}
        # A counter for new ids
        self.__next_id = 1
    def acquire_filename(self, name):
        """Acquire a file name and return its id and its signature.
        """
        id_ = self.__id_lut.get(name)
        # Is this a new entry?
        if id_==None:
            # then create one...
            id_ = self.__next_id
            self.__next_id += 1
            self.__id_lut[name] = id_
            entry = filename_entry_t(name)
            self.__entries[id_] = entry
        else:
            # otherwise obtain the entry...
            entry = self.__entries[id_]
        # Every acquire must be balanced by a release_filename() call.
        entry.inc_ref_count()
        return id_, self._get_signature(entry)
    def release_filename(self, id_):
        """Release a file name.
        """
        entry = self.__entries.get(id_)
        if entry==None:
            raise ValueError, "Invalid filename id (%d)"%id_
        # Decrease reference count and check if the entry has to be removed...
        if entry.dec_ref_count()==0:
            # Nobody references the file anymore: drop it from both tables.
            del self.__entries[id_]
            del self.__id_lut[entry.filename]
    def is_file_modified(self, id_, signature):
        """Check if the file referred to by id_ has been modified.
        """
        entry = self.__entries.get(id_)
        if entry==None:
            raise ValueError, "Invalid filename id_ (%d)"%id_
        # Is the signature already known?
        if entry.sig_valid:
            # use the cached signature
            filesig = entry.signature
        else:
            # compute the signature and store it
            filesig = self._get_signature(entry)
            entry.signature = filesig
            entry.sig_valid = True
        return filesig!=signature
    def update_id_counter(self):
        """Update the id_ counter so that it doesn't grow forever.
        """
        if len(self.__entries)==0:
            self.__next_id = 1
        else:
            self.__next_id = max(self.__entries.keys())+1
    def _get_signature(self, entry):
        """Return the signature of the file stored in entry.
        """
        if self._md5_sigs:
            # return md5 digest of the file content...
            if not os.path.exists(entry.filename):
                return None
            try:
                f = file(entry.filename)
            except IOError, e:
                print "Cannot determine md5 digest:",e
                return None
            data = f.read()
            f.close()
            # NOTE(review): when 'hashlib' was imported under the name
            # 'md5', md5.new() requires an algorithm-name argument -- this
            # call only works with the legacy Python 2 'md5' module;
            # verify on the target runtime.
            return md5.new(data).digest()
        else:
            # return file modification date...
            try:
                return os.path.getmtime(entry.filename)
            except OSError, e:
                # Missing file: no signature available.
                return None
    def _dump(self):
        """Dump contents for debugging/testing.
        """
        print 70*"-"
        print "ID lookup table:"
        for name in self.__id_lut:
            id_ = self.__id_lut[name]
            print "  %s -> %d"%(name, id_)
        print 70*"-"
        print "%-4s %-60s %s"%("ID", "Filename", "Refcount")
        print 70*"-"
        for id_ in self.__entries:
            entry = self.__entries[id_]
            print "%04d %-60s %d"%(id_, entry.filename, entry.refcount)
| |
from __future__ import unicode_literals
import logging
import os.path
import sys
from os import listdir
from mopidy_bigbeet import Extension
from mopidy_bigbeet.schema import beet_schema, genre_schema
from peewee import *
from playhouse.apsw_ext import APSWDatabase, DateTimeField
# Schema version stored in SQLite's user_version pragma; bump when the
# on-disk schema changes.
user_version = 1
# database = SqliteDatabase(None, pragmas=(
#     ('journal_mode', 'WAL'),
#     ('user_version', user_version)
#     ))
# database = MySQLDatabase('bigbeet', user='rails_user', passwd='tequila'
#    charset='utf8mb4')
logger = logging.getLogger(__name__)
# Module-level handles, populated by _initialize():
#   bdb      - beets library wrapper
#   gdb      - genre tree helper
#   data_dir - extension data directory
bdb = None
gdb = None
data_dir = None
database = APSWDatabase(None,
                        pragmas=(
                            ('foreign_keys', 'ON'),
                            ('temp_store', 2),
                            ('journal_mode', 'WAL'),
                            ('user_version', user_version)
                        ))
# Characters that never count as an artist's index initial
# (see _get_artist_initial()).
unwanted = [u'_',u'1',u'2',u'3',u'4',u'5',u'6',u'7',u'8',u'9',u'0',u' ',u'!',u'"',u'.',u'<']
def _initialize(config):
    """Populate the module globals (bdb, gdb, data_dir) from the extension
    config and open/create the bigbeet database."""
    global bdb
    global gdb
    global data_dir
    bigbeet_conf = config['bigbeet']
    data_dir = bigbeet_conf['bb_data_dir'] #|| Extension.get_data_dir(config)
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    bdb = beet_schema.BeetsLibrary(bigbeet_conf['beetslibrary']).lib
    gdb = genre_schema.GenreTree(data_dir)
    # NOTE(review): the database filename is a byte string -- confirm
    # data_dir is bytes as well, otherwise os.path.join fails on Python 3.
    _connect_db(os.path.join(data_dir, b'bb_library.db'))
def setup_db():
    """(Re)create the bigbeet database schema.

    Drops any existing tables (ignoring errors from tables that do not
    exist yet), creates all model tables and records the baseline schema
    version.
    """
    # BUG FIX: the original drop list referenced the commented-out models
    # ArtistSecondaryGenre/SecondaryGenre, which raised a NameError that
    # the bare 'except:' silently swallowed -- so the tables were never
    # dropped.  The list now matches the models that actually exist and is
    # shared with the create loop.
    models = [Genre, AlbumGroup, Album, Artist, Label, SchemaMigration,
              Track, UserTag, TrackTag]
    try:
        database.drop_tables(models)
    except Exception:
        # Best effort: tables may simply not exist on a fresh database.
        pass
    for modell in models:
        modell.create_table()
    # Stamp the schema version so future migrations know the baseline.
    SchemaMigration.create(version = '20180818' )
def _connect_db(db_path):
    """Bind the global database to db_path and open a connection.

    Creates the full schema first when the database file does not exist
    yet.
    """
    global database
    db_existed = os.path.isfile(db_path)
    database.init(db_path)
    if not db_existed:
        # Fresh file: build all tables before anyone queries them.
        setup_db()
    try:
        database.connect()
    except Exception:
        # Best effort: peewee raises if the connection is already open.
        # (Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    #_migrate_db()
def _migrate_db():
    """Apply any pending schema migrations found in db/migrations.

    Migration modules are named 'migration_<version>...'; a migration is
    executed when its version is not yet recorded in the
    schema_migrations table.
    """
    migrations = listdir(os.path.join(
        os.path.dirname(__file__), '..', 'db', 'migrations'))
    # Strip file extensions and keep only migration modules.
    migrations = set((m.split('.')[0] for m in migrations if m.startswith(u'migration')))
    versions = [v.version for v in SchemaMigration.select()]
    for migration in migrations:
        if not migration.split('_')[1] in versions:
            modul_name = 'mopidy_bigbeet.db.migrations.' + migration
            # level=-1 means "try relative, then absolute import"
            # (Python 2 only semantics).
            mig_object = __import__(modul_name,
                                    globals(),
                                    locals(),
                                    [migration],
                                    -1)
            mig = mig_object.Migration(database=database)
            # import pdb; pdb.set_trace()
            mig.migrate_db()
            mig.update_db()
def check_genres(config):
    """Report library genres that are missing from the genre tree.

    Checks the genre of every album and every item in the beets library
    and writes the names of unknown genres to genres-missing.txt in the
    extension's data directory.
    """
    _initialize(config)
    albums = bdb.albums()
    for album in albums:
        gdb.find_missing(album['genre'])
    for item in bdb.items():
        # BUG FIX: the original looked up album['genre'] here, re-checking
        # the last album's genre instead of each item's own genre.
        gdb.find_missing(item['genre'])
    with open(os.path.join(data_dir, 'genres-missing.txt'), 'w') as outfile:
        for mg in set(gdb.genres_missing):
            print(mg)
            outfile.write(mg + '\n')
    print(set(gdb.genres_missing))
def _sync_beets_item(track, item):
    """Copy all track-level fields from a beets item onto a bigbeet Track,
    sync its user tags and save it.

    track -- bigbeet Track model instance (mutated and saved)
    item  -- beets item providing the field values
    """
    track.name = item.title
    track.path = item.path
    track.acoustid_fingerprint = item.acoustid_fingerprint
    track.acoustid = item.acoustid_id
    track.added = item.added
    # Singletons have no album; otherwise link the Track to the bigbeet
    # Album row mirroring the beets album.
    if item.singleton:
        track.album = None
    else:
        bdb_album = item.get_album()
        if bdb_album:
            track.album = Album.get(beets_id=item.get_album().id)
    track.artist = item.artist
    track.asin = item.asin
    track.bitdepth = item.bitdepth
    track.bitrate = item.bitrate
    track.beets_id = item.id
    track.bpm = item.bpm
    track.channels = item.channels
    track.comments = item.comments
    track.composer = item.composer
    track.country = item.country
    track.day = item.day
    track.disc = item.disc
    track.encoder = item.encoder
    track.format = item.format
    track.genre = item.genre
    track.grouping = item.grouping
    track.language = item.language
    track.length = item.length
    track.mb_releasegroupid = item.mb_releasegroupid
    track.mb_trackid = item.mb_trackid
    track.media = item.media
    track.month = item.month
    track.mtime = item.mtime
    track.original_day = item.original_day
    track.original_month = item.original_month
    track.original_year = item.original_year
    track.samplerate = item.samplerate
    track.track = item.track
    track.year = item.year
    _sync_usertags(item, track)
    track.save()
def _sync_usertags(item, track):
    """Synchronise the user tags of a bigbeet track with a beets item.

    Tags present on the beets item but not on the track are created and
    attached; tags that disappeared from the beets item are deleted
    (together with their track associations).
    """
    bb_usertags = [i.tag.name for i in track.tracktag_set]
    if hasattr(item, 'usertags'):
        usertags = item.usertags.split('|')
    else:
        usertags = []
    missing_usertags = [i for i in usertags if i not in bb_usertags]
    delete_usertags = [i for i in bb_usertags if i not in usertags]
    if set(bb_usertags) == set(usertags):
        # Nothing to sync
        return
    # BUG FIX: these two branches used to be chained with 'elif', so
    # surplus tags were never removed whenever at least one tag was also
    # missing.  Both cases must be handled independently.
    if missing_usertags:
        for tag_str in missing_usertags:
            tag, created = UserTag.get_or_create(name=tag_str)
            TrackTag.get_or_create(track=track, tag=tag)
    if delete_usertags:
        for tag_str in delete_usertags:
            usertag = UserTag.select().where(UserTag.name == tag_str)[0]
            usertag.delete_instance(recursive=True)
def _sync_beets_album(album, bdb_album):
    """Copy all album-level fields from a beets album onto a bigbeet Album.

    Resolves or creates the related Artist, Label, AlbumGroup and Genre
    rows, then saves both the artist and the album.
    """
    genre_name = bdb_album.genre or '_Unknown'
    genre = _set_genre(genre_name)
    try:
        artist, created = Artist.get_or_create(
            name=(bdb_album.albumartist or '_Unknown'),
            mb_albumartistid=bdb_album.mb_albumartistid)
    except Exception:
        # name and mb_albumartistid are both unique columns; on a
        # collision fall back to looking the artist up by whichever key
        # is available.  (Narrowed from a bare 'except:'.)
        if bdb_album.mb_albumartistid:
            artist, created = Artist.get_or_create(mb_albumartistid=bdb_album.mb_albumartistid)
        else:
            artist, created = Artist.get_or_create(name=(bdb_album.albumartist or '_Unknown'))
    artist.country = bdb_album.country
    artist.albumartist_sort = bdb_album.albumartist_sort
    artist.albumartist_credit = bdb_album.albumartist_credit
    artist.albumartist_initial = _get_artist_initial(artist)
    artist.genre = genre
    artist.save()
    label, created = Label.get_or_create(name = (bdb_album.label or '_Unknown'))
    album_group, created = AlbumGroup.get_or_create(
        name = (bdb_album.albumtype or '_Unknown'))
    album.name = bdb_album.album
    album.mb_albumid = bdb_album.mb_albumid or None
    album.label = label
    album.artist = artist
    album.album_group = album_group
    album.albumstatus = bdb_album.albumstatus
    album.beets_id = bdb_album.id
    album.catalognum = bdb_album.catalognum
    album.comp = bdb_album.comp
    album.day = bdb_album.day
    album.disctotal = bdb_album.disctotal
    album.genre = genre
    album.language = bdb_album.language
    album.mb_releasegroupid = bdb_album.mb_releasegroupid
    album.month = bdb_album.month
    album.original_day = bdb_album.original_day
    album.original_month = bdb_album.original_month
    album.original_year = bdb_album.original_year
    album.tracktotal = len(bdb_album.items())
    album.year = bdb_album.year
    try:
        album.art_url = bdb_album.art_url
    except Exception:
        logger.debug(u'Album has no art_url field yet: %s', album.name)
    # BUG FIX: a failed save used to drop into pdb (debugging leftover);
    # log the error and re-raise instead.
    try:
        album.save()
    except Exception:
        logger.exception(u'Could not save album: %s', album.name)
        raise
def _get_artist_initial(artist):
    """Return the upper-cased initial used to index an artist.

    Prefers albumartist_sort over name; characters listed in the
    module-level ``unwanted`` set (digits, punctuation, whitespace, ...)
    are skipped.  Returns u'-' when no usable character exists.
    """
    # BUG FIX: the original indexed [0] unconditionally and raised
    # IndexError when the chosen string contained only unwanted chars
    # (e.g. a purely numeric artist name).
    for source in (artist.albumartist_sort, artist.name):
        if source:
            allowed = [ch for ch in source if ch not in unwanted]
            if allowed:
                return allowed[0].upper()
    return u'-'
def _set_genre(genre_name):
    """Create/fetch the Genre row for genre_name including its ancestry.

    Walks the parent chain reported by the genre tree from root to leaf,
    creating any missing Genre rows, and returns the leaf Genre (or None
    when the genre tree yields no entries for genre_name).
    """
    genres = gdb.find_parents(genre_name)
    parent_id = None
    # BUG FIX: initialise genre so an empty parent chain no longer raises
    # NameError on the return statement.
    genre = None
    while genres:
        genre_name = genres.pop()
        genre, created = Genre.get_or_create(name=genre_name,
                                             parent=parent_id)
        parent_id = genre.id
    return genre
def item_update(config,item_id):
    """Sync one beets item (by id) into bigbeet, or delete its Track rows
    when the item no longer exists in the beets library."""
    _initialize(config)
    beets_item = bdb.get_item(item_id)
    if not beets_item:
        # Item vanished from beets: drop every matching bigbeet track.
        for stale in Track.select().where(Track.beets_id == item_id):
            logger.info(u'Track deleted: %s in %s', stale.name, str(stale.path))
            stale.delete_instance()
        return
    record, _created = Track.get_or_create(beets_id=item_id)
    _sync_beets_item(record, beets_item)
    logger.info(u'Track synced')
def album_update(config,album_id):
    """Sync one album (by beets id) into bigbeet, or delete it.

    When the album still exists in beets it is (re)synced; otherwise its
    bigbeet copy, tracks and any now-orphaned related rows (artist,
    label, album group, genre) are removed.
    """
    _initialize(config)
    bdb_album = bdb.get_album(album_id)
    if bdb_album:
        album, created = Album.get_or_create(beets_id=bdb_album.id)
        _sync_beets_album(album, bdb_album)
        logger.info(u'Album synced: %s', album.name)
    else:
        albums = Album.select().where(Album.beets_id == album_id)
        for album in albums:
            artist = album.artist
            label = album.label
            album_group = album.album_group
            for track in album.track_set:
                track.delete_instance()
            album.delete_instance()
            logger.info(u'Album deleted: %s', album.name)
            # BUG FIX: genre was only bound when artist was set, so the
            # genre check below raised NameError for albums without an
            # artist.
            genre = artist.genre if artist else None
            if artist and not artist.albums:
                artist.delete_instance()
            if label and not label.albums:
                label.delete_instance()
            if album_group and not album_group.albums:
                album_group.delete_instance()
            if genre and not genre.artists and not Genre.select().where(Genre.parent == genre.id):
                genre.delete_instance()
def _delete_orphans():
    """Delete albums, artists, genres, labels and album groups that no
    longer have any rows referencing them."""
    albums = Album.select()
    for album in albums:
        if not album.track_set:
            album.delete_instance()
    artists = Artist.select()
    for artist in artists:
        if not artist.albums:
            artist.delete_instance()
    genres = Genre.select()
    for genre in genres:
        if not genre.artists:
            # BUG FIX: genre.delete() only builds a (never executed)
            # DELETE query object; delete_instance() actually removes
            # the row, matching the other branches.
            genre.delete_instance()
    labels = Label.select()
    for label in labels:
        if not label.albums:
            label.delete_instance()
    album_groups = AlbumGroup.select()
    for album_group in album_groups:
        if not album_group.albums:
            album_group.delete_instance()
def update(config):
    """Prune orphaned rows, then re-sync every beets singleton track."""
    _initialize(config)
    _delete_orphans()
    singletons = bdb.items(u'singleton:true')
    for singleton in singletons:
        logger.info("update: %s", singleton.path)
        record, _created = Track.get_or_create(beets_id=singleton.id)
        _sync_beets_item(record, singleton)
def _fix_mtime(config):
    """Repair the stored mtime of every beets item.

    Items whose file still exists get their mtime refreshed in beets;
    items whose file is gone are removed from beets and from bigbeet
    (including now-empty albums/artists/genres) and are logged to
    files-missing.txt in the data directory.
    """
    _initialize(config)
    items = bdb.items()
    with open(os.path.join(data_dir, 'files-missing.txt'), 'w') as outfile:
        for item in items:
            if os.path.isfile(item.path):
                item.mtime = item.current_mtime()
                item.store()
            else:
                # BUG FIX: the original passed item.path as a second
                # positional argument to print(), printing a tuple
                # instead of interpolating it into the message.
                print(u"missing %s" % item.path)
                # Remove the item from beets (keep the file; it is
                # already gone anyway).
                item.remove(False,True)
                tracks = Track.select().where(Track.path == item.path)
                for track in tracks:
                    album = track.album
                    artist = album.artist
                    genre = artist.genre
                    track.delete_instance()
                    if not album.track_set:
                        album.delete_instance()
                    if not artist.albums:
                        artist.delete_instance()
                    if not genre.artists:
                        genre.delete_instance()
                # NOTE(review): item.path is typically bytes in beets --
                # confirm the concatenation with the str '\n' is intended.
                outfile.write(item.path + '\n')
def scan(config):
    """Full sync: mirror every beets album and item into bigbeet.

    Albums are processed in beets-id order, singletons are synced last,
    and orphaned rows are cleaned up afterwards.
    """
    _initialize(config)
    from beets import dbcore
    id_sort = dbcore.query.FixedFieldSort(u"id", True)
    for bdb_album in bdb.albums(sort = id_sort):
        try:
            # Progress output only; never let console encoding problems
            # abort the scan.  (Narrowed from a bare 'except:'.)
            print("%s - %s" % (bdb_album.id, bdb_album.album.encode('utf-8')))
        except Exception:
            pass
        album, created = Album.get_or_create(beets_id=bdb_album.id)
        _sync_beets_album(album, bdb_album)
        for item in bdb_album.items():
            track, created = Track.get_or_create(beets_id=item.id)
            _sync_beets_item(track, item)
    for item in bdb.items(u'singleton:true'):
        track, created = Track.get_or_create(beets_id=item.id)
        _sync_beets_item(track, item)
    _delete_orphans()
def _find_children(genre, children):
    """Recursively collect all descendant genres of genre.

    The children list is extended in place and also returned.
    """
    logger.info("called with {0}".format(genre.name))
    direct = list(Genre.select().where(Genre.parent == genre.id))
    children.extend(direct)
    for sub_genre in direct:
        _find_children(sub_genre, children)
    return children
class BaseModel(Model):
    """Common base for all bigbeet models: timestamp columns plus the
    shared APSW database binding."""
    # NOTE(review): created_at/updated_at are never populated in this
    # module -- confirm they are set elsewhere (e.g. by migrations).
    created_at = DateTimeField(null=True)
    updated_at = DateTimeField(null=True)
    class Meta:
        database = database
class AlbumGroup(BaseModel):
    """Album type bucket, filled from the beets 'albumtype' field
    (see _sync_beets_album)."""
    name = CharField(null=True) # varchar
    class Meta:
        db_table = 'album_groups'
class Genre(BaseModel):
    """Genre tree node (adjacency list, see _set_genre/_find_children)."""
    name = CharField(null=True, unique=True) # varchar
    parent = IntegerField(null=True)  # id of the parent Genre row, None for roots
    class Meta:
        db_table = 'genres'
class Label(BaseModel):
    """Record label, created on demand from the beets 'label' field."""
    name = CharField(null=True) # varchar
    class Meta:
        db_table = 'labels'
class Artist(BaseModel):
    """Album artist, keyed by MusicBrainz album-artist id and/or name
    (both unique; see the fallback logic in _sync_beets_album)."""
    albumartist_credit = TextField(null=True)
    albumartist_sort = CharField(null=True) # varchar
    country = CharField(null=True) # varchar
    genre = ForeignKeyField(Genre, related_name='artists', db_column='genre_id', null=True)
    mb_albumartistid = CharField(null=True, unique=True) # varchar
    name = CharField(null=True, unique=True) # varchar
    albumartist_initial = CharField(null=True) # varchar, index letter (see _get_artist_initial)
    class Meta:
        db_table = 'artists'
class Album(BaseModel):
    """A beets album mirrored into bigbeet (see _sync_beets_album);
    beets_id links back to the beets library row."""
    added = FloatField(null=True) # float
    album_group = ForeignKeyField(AlbumGroup, related_name='albums', db_column='album_group_id', null=True)
    albumstatus = CharField(null=True) # varchar
    artist = ForeignKeyField(Artist, related_name='albums', db_column='artist_id', null=True)
    beets_id = IntegerField(null=True, unique=True)
    catalognum = CharField(null=True) # varchar
    art_url = CharField(null=True) # varchar
    comp = IntegerField(null=True)
    day = IntegerField(null=True)
    disctotal = IntegerField(null=True)
    genre = ForeignKeyField(Genre, related_name='albums', db_column='genre_id', null=True)
    label = ForeignKeyField(Label, related_name='albums', db_column='label_id', null=True)
    language = CharField(null=True) # varchar
    mb_albumid = CharField(null=True, unique=True) # varchar
    # mb_albumartistid
    mb_releasegroupid = CharField(null=True) # varchar
    month = IntegerField(null=True)
    name = CharField(null=True) # varchar
    tracktotal = IntegerField(null=True)
    original_day = IntegerField(null=True)
    original_month = IntegerField(null=True)
    original_year = IntegerField(null=True)
    year = IntegerField(null=True)
    class Meta:
        db_table = 'albums'
# class SecondaryGenre(BaseModel):
# name = CharField(null=True) # varchar
# class Meta:
# db_table = 'secondary_genres'
# class ArtistSecondaryGenre(BaseModel):
# artist = ForeignKeyField(Artist, db_column='artist_id', null=True)
# position = IntegerField(null=True)
# secondary_genre = ForeignKeyField(SecondaryGenre, db_column='secondary_genre_id', null=True)
# class Meta:
# db_table = 'artist_secondary_genres'
class SchemaMigration(BaseModel):
    """Applied schema migration versions (consulted by _migrate_db)."""
    version = CharField(primary_key=True) # varchar
    class Meta:
        db_table = 'schema_migrations'
class UserTag(BaseModel):
    """Free-form user tag, attached to tracks via TrackTag
    (see _sync_usertags)."""
    name = CharField(unique=True) # varchar
    class Meta:
        db_table = 'user_tags'
class Track(BaseModel):
    """A single audio track mirroring a beets item (see _sync_beets_item);
    beets_id links back to the beets library row."""
    acoustid_fingerprint = CharField(null=True) # varchar
    acoustid = CharField(db_column='acoustid_id', null=True) # varchar
    added = FloatField(null=True) # float
    album = ForeignKeyField(Album, db_column='album_id', null=True)  # None for singletons
    artist = CharField(null=True) # varchar
    asin = CharField(null=True) # varchar
    bitdepth = IntegerField(null=True)
    bitrate = IntegerField(null=True)
    beets_id = IntegerField(null=True)
    bpm = IntegerField(null=True)
    channels = IntegerField(null=True)
    comments = CharField(null=True) # varchar
    composer = CharField(null=True) # varchar
    country = CharField(null=True) # varchar
    day = IntegerField(null=True)
    disc = IntegerField(null=True)
    encoder = CharField(null=True) # varchar
    format = CharField(null=True) # varchar
    genre = CharField(null=True) # varchar
    grouping = CharField(null=True) # varchar
    language = CharField(null=True) # varchar
    length = FloatField(null=True) # float
    # mb_artistid = CharField(null=True)
    # mb_albumartistid = CharField(null=True)
    mb_releasegroupid = CharField(null=True) # varchar
    mb_trackid = CharField(null=True) # varchar
    media = CharField(null=True) # varchar
    month = IntegerField(null=True)
    mtime = FloatField(null=True) # float
    name = CharField(null=True) # varchar
    original_day = IntegerField(null=True)
    original_month = IntegerField(null=True)
    original_year = IntegerField(null=True)
    path = BlobField(null=True)  # raw filesystem path as stored by beets
    samplerate = IntegerField(null=True)
    track = IntegerField(null=True)
    year = IntegerField(null=True)
    class Meta:
        db_table = 'tracks'
class TrackTag(BaseModel):
    """Join table linking tracks to user tags (see _sync_usertags)."""
    track = ForeignKeyField(Track, db_column='track_id', null=False)
    tag = ForeignKeyField(UserTag, db_column='user_tag_id', null=False)
    class Meta:
        # Unique index: at most one row per (tag, track) pair.
        indexes = (
            (('tag', 'track'), True),
        )
        db_table = 'track_tags'
| |
"""
This file contains models for the Quiz module of the e-learning platform
It defines models for the Quiz, QuestionModule, Question and Submission entities.
It also defines the models that record per-user history for the above.
"""
import string
from courseware.models import Course, CourseHistory, LearningElement
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import pre_save, post_save, pre_delete
from django.dispatch.dispatcher import receiver
from util.models import HitCounter, TimeKeeper
from util.methods import receiver_subclasses
from model_utils.managers import InheritanceManager
class Quiz(models.Model):
    """
    This class encapsulates the model of a quiz
    """
    title = models.TextField() # title of the quiz
    # Number of question modules in this quiz; maintained automatically
    question_modules = models.IntegerField(default=0)
    # Number of questions in this quiz; maintained automatically
    questions = models.IntegerField(default=0)
    marks = models.FloatField(default=0.0) # max marks for this quiz
    # Future Extension: playlist of question_modules for easy editing
class QuestionModule(models.Model):
    """
    Each quiz is composed of several QuestionModules where each QuestionModule
    can have 1 or more questions
    """
    #course = models.ForeignKey(
    #    Course,
    #    related_name="QuestionModule_Course",
    #    db_index=True
    #)
    # The quiz this module belongs to.
    quiz = models.ForeignKey(Quiz, related_name='QuestionModule_Quiz')
    title = models.TextField()
    # Future Extension: playlist of questions for easy editing
    # Use a question_module_id to order question modules
class QuizHistory(models.Model):
    """
    This class captures the quiz history of a user
    """
    # NOTE(review): there is no user foreign key on this model -- confirm
    # how a history row is tied to a user (possibly via a subclass or a
    # related model outside this file).
    quiz = models.ForeignKey(Quiz, related_name='QuizHistory_Quiz')
    # Last question module the user was working on (None before starting).
    current_question_module = models.ForeignKey(
        QuestionModule, related_name='QuizHistory_QuestionModule', null=True)
    marks = models.FloatField(default=0.0)
    solved = models.IntegerField(default=0) # Number of questions solved
    is_graded = models.BooleanField(default=False)
class Question(HitCounter):
    """
    This is the basic gradable unit - a question. It can be of multiple types

    The concrete question kinds (single/multiple choice, fixed answer,
    descriptive, programming) are subclasses of this model; ``objects`` is an
    InheritanceManager so querysets can yield the concrete subclass
    instances.  Quiz-level counters (question count, total marks) are kept in
    sync by the signal receivers at the bottom of this module.
    """
    # Grading strategies.
    MANUAL_GRADING = 'M'
    DIRECT_GRADING = 'D'
    GRADER_TYPES = (
        (MANUAL_GRADING, 'Manual Grading'),
        (DIRECT_GRADING, 'Direct Grading')
    )
    # Question kinds; these mirror the subclass models defined below.
    SINGLE_CHOICE_QUESTION = 'S'
    MULTIPLE_CHOICE_QUESTION = 'M'
    FIXED_ANSWER_QUESTION = 'F'
    DESCRIPTIVE_ANSWER_QUESTION = 'D'
    PROGRAMMING_QUESTION = 'P'
    QUESTION_TYPES = (
        (SINGLE_CHOICE_QUESTION, 'Single Choice Correct'),
        (MULTIPLE_CHOICE_QUESTION, 'Multiple Choice Correct'),
        (FIXED_ANSWER_QUESTION, 'Fixed Answer'),
        (DESCRIPTIVE_ANSWER_QUESTION, 'Descriptive Answer'),
        (PROGRAMMING_QUESTION, 'Programming Question')
    )
    #course = models.ForeignKey(
    #    Course,
    #    related_name="Question_Course",
    #    db_index=True
    #)
    quiz = models.ForeignKey(Quiz, related_name="Question_Quiz")
    question_module = models.ForeignKey(
        QuestionModule,
        related_name='Question_QuestionModule',
        db_index=True)  # index this field
    description = models.TextField()
    # hint_available = models.BooleanField(default=False)
    # Optional hint; save() normalizes blank strings to None so that
    # is_hint_available() can rely on a simple None check.
    hint = models.TextField(
        help_text="Hint you want to give if any",
        blank=True,
        null=True)
    grader_type = models.CharField(
        max_length=1,
        help_text="Which grader to use for grading this question",
        choices=GRADER_TYPES,
        default='D')
    #order = models.IntegerField()
    answer_description = models.TextField(
        help_text="Description of the answer",
        blank=True)
    marks = models.FloatField(default=0)
    gradable = models.BooleanField(default=True)
    # Marks awarded per attempt, as a comma-separated string; the last entry
    # repeats for further attempts.  save() substitutes a default when the
    # field is empty or the UI sent the literal string 'undefined'.
    granularity = models.TextField(
        help_text="Enter a string with marks separated by commas. \
        Last entry will be repeated until infinity attempts")
    # granularity after the hint is given
    granularity_hint = models.TextField(
        help_text="Enter a string with marks separated by commas. \
        Last entry will be repeated until infinity attempts",
        blank=True,
        null=True)
    type = models.CharField(  # so that this can be accessed directly
        max_length=1,
        choices=QUESTION_TYPES,
        help_text="Type of question",
    )
    attempts = models.IntegerField(default=1)

    def is_hint_available(self):
        """ Whether a hint is available """
        # save() guarantees hint is either None or non-blank text.
        if self.hint is not None:
            return True
        else:
            return False

    def get_data(self):
        """ Get the data of the model as a dict """
        # NOTE: 'quiz' and 'question_module' are the related model instances
        # themselves, not their primary keys.
        return {
            'id': self.pk,
            'quiz': self.quiz,
            'question_module': self.question_module,
            'description': self.description,
            'hint': self.hint,
            'grader_type': self.grader_type,
            'type': self.type,
            'gradable': self.gradable,
            'granularity': self.granularity,
            'granularity_hint': self.granularity_hint,
            'marks': self.marks,
            'attempts': self.attempts,
            'answer_description': self.answer_description
        }

    def get_default_granularity(self, hint=False):
        """
        Return the fallback granularity string: full marks for each of the
        allowed attempts, then 0 (e.g. marks=5.0, attempts=2 -> "5.0,5.0,0").

        When courseware module is completed, uncomment the below code to
        define the default granularity for a course
        course_info = self.course.course_info
        if hint:
            if (course_info.granularity_hint is not None and
                    course_info.granularity_hint.strip() != ''):
                return course_info.granularity_hint
        else:
            if (course_info.granularity is not None and
                    course_info.granularity.strip() != ''):
                return course_info.granularity
        """
        # NOTE on implementation of default granularity:
        # It is very naive implementation since I couldn't get the serializer
        # to accept Blank value for granularity. So UI send "undefined" to backend
        # and backend takes this as a keyword and assignes default value
        # In future we may need granularity in a different format since the current
        # one seems to be inefficient for larger number of attempts
        # Need to comment out lines from courseware models to add granularity
        # to course info.
        granularity = ((str(self.marks) + ',') * self.attempts) + "0"
        return granularity

    def save(self, *args, **kwargs):
        """ Process some fields before save """
        # Normalize a blank hint to None so is_hint_available() stays simple.
        if self.hint is not None and self.hint.strip() == '':
            self.hint = None
        # 'undefined' is the sentinel the UI sends for an empty field; see
        # the NOTE in get_default_granularity().
        if (self.granularity is None or
                self.granularity.strip() == '' or
                self.granularity == 'undefined'):
            self.granularity = self.get_default_granularity()
        if (self.granularity_hint is None or
                self.granularity_hint.strip() == '' or
                self.granularity_hint == 'undefined'):
            self.granularity_hint = self.get_default_granularity(hint=True)
        if self.answer_description is None:
            self.answer_description = ''
        super(Question, self).save(*args, **kwargs)

    # InheritanceManager lets queries return subclass instances.
    objects = InheritanceManager()

    class Meta:
        """
        This is not an abstract class
        """
        abstract = False
class DescriptiveQuestion(Question):
    """
    A question with a descriptive answer
    """
    # Reference answer text; presumably used for manual grading - confirm.
    answer = models.TextField()
class SingleChoiceQuestion(Question):
    """
    A question which has only 1 of the possible choices as the correct answer
    """
    # Comma-separated list of the available choices.
    options = models.TextField(
        max_length=256,
        help_text='Enter choices seperated by comma: e.g.: choice_1, choice 2')
    # Index (into the options above) of the single correct choice.
    # Fix: dropped the bogus ``max_length=2`` - max_length is not a valid
    # option for IntegerField and is ignored by Django (later Django
    # versions flag it with system check W122).
    answer = models.IntegerField(
        help_text="Answer will be the index of the choice above")
class MultipleChoiceQuestion(Question):
    """
    A question which may have 1 or more correct answers from
    the possible choices
    """
    options = models.CharField(
        max_length=256,
        help_text='Enter choices seperated by comma (no comma at end): \
        e.g.: choice_1, choice 2, choice 2')
    # One Y/N flag per option, comma separated, e.g. 'Y,N,Y,Y'.
    answer = models.CharField(
        max_length=64,
        help_text='Answer will be in the form or Y,N,Y,Y etc')
class FixedAnswerQuestion(Question):
    """
    A question which has a fixed answer to be input in a text field

    ``answer`` stores one or more acceptable answers separated by commas.
    """
    answer = models.CharField(max_length=128)

    def get_answer(self, showToUser=False):
        """
        Return the answer to this question.

        When ``showToUser`` is True, return a single display string with a
        space inserted after each comma; otherwise return the list of
        acceptable answers (split on commas).

        Ideally we want that we should set the answer_shown in question_history
        whenever this is called, but that is expensive. So, wherever we
        call get_answer, set answer_shown = True and save.
        """
        if showToUser:
            answer = self.answer
            if len(self.answer.split(',')) > 1:
                # Fix: use the str method instead of the deprecated
                # Python-2-only string.replace() helper (removed in Py3);
                # behavior is identical.
                answer = answer.replace(',', ', ')
            return answer
        else:
            return self.answer.split(',')

    def get_answer_data(self):
        """
        Return answer data packaged up
        """
        data = {
            'answer': self.get_answer()
        }
        return data
class ProgrammingQuestion(Question):
    """
    A question which requires the submission of a file to be graded
    according to the command given with it

    Inputs/expected outputs live in the related Testcase rows.
    """
    num_testcases = models.IntegerField()
    command = models.TextField()  # command to compile and run the submission
    # string of file extensions separated by comma
    acceptable_languages = models.TextField()
class Testcase(models.Model):
    """
    A testcase is one of the many inputs against which a ProgrammingQuestion is
    to be evaluated
    """
    question = models.ForeignKey(
        ProgrammingQuestion,
        related_name='Testcase_ProgrammingQuestion')
    # Input fed to the submitted program.
    input_text = models.TextField()
    # Expected output for that input.
    correct_output = models.TextField()
class QuestionHistory(models.Model):
    """
    This class captures the history of a question associated with each student

    One row per (question, student) pair - enforced by Meta.unique_together.
    """
    question = models.ForeignKey(
        Question,
        related_name='QuestionHistory_Question')
    student = models.ForeignKey(User, related_name='QuestionHistory_User')
    attempts = models.IntegerField(default=0)  # attempts used so far
    marks = models.FloatField(default=0.0)
    # Progress states for this (question, student) pair.
    NOT_ATTEMPTED = 'N'
    ATTEMPTED_ONCE = 'O'
    AWAITING_RESULTS = 'A'
    SOLVED = 'S'
    status_codes = (
        (NOT_ATTEMPTED, 'Not Attempted'),
        (ATTEMPTED_ONCE, 'Attempted atleast once'),
        (AWAITING_RESULTS, 'Awaiting Results'),
        (SOLVED, 'Solved')
    )
    status = models.CharField(max_length=1, choices=status_codes, default='N')
    hint_taken = models.BooleanField(default=False)
    # Whether the correct answer has been revealed to the student; callers of
    # FixedAnswerQuestion.get_answer are expected to set this (see its
    # docstring).
    answer_shown = models.BooleanField(default=False)

    class Meta:
        """
        question and student combined should be unique
        """
        unique_together = ("question", "student")
class Queue(TimeKeeper):
    """
    This is a utility class to store objects that we need to perform actions
    on asynchronously - email, notification, grading of programming question
    """
    object_id = models.TextField()  # id of notification or email or submission
    # Set once the async worker has handled this entry.
    is_processed = models.BooleanField(default=False)
    # Kind of object queued; determines how object_id is interpreted.
    EMAIL = 'E'
    NOTIFICATION = 'N'
    SUBMISSION = 'S'
    object_types = (
        (EMAIL, 'Email'),
        (NOTIFICATION, 'Notification'),
        (SUBMISSION, 'Submission')
    )
    object_type = models.CharField(
        max_length=1,
        choices=object_types,
        default='E')
    info = models.TextField()  # extra information
class Submission(TimeKeeper):
    """
    A submission is added when a student answers the question. Depending on the
    type of grader being used, the evaluation may be instant or waiting
    """
    #course = models.ForeignKey(
    #    Course,
    #    related_name="Submission_Course",
    #    db_index=True
    #)
    question = models.ForeignKey(Question, related_name='Submission_Question')
    student = models.ForeignKey(User, related_name='Submission_User')
    # Copied from the question at submission time for convenience.
    grader_type = models.CharField(
        max_length=1,
        choices=Question.GRADER_TYPES,
        default='D')  # so its easy to know rather than going to question
    answer = models.TextField()
    # No queue for the time being
    #queue_id = models.ForeignKey(Queue, related_name='Submission_Queue')
    AWAITING_RESULTS = 'A'
    DONE = 'D'
    status_codes = (
        (AWAITING_RESULTS, 'Awaiting Results'),
        (DONE, 'Done')
    )
    status = models.CharField(max_length=1,
                              choices=status_codes,
                              default=AWAITING_RESULTS)
    feedback = models.TextField(default='')  # feedback from the grader
    result = models.FloatField(default=0.0)  # marks given to this submission
    is_correct = models.BooleanField(default=False)
    is_plagiarised = models.BooleanField(default=False)  # plagiarism checking
    # has been checked for plagiarism or not
    has_been_checked = models.BooleanField(default=False)
@receiver_subclasses(pre_save, Question, "question_pre_save")
def update_question_stats_pre_save(sender, **kwargs):
    """
    Before an already-persisted question is re-saved, subtract its marks
    from the parent quiz total.

    NOTE(review): the value subtracted is the *new* in-memory ``marks``, and
    the post_save receiver re-adds marks only for newly created rows - so an
    update appears to permanently remove the marks from the quiz total.
    Confirm this asymmetry is intended.
    """
    question = kwargs['instance']
    if question.pk is None:
        return  # brand-new question: nothing to back out yet
    quiz = question.quiz
    quiz.marks -= question.marks
    quiz.save()
@receiver_subclasses(post_save, Question, "question_post_save")
def update_question_stats_post_save(sender, **kwargs):
    """
    After a brand-new question is saved, bump the parent quiz's question
    count and add the question's marks to the quiz total.
    """
    if not kwargs['created']:
        return  # updates are handled (partially) by the pre_save receiver
    question = kwargs['instance']
    quiz = question.quiz
    quiz.questions += 1
    quiz.marks += question.marks
    quiz.save()
@receiver_subclasses(pre_delete, Question, "question_pre_delete")
def update_question_stats_on_delete(sender, **kwargs):
    """
    Before a question is deleted, decrement the parent quiz's question count
    and subtract the question's marks from the quiz total.
    """
    question = kwargs['instance']
    # The signal fires once for the Question base row and once for the
    # concrete subclass; act only for the subclass so the counters are
    # adjusted a single time.  Exact type check is deliberate - isinstance
    # would match subclasses too.
    if type(question) is not Question:
        quiz = question.quiz
        quiz.questions -= 1
        quiz.marks -= question.marks
        quiz.save()
@receiver(post_save, sender=QuestionModule)
def update_question_module_stats_post_save(sender, **kwargs):
    """
    After a new question module is saved, bump the quiz's module count.
    """
    if not kwargs['created']:
        return  # plain updates do not change the count
    module = kwargs['instance']
    module.quiz.question_modules += 1
    module.quiz.save()
@receiver(pre_delete, sender=QuestionModule)
def update_question_module_stats_on_delete(sender, **kwargs):
    """
    Before a question module is deleted, decrement the quiz's module count.
    """
    quiz = kwargs['instance'].quiz
    quiz.question_modules -= 1
    quiz.save()
| |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch("os.path.exists", new = MagicMock(return_value=True))
class TestHBaseMaster(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
STACK_VERSION = "2.0.6"
TMP_PATH = "/hadoop"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"HBASE_MASTER"}
def test_install_hbase_master_default_no_phx(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "install",
config_file="hbase_no_phx.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
try_install=True,
checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
)
self.assertResourceCalled('Package', 'hbase_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertNoMoreResources()
def test_install_hbase_master_default_with_phx(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "install",
config_file="hbase_with_phx.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
try_install=True,
checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
)
self.assertResourceCalled('Package', 'hbase_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertResourceCalled('Package', 'phoenix_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertNoMoreResources()
  @patch("resource_management.libraries.script.get_provider")
  def test_install_hbase_master_with_version(self, get_provider):
    """
    Install with an explicit stack version in the command: the package name
    must be the fully qualified build (hbase_2_3_0_1_1234), resolved through
    the mocked yum provider package lookup.
    """
    from resource_management.core.providers.package.yumrpm import YumProvider
    provider = YumProvider(None)
    with patch.object(provider, "_lookup_packages") as lookup_packages:
      lookup_packages.return_value = [["hbase_2_3_0_1_1234", "1.0", "testrepo"]]
      get_provider.return_value = provider
      config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/hbase_with_phx.json"
      with open(config_file, "r") as f:
        json_content = json.load(f)
      version = '2.3.0.1-1234'
      # the json file is not a "well formed" install command
      json_content['roleCommand'] = 'INSTALL'
      json_content['commandParams']['version'] = version
      json_content['hostLevelParams']['package_list'] = "[{\"name\":\"hbase_${stack_version}\",\"condition\":\"\",\"skipUpgrade\":false}]"
      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                     classname = "HbaseMaster",
                     command = "install",
                     config_dict = json_content,
                     stack_version = self.STACK_VERSION,
                     target = RMFTestCase.TARGET_COMMON_SERVICES,
                     try_install=True,
                     os_type=('Redhat', '6.4', 'Final'),
                     checked_call_mocks = [(0, "OK.", "")],
      )
      # only assert that the correct package is trying to be installed
      self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
                                retry_count=5,
                                retry_on_repo_unavailability=False)
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
  def test_start_default(self):
    """
    'start' first configures everything (assert_configure_default) and then
    launches the master via hbase-daemon.sh, guarded by a pid-file liveness
    check so a running master is not started twice.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "start",
                   config_file="default.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
      not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
      user = 'hbase'
    )
    self.assertNoMoreResources()
    pass
  def test_start_default_bucketcache(self):
    """
    Same as test_start_default but with a file-backed bucket cache config,
    which additionally expects the /mnt/bucket directory (see
    assert_configure_default's bucketcache_ioengine_as_file flag).
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "start",
                   config_file="default_with_bucket.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default(bucketcache_ioengine_as_file=True)
    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
      not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
      user = 'hbase'
    )
    self.assertNoMoreResources()
    pass
  def test_stop_default(self):
    """
    'stop' issues hbase-daemon.sh stop (only while the pid file shows a live
    master, with a kill -9 fallback after the timeout) and then deletes the
    pid file.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "stop",
                   config_file="default.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
      only_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
      on_timeout = '! ( ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid`',
      timeout = 30,
      user = 'hbase',
    )
    self.assertResourceCalled('File', '/var/run/hbase/hbase-hbase-master.pid',
      action = ['delete'],
    )
    self.assertNoMoreResources()
  def test_decom_default(self):
    """
    Decommission on an unsecured cluster: install draining_servers.rb, then
    for each excluded host (host1, host2) mark it draining and unload its
    regions with region_mover.rb.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "decommission",
                   config_file="default.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
      content = StaticFile('draining_servers.rb'),
      mode = 0755,
    )
    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
      logoutput = True,
      user = 'hbase',
    )
    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
      logoutput = True,
      user = 'hbase',
    )
    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host2',
      logoutput = True,
      user = 'hbase',
    )
    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
      logoutput = True,
      user = 'hbase',
    )
    self.assertNoMoreResources()
  def test_decom_default_draining_only(self):
    """
    Decommission with the draining-only config: only draining_servers.rb
    'remove host1' runs - no region unload is expected.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "decommission",
                   config_file="default.hbasedecom.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
      content = StaticFile('draining_servers.rb'),
      mode = 0755,
    )
    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
      logoutput = True,
      user = 'hbase',
    )
    self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
  def test_start_secured(self):
    """
    'start' on a kerberized cluster: secured configure sequence, then the
    same hbase-daemon.sh start guarded by the pid-file liveness check.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "start",
                   config_file="secured.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured()
    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
      not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
      user = 'hbase',
    )
    self.assertNoMoreResources()
  def test_stop_secured(self):
    """
    'stop' on a kerberized cluster: identical command sequence to the
    unsecured stop (stop via hbase-daemon.sh, then delete the pid file).
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "stop",
                   config_file="secured.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
      only_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
      on_timeout = '! ( ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid`',
      timeout = 30,
      user = 'hbase',
    )
    self.assertResourceCalled('File', '/var/run/hbase/hbase-hbase-master.pid',
      action = ['delete'],
    )
    self.assertNoMoreResources()
  def test_decom_secure(self):
    """
    Decommission on a kerberized cluster: each hbase call is prefixed with a
    kinit using the hbase service keytab and runs with the master JAAS
    config.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                   classname = "HbaseMaster",
                   command = "decommission",
                   config_file="secured.json",
                   stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
      content = StaticFile('draining_servers.rb'),
      mode = 0755,
    )
    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
      logoutput = True,
      user = 'hbase',
    )
    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
      logoutput = True,
      user = 'hbase',
    )
    self.assertNoMoreResources()
  def assert_configure_default(self, bucketcache_ioengine_as_file=False):
    """
    Assert the full resource sequence produced by 'configure' on an
    unsecured cluster: conf directories, XML/env/limits configs, metrics and
    regionservers templates, run/log directories, log4j, and the HDFS root
    and staging directories.

    When bucketcache_ioengine_as_file is True, additionally expect the
    /mnt/bucket directory for the file-backed bucket cache.
    """
    self.assertResourceCalled('Directory', '/etc/hbase',
      mode = 0755
    )
    self.assertResourceCalled('Directory', '/etc/hbase/conf',
      owner = 'hbase',
      group = 'hadoop',
      create_parents = True,
    )
    self.assertResourceCalled('Directory', '/tmp',
      create_parents = True,
      mode = 0777
    )
    if bucketcache_ioengine_as_file:
      self.assertResourceCalled('Directory', '/mnt/bucket',
        create_parents = True,
        owner = 'hbase',
        group = 'hadoop',
        mode = 0755
      )
      pass
    self.assertResourceCalled('Directory', '/hadoop',
      create_parents = True,
      cd_access = 'a',
    )
    self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
      sudo = True,
    )
    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['hbase-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['core-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['hdfs-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
      owner = 'hbase',
      group = 'hadoop'
    )
    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
      owner = 'hbase',
      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
      group = 'hadoop',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
      owner = 'root',
      group = 'root',
      create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
      content = Template('hbase.conf.j2'),
      owner = 'root',
      group = 'root',
      mode = 0644,
    )
    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
      owner = 'hbase',
      template_tag = 'GANGLIA-MASTER',
    )
    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
      owner = 'hbase',
      template_tag = None,
    )
    self.assertResourceCalled('Directory', '/var/run/hbase',
      owner = 'hbase',
      create_parents = True,
      mode = 0755,
      cd_access = 'a',
    )
    self.assertResourceCalled('Directory', '/var/log/hbase',
      owner = 'hbase',
      create_parents = True,
      mode = 0755,
      cd_access = 'a',
    )
    self.assertResourceCalled('File',
      '/etc/hbase/conf/log4j.properties',
      mode=0644,
      group='hadoop',
      owner='hbase',
      content=InlineTemplate('log4jproperties\nline2')
    )
    # Unsecured cluster: HDFS resources use UnknownConfigurationMock for the
    # keytab/principal since security is off.
    self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = False,
      hadoop_bin_dir = '/usr/bin',
      keytab = UnknownConfigurationMock(),
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      owner = 'hbase',
      hadoop_conf_dir = '/etc/hadoop/conf',
      type = 'directory',
      action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
    )
    self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = False,
      hadoop_conf_dir = '/etc/hadoop/conf',
      keytab = UnknownConfigurationMock(),
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      owner = 'hbase',
      hadoop_bin_dir = '/usr/bin',
      type = 'directory',
      action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
      mode = 0711,
    )
    self.assertResourceCalled('HdfsResource', None,
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = False,
      hadoop_bin_dir = '/usr/bin',
      keytab = UnknownConfigurationMock(),
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
      hadoop_conf_dir = '/etc/hadoop/conf',
    )
  def assert_configure_secured(self):
    """
    Assert the resource sequence produced by 'configure' on a kerberized
    cluster.  Mirrors assert_configure_default, plus the master JAAS
    template, and the HDFS resources use the hdfs headless keytab/principal
    with security_enabled=True.
    """
    self.assertResourceCalled('Directory', '/etc/hbase',
      mode = 0755
    )
    self.assertResourceCalled('Directory', '/etc/hbase/conf',
      owner = 'hbase',
      group = 'hadoop',
      create_parents = True,
    )
    self.assertResourceCalled('Directory', '/tmp',
      create_parents = True,
      mode = 0777
    )
    self.assertResourceCalled('Directory', '/hadoop',
      create_parents = True,
      cd_access = 'a',
    )
    self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
      sudo = True,
    )
    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['hbase-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['core-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
      owner = 'hbase',
      group = 'hadoop',
      conf_dir = '/etc/hbase/conf',
      configurations = self.getConfig()['configurations']['hdfs-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
      owner = 'hbase',
      group = 'hadoop',
    )
    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
      owner = 'hbase',
      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
      group = 'hadoop',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
      owner = 'root',
      group = 'root',
      create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
      content = Template('hbase.conf.j2'),
      owner = 'root',
      group = 'root',
      mode = 0644,
    )
    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
      owner = 'hbase',
      template_tag = 'GANGLIA-MASTER',
    )
    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
      owner = 'hbase',
      template_tag = None,
    )
    # Extra resource vs. the default flow: JAAS config for the master.
    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
      owner = 'hbase',
      template_tag = None,
    )
    self.assertResourceCalled('Directory', '/var/run/hbase',
      owner = 'hbase',
      create_parents = True,
      mode = 0755,
      cd_access = 'a',
    )
    self.assertResourceCalled('Directory', '/var/log/hbase',
      owner = 'hbase',
      create_parents = True,
      mode = 0755,
      cd_access = 'a',
    )
    self.assertResourceCalled('File',
      '/etc/hbase/conf/log4j.properties',
      mode=0644,
      group='hadoop',
      owner='hbase',
      content=InlineTemplate('log4jproperties\nline2')
    )
    self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = True,
      hadoop_bin_dir = '/usr/bin',
      keytab = '/etc/security/keytabs/hdfs.headless.keytab',
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      owner = 'hbase',
      hadoop_conf_dir = '/etc/hadoop/conf',
      type = 'directory',
      action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
    )
    self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = True,
      hadoop_conf_dir = '/etc/hadoop/conf',
      keytab = '/etc/security/keytabs/hdfs.headless.keytab',
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      owner = 'hbase',
      hadoop_bin_dir = '/usr/bin',
      type = 'directory',
      action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
      mode = 0711,
    )
    self.assertResourceCalled('HdfsResource', None,
      immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
      security_enabled = True,
      hadoop_bin_dir = '/usr/bin',
      keytab = '/etc/security/keytabs/hdfs.headless.keytab',
      kinit_path_local = '/usr/bin/kinit',
      user = 'hdfs',
      dfs_type = '',
      action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
      hadoop_conf_dir = '/etc/hadoop/conf',
    )
def test_start_default_22(self):
  """Master 'start' on an HDP 2.2 layout (/usr/hdp/current paths, security
  disabled): verifies the full ordered resource sequence -- config
  directories, generated *-site.xml files, env/limits/log4j files,
  HDFS resources, and finally the hbase-daemon.sh start Execute.
  """
  self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                     classname = "HbaseMaster",
                     command = "start",
                     config_file="hbase-2.2.json",
                     stack_version = self.STACK_VERSION,
                     target = RMFTestCase.TARGET_COMMON_SERVICES)
  # Local configuration and scratch directories.
  self.assertResourceCalled('Directory', '/etc/hbase',
                            mode = 0755)
  self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-master/conf',
                            owner = 'hbase',
                            group = 'hadoop',
                            create_parents = True)
  self.assertResourceCalled('Directory', '/tmp',
                            create_parents = True,
                            mode = 0777
                            )
  self.assertResourceCalled('Directory', '/hadoop',
                            create_parents = True,
                            cd_access = 'a',
                            )
  self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
                            sudo = True,
                            )
  # Generated *-site.xml configuration files.
  self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
                            owner = 'hbase',
                            group = 'hadoop',
                            conf_dir = '/usr/hdp/current/hbase-master/conf',
                            configurations = self.getConfig()['configurations']['hbase-site'],
                            configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
  self.assertResourceCalled('XmlConfig', 'core-site.xml',
                            owner = 'hbase',
                            group = 'hadoop',
                            conf_dir = '/usr/hdp/current/hbase-master/conf',
                            configurations = self.getConfig()['configurations']['core-site'],
                            configuration_attributes = self.getConfig()['configuration_attributes']['core-site'])
  self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                            owner = 'hbase',
                            group = 'hadoop',
                            conf_dir = '/usr/hdp/current/hbase-master/conf',
                            configurations = self.getConfig()['configurations']['hdfs-site'],
                            configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
  self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
                            owner = 'hbase',
                            group = 'hadoop',
                            conf_dir = '/usr/hdp/current/hbase-master/conf',
                            configurations = self.getConfig()['configurations']['hbase-policy'],
                            configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy'])
  # Environment, limits, metrics, and log4j files.
  self.assertResourceCalled('File', '/usr/hdp/current/hbase-master/conf/hbase-env.sh',
                            owner = 'hbase',
                            content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
                            group = 'hadoop'
                            )
  self.assertResourceCalled('Directory', '/etc/security/limits.d',
                            owner = 'root',
                            group = 'root',
                            create_parents = True,
                            )
  self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
                            content = Template('hbase.conf.j2'),
                            owner = 'root',
                            group = 'root',
                            mode = 0644,
                            )
  self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/hadoop-metrics2-hbase.properties',
                            owner = 'hbase',
                            template_tag = 'GANGLIA-MASTER')
  self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/regionservers',
                            owner = 'hbase',
                            template_tag = None)
  self.assertResourceCalled('Directory', '/var/run/hbase',
                            owner = 'hbase',
                            create_parents = True,
                            mode = 0755,
                            cd_access = 'a',
                            )
  self.assertResourceCalled('Directory', '/var/log/hbase',
                            owner = 'hbase',
                            create_parents = True,
                            mode = 0755,
                            cd_access = 'a',
                            )
  self.assertResourceCalled('File',
                            '/usr/hdp/current/hbase-master/conf/log4j.properties',
                            mode=0644,
                            group='hadoop',
                            owner='hbase',
                            content=InlineTemplate('log4jproperties\nline2'))
  # HDFS resources: the HBase root dir, the staging dir, then the final
  # no-op HdfsResource that flushes pending actions ('execute').
  self.assertResourceCalled('HdfsResource', 'hdfs://nn1/apps/hbase/data',
                            immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                            security_enabled = False,
                            hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                            keytab = UnknownConfigurationMock(),
                            default_fs = 'hdfs://nn1',
                            hdfs_site = self.getConfig()['configurations']['hdfs-site'],
                            kinit_path_local = '/usr/bin/kinit',
                            principal_name = UnknownConfigurationMock(),
                            user = 'hdfs',
                            dfs_type = '',
                            owner = 'hbase',
                            hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                            type = 'directory',
                            action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                            )
  self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
                            immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                            security_enabled = False,
                            hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                            keytab = UnknownConfigurationMock(),
                            default_fs = 'hdfs://nn1',
                            hdfs_site = self.getConfig()['configurations']['hdfs-site'],
                            kinit_path_local = '/usr/bin/kinit',
                            principal_name = UnknownConfigurationMock(),
                            user = 'hdfs',
                            dfs_type = '',
                            owner = 'hbase',
                            hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                            type = 'directory',
                            action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                            mode = 0711,
                            )
  self.assertResourceCalled('HdfsResource', None,
                            immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
                            security_enabled = False,
                            hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                            keytab = UnknownConfigurationMock(),
                            default_fs = 'hdfs://nn1',
                            hdfs_site = self.getConfig()['configurations']['hdfs-site'],
                            kinit_path_local = '/usr/bin/kinit',
                            principal_name = UnknownConfigurationMock(),
                            user = 'hdfs',
                            dfs_type = '',
                            action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                            hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                            )
  # Daemon start, guarded by a liveness check on the pid file.
  self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-master/conf start master',
                            not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
                            user = 'hbase')
  self.assertNoMoreResources()
def test_upgrade_backup(self):
  """The take_snapshot upgrade command shells out to the hbase shell once."""
  snapshot_cmd = " echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell"
  self.executeScript(
      self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py",
      classname = "HbaseMasterUpgrade",
      command = "take_snapshot",
      config_file="hbase-preupgrade.json",
      stack_version = self.STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES)
  # Exactly one Execute resource, run as the hbase user.
  self.assertResourceCalled('Execute', snapshot_cmd, user = 'hbase')
  self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart(self, call_mock):
  """pre_upgrade_restart on 2.2.1.0-3242 runs hdp-select and never shells out.

  Bug fix: ``Mock`` has no ``side_effects`` attribute -- the original
  assignment merely created an unused attribute and configured nothing.
  Renamed to ``side_effect`` (singular).  The change is inert at runtime here
  because the test asserts the mock is never called, but it now expresses the
  intended setup.
  """
  call_mock.side_effect = [(0, None), (0, None)]
  config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
  with open(config_file, "r") as f:
    json_content = json.load(f)
  version = '2.2.1.0-3242'
  json_content['commandParams']['version'] = version
  mocks_dict = {}
  self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                     classname = "HbaseMaster",
                     command = "pre_upgrade_restart",
                     config_dict = json_content,
                     config_overrides = self.CONFIG_OVERRIDES,
                     stack_version = self.STACK_VERSION,
                     target = RMFTestCase.TARGET_COMMON_SERVICES,
                     mocks_dict = mocks_dict)
  self.assertResourceCalled('Execute',
                            ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True,)
  self.assertFalse(call_mock.called)
  self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_upgrade_23(self, call_mock):
  """pre_upgrade_restart against stack version 2.3.0.0-1234."""
  # NOTE(review): Mock has no ``side_effects`` attribute -- this assignment
  # only creates an unused attribute, so the configured return values are
  # never used.  It was probably meant to be ``side_effect``; before renaming,
  # confirm shell.call is invoked at most twice or the fixed-length iterable
  # would raise StopIteration.
  call_mock.side_effects = [(0, None), (0, None)]
  config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
  with open(config_file, "r") as f:
    json_content = json.load(f)
  version = '2.3.0.0-1234'
  json_content['commandParams']['version'] = version
  mocks_dict = {}
  self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                     classname = "HbaseMaster",
                     command = "pre_upgrade_restart",
                     config_dict = json_content,
                     config_overrides = self.CONFIG_OVERRIDES,
                     stack_version = self.STACK_VERSION,
                     target = RMFTestCase.TARGET_COMMON_SERVICES,
                     mocks_dict = mocks_dict)
  # Earlier resources (conf-select etc.) are ignored; only the hdp-select
  # invocation is pinned.
  self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True)
| |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the sequence tagging dataset."""
import codecs
import collections
import enum
import tensorflow.compat.v2 as tf
# Values for padding, unknown words and a root.
# These three tokens occupy the first vocabulary ids in every vocab built by
# create_vocabs; real corpus entries start at ROOT_ID + 1.
PAD = '<p>'      # padding token
PAD_ID = 0
UNKNOWN = '<u>'  # out-of-vocabulary token
UNKNOWN_ID = 1
ROOT = '<r>'     # artificial root token prepended to every sentence
ROOT_ID = 2
class CoNLLAttributes(enum.Enum):
  """CoNLL attribute names and indices.

  Member values are the 0-based column indices of a UD CoNLL file.
  A UD CoNLL file looks like:
  1    They     they    PRON    PRP    Case=Nom|Number=Plur      2    nsubj
  2    buy      buy     VERB    VBP    Number=Plur|PTense=Pres   0    root
  3    books    book    NOUN    NNS    Number=Plur               2    obj
  4    .        .       PUNCT   .      _                         2    punct
  For details, please see: http://universaldependencies.org/format.html.
  """
  ID = 0      # token position within the sentence (1-based in the file)
  FORM = 1    # surface form
  LEMMA = 2   # lemma / base form
  UPOS = 3    # universal part-of-speech tag
  XPOS = 4    # language-specific part-of-speech tag
  FEATS = 5   # morphological features
  HEAD = 6    # position of the syntactic head ('0' marks the root)
  DEPREL = 7  # dependency relation to the head
def create_vocabs(filename, max_num_forms=100000):
  """Loads corpus and create vocabulary lists.

  Args:
    filename: file name of a corpus.
    max_num_forms: maximum number of tokens included.

  Returns:
    Dictionary containing named vocab dictionaries ('forms' and 'xpos'),
    each mapping a string to an integer id; the special PAD/UNKNOWN/ROOT
    tokens occupy ids 0..2 and corpus entries follow by frequency.
  """
  form_counts = collections.Counter()
  xpos_counts = collections.Counter()
  with tf.io.gfile.GFile(filename, 'rb') as f:
    for raw_line in codecs.getreader('utf-8')(f):
      stripped = raw_line.strip()
      columns = stripped.split(u'\t')
      # Skip comment lines and sentence-separating blank lines.
      if stripped.startswith('#') or not columns[0]:
        continue
      form_counts[columns[CoNLLAttributes.FORM.value]] += 1
      xpos_counts[columns[CoNLLAttributes.XPOS.value]] += 1

  def build_vocab(counted):
    """Map special tokens to their fixed ids, then entries by frequency."""
    vocab = {PAD: PAD_ID, UNKNOWN: UNKNOWN_ID, ROOT: ROOT_ID}
    for index, (entry, _) in enumerate(counted, start=ROOT_ID + 1):
      vocab[entry] = index
    return vocab

  return {
      'forms': build_vocab(form_counts.most_common(max_num_forms)),
      'xpos': build_vocab(xpos_counts.most_common()),
  }
def create_token(token, attributes, vocabs):
  """Map for a token a selected subset of attributes to indices.

  Input example: CoNLL 09 representation for a token.
    ['Ms.', 'ms.', 'ms.', 'NNP', '_', '2', 'TITLE]
  Output example: indices for the selected attributes, e.g. [word form,
  part-of-speech tag, and head]: [1025, 3, 1]

  Args:
    token: CoNLL token attributes (list of column strings).
    attributes: selected CoNLLAttributes members.
    vocabs: dictionary of vocabs as produced by create_vocabs.

  Returns:
    List of attribute ids for a token, e.g. [1025, 3] with word id and pos id.

  Raises:
    ValueError: CoNLL attribute requested but not covered by mapping.
  """
  ids = []
  for attribute in attributes:
    column = token[attribute.value]
    if attribute is CoNLLAttributes.FORM:
      # Unknown forms map to the shared UNKNOWN id.
      ids.append(vocabs['forms'].get(column, UNKNOWN_ID))
    elif attribute is CoNLLAttributes.XPOS:
      ids.append(vocabs['xpos'].get(column, UNKNOWN_ID))
    elif attribute is CoNLLAttributes.HEAD:
      # Heads are stored as integer positions, not vocab ids.
      ids.append(int(column))
    else:
      raise ValueError('CoNLL index %s not covered by mapping.' %
                       str(attribute.name))
  return ids
def create_sentence_with_root(attributes, vocabs):
  """Create a sentence containing only an artificial root token.

  Args:
    attributes: attributes to extract from the root token.
    vocabs: dictionary of vocabs.

  Returns:
    A list representing a sentence containing the root only,
    e.g., [[2, 1, 0]] for root word, unknown xpos, and head 0.
  """
  # Build an artificial root row; CoNLL 09 has 12 columns, all set to the
  # ROOT marker except the position and head columns.
  columns = [ROOT] * 12
  columns[CoNLLAttributes.ID.value] = '0'
  columns[CoNLLAttributes.HEAD.value] = '0'
  root_token = create_token(columns, attributes, vocabs)
  # A single selected attribute is stored as a scalar, mirroring
  # sentences_from_conll_data.
  return [root_token[0] if len(root_token) == 1 else root_token]
def sentences_from_conll_data(corpus_filename,
                              vocabs,
                              attributes,
                              max_sentence_length=1000):
  """Load and returns conll data in list format.

  Args:
    corpus_filename: filename of corpus.
    vocabs: dictionary of vocabs.
    attributes: list of conll attributes to include into the batch.
    max_sentence_length: cut off sentences longer as max tokens.

  Yields:
    A sentence as a list of tokens while tokens are lists of attributes
    (or scalars when exactly one attribute is selected).
  """
  with tf.io.gfile.GFile(corpus_filename, 'rb') as f:
    # Every sentence starts with the artificial root token.
    sentence = create_sentence_with_root(attributes, vocabs)
    for line in codecs.getreader('utf-8')(f):
      line = line.strip()
      if line.startswith('#'):  # Skip CoNLL comment lines.
        continue
      split = line.split('\t')
      if split[0]:  # Not an empty line, process next token:
        # Tokens beyond max_sentence_length are silently dropped.
        if len(sentence) < max_sentence_length:
          # With a single attribute, store scalars instead of 1-item lists.
          if len(attributes) == 1:
            sentence.append(create_token(split, attributes, vocabs)[0])
          else:
            sentence.append(create_token(split, attributes, vocabs))
      else:  # Sentences start with an empty line, yield sentence:
        yield sentence
        # Reset sentence.
        sentence = create_sentence_with_root(attributes, vocabs)
    # Flush a trailing sentence when the file does not end in a blank line.
    if len(sentence) > 1:  # sentences does not only contain a root.
      yield sentence
def sentence_dataset_dict(filename,
                          vocabs,
                          attributes_input,
                          attributes_target,
                          batch_size,
                          bucket_size,
                          repeat=None,
                          prefetch_size=tf.data.experimental.AUTOTUNE):
  """Combines sentences into a dataset of padded batches.

  Args:
    filename: file name of a corpus.
    vocabs: dictionary of dictionaries to map from strings to ids.
    attributes_input: attributes for the input.
    attributes_target: target attributes; when empty, no 'targets' key is
      included in the dataset.
    batch_size: the size of a batch.
    bucket_size: the size of a bucket (also the padded sentence length and
      the max sentence length cutoff).
    repeat: number of times the dataset is repeated (None = indefinitely).
    prefetch_size: prefetch size of the data.

  Returns:
    Returns dataset as dictionary containing the data as key value pairs.
  """
  data_keys = ['inputs']
  if attributes_target:
    data_keys.append('targets')

  def generator():
    """Generator to create the data."""
    # Inputs and targets come from two independent passes over the same
    # file, so the i-th target sentence lines up with the i-th input.
    input_generator = sentences_from_conll_data(
        filename, vocabs, attributes_input, max_sentence_length=bucket_size)
    if attributes_target:
      target_generator = sentences_from_conll_data(
          filename, vocabs, attributes_target, max_sentence_length=bucket_size)
    for inputs in input_generator:
      data = {'inputs': inputs}
      if attributes_target:
        data['targets'] = next(target_generator)
      yield data

  output_types = {k: tf.float32 for k in data_keys}
  output_shapes = {k: (None,) for k in data_keys}
  dataset = tf.data.Dataset.from_generator(
      generator, output_types=output_types, output_shapes=output_shapes)
  # cache the dataset in memory and repeat.
  dataset = dataset.cache()
  dataset = dataset.repeat(repeat)
  # static padding up to bucket size.
  padded_shapes = {k: [bucket_size] for k in data_keys}
  dataset = dataset.padded_batch(
      batch_size=batch_size, padded_shapes=(padded_shapes))
  dataset = dataset.prefetch(prefetch_size)
  return dataset
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.actions.installer."""
from unittest import mock
from absl.testing import absltest
from glazier.lib import buildinfo
from glazier.lib import stage
from glazier.lib.actions import installer
from pyfakefs import fake_filesystem
from glazier.lib import constants
class InstallerTest(absltest.TestCase):
  """Unit tests for the actions in glazier.lib.actions.installer.

  Fix: testShowChooser contained the same Display assertion twice; the
  duplicate line has been removed (no behavioral change).
  """

  @mock.patch.object(buildinfo, 'BuildInfo', autospec=True)
  def testAddChoice(self, build_info):
    """AddChoice.Run registers the choice dict with BuildInfo."""
    choice = {
        'type': 'toggle',
        'prompt': 'Set system shell to PowerShell',
        'name': 'core_ps_shell',
        'options': [{
            'tip': '',
            'value': False,
            'label': 'False'
        }, {
            'default': True,
            'tip': '',
            'value': True,
            'label': 'True'
        }]
    }
    a = installer.AddChoice(choice, build_info)
    a.Run()
    build_info.AddChooserOption.assert_called_with(choice)

  def testAddChoiceValidate(self):
    """Validate rejects each malformed field of a choice in turn."""
    choice = {
        'type': 'toggle',
        'prompt': 'Set system shell to PowerShell',
        'name': 'core_ps_shell',
        'options': [{
            'tip': '',
            'value': False,
            'label': 'False'
        }, {
            'default': True,
            'tip': '',
            'value': True,
            'label': 'True'
        }]
    }
    a = installer.AddChoice(choice, None)
    a.Validate()
    # prompt (name, type)
    choice['name'] = True
    self.assertRaises(installer.ValidationError, a.Validate)
    # tip
    choice['name'] = 'core_ps_shell'
    choice['options'][0]['tip'] = True
    self.assertRaises(installer.ValidationError, a.Validate)
    # default
    choice['options'][0]['tip'] = ''
    choice['options'][0]['default'] = 3
    self.assertRaises(installer.ValidationError, a.Validate)
    # label
    choice['options'][0]['default'] = True
    choice['options'][0]['label'] = False
    self.assertRaises(installer.ValidationError, a.Validate)
    # value
    choice['options'][0]['label'] = 'False'
    choice['options'][0]['value'] = []
    self.assertRaises(installer.ValidationError, a.Validate)
    # options dict
    choice['options'][0] = False
    self.assertRaises(installer.ValidationError, a.Validate)
    # options list
    choice['options'] = False
    self.assertRaises(installer.ValidationError, a.Validate)
    # missing name
    del choice['name']
    self.assertRaises(installer.ValidationError, a.Validate)
    # non-dict choice
    a = installer.AddChoice(False, None)
    self.assertRaises(installer.ValidationError, a.Validate)

  @mock.patch.object(buildinfo, 'BuildInfo', autospec=True)
  def testBuildInfoDump(self, build_info):
    """BuildInfoDump serializes build info to the system cache."""
    d = installer.BuildInfoDump(None, build_info)
    d.Run()
    build_info.Serialize.assert_called_with('{}/build_info.yaml'.format(
        constants.SYS_CACHE))

  @mock.patch.object(installer.registry, 'set_value', autospec=True)
  @mock.patch.object(buildinfo, 'BuildInfo', autospec=True)
  def testBuildInfoSave(self, build_info, sv):
    """BuildInfoSave writes dumped values to the registry (timers split out)."""
    fs = fake_filesystem.FakeFilesystem()
    installer.open = fake_filesystem.FakeFileOpen(fs)
    installer.os = fake_filesystem.FakeOsModule(fs)
    timer_root = r'{0}\{1}'.format(constants.REG_ROOT, 'Timers')
    fs.create_file(
        '{}/build_info.yaml'.format(constants.SYS_CACHE),
        contents='{BUILD: {opt 1: true, TIMER_opt 2: some value, opt 3: 12345}}\n'
    )
    s = installer.BuildInfoSave(None, build_info)
    s.Run()
    sv.assert_has_calls([
        mock.call('opt 1', True, 'HKLM', constants.REG_ROOT),
        mock.call('TIMER_opt 2', 'some value', 'HKLM', timer_root),
        mock.call('opt 3', 12345, 'HKLM', constants.REG_ROOT),
    ],
                        any_order=True)
    # Second Run exercises the already-processed code path.
    s.Run()

  @mock.patch.object(installer.logging, 'debug', autospec=True)
  @mock.patch.object(buildinfo, 'BuildInfo', autospec=True)
  def testBuildInfoSaveError(self, build_info, d):
    """A missing dump file is logged and skipped, not raised."""
    installer.BuildInfoSave(None, build_info).Run()
    d.assert_called_with('%s does not exist - skipped processing.',
                         '{}/build_info.yaml'.format(constants.SYS_CACHE))

  def testChangeServer(self):
    """ChangeServer updates config server/path and signals a restart."""
    build_info = buildinfo.BuildInfo()
    d = installer.ChangeServer(
        ['http://new-server.example.com', '/new/conf/root'], build_info)
    self.assertRaises(installer.ServerChangeEvent, d.Run)
    self.assertEqual(build_info.ConfigServer(), 'http://new-server.example.com')
    self.assertEqual(build_info.ActiveConfigPath(), '/new/conf/root')

  @mock.patch.object(installer.file_system, 'CopyFile', autospec=True)
  def testExitWinPE(self, copy):
    """ExitWinPE copies the task list to cache and raises a restart."""
    cache = constants.SYS_CACHE
    ex = installer.ExitWinPE(None, None)
    with self.assertRaises(installer.RestartEvent):
      ex.Run()
    copy.assert_has_calls([
        mock.call(['/task_list.yaml',
                   '%s/task_list.yaml' % cache], mock.ANY),
    ])
    copy.return_value.Run.assert_called()

  @mock.patch.object(installer.log_copy, 'LogCopy', autospec=True)
  def testLogCopy(self, copy):
    """LogCopy copies to the event log and optionally a share; errors pass."""
    log_file = r'X:\glazier.log'
    log_host = 'log-server.example.com'
    # copy eventlog
    lc = installer.LogCopy([log_file], None)
    lc.Run()
    copy.return_value.EventLogCopy.assert_called_with(log_file)
    self.assertFalse(copy.return_value.ShareCopy.called)
    copy.reset_mock()
    # copy both
    lc = installer.LogCopy([log_file, log_host], None)
    lc.Run()
    copy.return_value.EventLogCopy.assert_called_with(log_file)
    copy.return_value.ShareCopy.assert_called_with(log_file, log_host)
    copy.reset_mock()
    # copy errors are swallowed
    copy.return_value.EventLogCopy.side_effect = installer.log_copy.LogCopyError
    copy.return_value.ShareCopy.side_effect = installer.log_copy.LogCopyError
    lc.Run()
    copy.return_value.EventLogCopy.assert_called_with(log_file)
    copy.return_value.ShareCopy.assert_called_with(log_file, log_host)

  def testLogCopyValidate(self):
    """LogCopy args must be a list of one or two strings."""
    log_host = 'log-server.example.com'
    lc = installer.LogCopy(r'X:\glazier.log', None)
    self.assertRaises(installer.ValidationError, lc.Validate)
    lc = installer.LogCopy([1, 2, 3], None)
    self.assertRaises(installer.ValidationError, lc.Validate)
    lc = installer.LogCopy([1], None)
    self.assertRaises(installer.ValidationError, lc.Validate)
    lc = installer.LogCopy([r'X:\glazier.log'], None)
    lc.Validate()
    lc = installer.LogCopy([r'X:\glazier.log', log_host], None)
    lc.Validate()

  @mock.patch.object(installer.time, 'sleep', autospec=True)
  def testSleep(self, sleep):
    """Sleep delegates to time.sleep with the requested duration."""
    s = installer.Sleep([1520], None)
    s.Run()
    sleep.assert_called_with(1520)

  @mock.patch.object(installer.time, 'sleep', autospec=True)
  def testSleepString(self, sleep):
    """An optional reason string does not affect the sleep duration."""
    s = installer.Sleep([1234, 'Some Reason.'], None)
    s.Run()
    sleep.assert_called_with(1234)

  def testSleepValidate(self):
    """Sleep args must be [int] or [int, str]."""
    s = installer.Sleep('30', None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.Sleep([1, 2, 3], None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.Sleep(['30'], None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.Sleep([30], None)
    s.Validate()
    s = installer.Sleep([30, 'Some reason.'], None)
    s.Validate()

  @mock.patch.object(installer.chooser, 'Chooser', autospec=True)
  @mock.patch.object(buildinfo, 'BuildInfo', autospec=True)
  def testShowChooser(self, build_info, chooser):
    """ShowChooser displays the UI and stores/flushes the responses."""
    c = installer.ShowChooser(None, build_info)
    c.Run()
    self.assertTrue(chooser.return_value.Display.called)
    build_info.StoreChooserResponses.assert_called_with(
        chooser.return_value.Responses.return_value)
    self.assertTrue(build_info.FlushChooserOptions.called)

  @mock.patch.object(installer.stage, 'set_stage', autospec=True)
  def testStartStage(self, set_stage):
    """StartStage sets the requested stage number."""
    s = installer.StartStage([1], None)
    s.Run()
    set_stage.assert_called_with(1)

  @mock.patch.object(installer.stage, 'set_stage', autospec=True)
  @mock.patch.object(installer.stage, 'exit_stage', autospec=True)
  def testStartNonTerminalStage(self, exit_stage, set_stage):
    """A non-terminal stage is set but not exited."""
    installer.StartStage([50, False], None).Run()
    set_stage.assert_called_with(50)
    self.assertFalse(exit_stage.called)

  @mock.patch.object(installer.stage, 'set_stage', autospec=True)
  @mock.patch.object(installer.stage, 'exit_stage', autospec=True)
  def testStartTerminalStage(self, exit_stage, set_stage):
    """A terminal stage is set and immediately exited."""
    installer.StartStage([100, True], None).Run()
    set_stage.assert_called_with(100)
    exit_stage.assert_called_with(100)

  @mock.patch.object(installer.stage, 'set_stage', autospec=True)
  def testStartStageException(self, set_stage):
    """stage.Error is surfaced as an ActionError."""
    set_stage.side_effect = stage.Error('Test')
    ss = installer.StartStage([2], None)
    self.assertRaises(installer.ActionError, ss.Run)

  def testStartStageValidate(self):
    """StartStage args must be a list with a single int."""
    s = installer.StartStage('30', None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.StartStage([1, 2, 3], None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.StartStage(['30'], None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.StartStage([30, 'Hello'], None)
    self.assertRaises(installer.ValidationError, s.Validate)
    s = installer.StartStage([30], None)
    s.Validate()
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| |
import csv
import os
from collections import defaultdict
from ...exceptions import AlphabetError
def write_csv_file(path, header, data, mode='w'):
    """Write dict rows to ``path`` as a comma-delimited CSV file.

    Parameters
    ----------
    path : str
        destination file path
    header : list
        field names, in column order
    data : iterable of dict
        rows to write
    mode : str
        'w' to (re)create the file with a header row, 'a' to append rows only
    """
    with open(path, mode, newline='', encoding='utf8') as out_file:
        writer = csv.DictWriter(out_file, header, delimiter=',')
        # The header row is only emitted when creating a fresh file.
        if mode == 'w':
            writer.writeheader()
        writer.writerows(data)
def data_to_type_csvs(corpus_context, types, type_headers):
    """
    Convert a types object into per-annotation-type CSV files.

    Fixes: removed the unused local ``tfs`` dict and the redundant
    ``header = v`` alias.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus
    types : obj
        the types in the corpus, keyed by annotation type
    type_headers : dict
        maps annotation type name to its list of column names
    """
    directory = corpus_context.config.temporary_directory('csv')
    for annotation_type, header in type_headers.items():
        path = os.path.join(directory, '{}_type.csv'.format(annotation_type))
        # Each type tuple is zipped with the header to form a row dict.
        data = [dict(zip(header, t)) for t in types[annotation_type]]
        write_csv_file(path, header, data)
def data_to_graph_csvs(corpus_context, data):
    """
    Convert a DiscourseData object into CSV files for efficient loading
    of graph nodes and relationships.

    Fix: removed the unused local ``segment_type = data.segment_type``.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus; supplies the temporary CSV directory and corpus name
    data : :class:`~polyglotdb.io.helper.DiscourseData`
        Data to load into a graph
    """
    directory = corpus_context.config.temporary_directory('csv')
    rfs = {}
    rel_writers = {}
    token_headers = data.token_headers
    # One token CSV per (speaker, annotation type).  Opened in append mode:
    # header rows are written ahead of time by the create_*_csvs helpers.
    for s in data.speakers:
        for x in data.annotation_types:
            path = os.path.join(directory, '{}_{}.csv'.format(s, x))
            rfs[s, x] = open(path, 'a', newline='', encoding='utf8')
            rel_writers[s, x] = csv.DictWriter(rfs[s, x], token_headers[x], delimiter=',')
    # One subannotation CSV per (speaker, annotation type, subannotation).
    subanno_files = {}
    subanno_writers = {}
    for sp in data.speakers:
        for k, v in data.hierarchy.subannotations.items():
            for s in v:
                path = os.path.join(directory, '{}_{}_{}.csv'.format(sp, k, s))
                subanno_files[sp, k, s] = open(path, 'a', newline='', encoding='utf8')
                header = ['id', 'begin', 'end', 'annotation_id', 'label']
                subanno_writers[sp, k, s] = csv.DictWriter(subanno_files[sp, k, s], header, delimiter=',')
    for level in data.highest_to_lowest():
        for d in data[level]:
            # Annotations without timing information are skipped entirely.
            if d.begin is None or d.end is None:
                continue
            token_additional = dict(zip(d.token_keys(), d.token_values()))
            # Link to the containing annotation one level up, if any.
            if d.super_id is not None:
                token_additional[data[level].supertype] = d.super_id
            s = d.speaker
            if s is None:
                s = 'unknown'
            rel_writers[s, level].writerow(dict(begin=d.begin, end=d.end,
                                                type_id=d.sha(corpus=corpus_context.corpus_name),
                                                id=d.id, speaker=s, discourse=data.name,
                                                previous_id=d.previous_id,
                                                **token_additional))
            if d.subannotations:
                for sub in d.subannotations:
                    row = {'begin': sub.begin, 'end': sub.end, 'label': sub.label,
                           'annotation_id': d.id, 'id': sub.id}
                    subanno_writers[s, level, sub.type].writerow(row)
    for x in rfs.values():
        x.close()
    for x in subanno_files.values():
        x.close()
def utterance_data_to_csvs(corpus_context, speaker_data):
    """
    Append per-speaker utterance rows to each '{speaker}_utterance.csv' file.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.CorpusContext`
        the corpus; supplies the temporary CSV directory
    speaker_data : dict
        maps speaker name to a list of row dicts with the keys
        'id', 'prev_id', 'begin_word_id' and 'end_word_id'
    """
    for s, data in speaker_data.items():
        path = os.path.join(corpus_context.config.temporary_directory('csv'),
                            '{}_utterance.csv'.format(s))
        header = ['id', 'prev_id', 'begin_word_id', 'end_word_id']
        # Append mode: header rows were written by create_utterance_csvs.
        write_csv_file(path, header, data, 'a')
def utterance_enriched_data_to_csvs(corpus_context, utterance_data):
    """
    Write utterance enrichment properties to 'utterance_enrichment.csv'.

    Fix: the file is now opened with ``newline=''`` and ``encoding='utf8'``,
    matching ``write_csv_file`` -- without ``newline=''`` the csv module
    emits blank rows on Windows.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.CorpusContext`
        the corpus; supplies the temporary CSV directory
    utterance_data : dict
        maps utterance id to a dict of property name -> value; every value
        dict is assumed to share the same keys
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'utterance_enrichment.csv'), 'w',
              newline='', encoding='utf8') as f:
        # Column order: id first, then the property names sorted.
        header = ['id'] + sorted(next(iter(utterance_data.values())).keys())
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(utterance_data.items()):
            v['id'] = k
            writer.writerow(v)
def syllables_data_to_csvs(corpus_context, speaker_data):
    """
    Append per-speaker syllable rows to each '{speaker}_syllable.csv' file.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    speaker_data : dict
        maps speaker name to a list of row dicts matching the header below
    """
    header = ['id', 'prev_id', 'vowel_id', 'onset_id', 'coda_id', 'begin', 'end', 'label', 'type_id']
    for speaker, rows in speaker_data.items():
        csv_path = os.path.join(corpus_context.config.temporary_directory('csv'),
                                '{}_syllable.csv'.format(speaker))
        # Append mode: header rows were written by create_syllabic_csvs.
        write_csv_file(csv_path, header, rows, 'a')
def syllables_enrichment_data_to_csvs(corpus_context, data):
    """
    Write syllable enrichment properties to 'syllable_import.csv'.

    Fixes: the file is now opened with ``newline=''`` and
    ``encoding='utf8'`` (consistent with ``write_csv_file``; prevents blank
    rows on Windows), and the ``except``/``raise`` statements use standard
    un-parenthesized form.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    data : dict
        maps syllable label to a dict of property name -> value

    Raises
    ------
    AlphabetError
        if ``data`` is empty
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'syllable_import.csv'), 'w',
              newline='', encoding='utf8') as f:
        try:
            header = ['label'] + sorted(next(iter(data.values())).keys())
        except StopIteration:
            # An empty data dict means no syllables matched the alphabet.
            raise AlphabetError
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(data.items()):
            v['label'] = k
            writer.writerow(v)
def nonsyls_data_to_csvs(corpus_context, speaker_data):
    """
    Append per-speaker non-syllabic segment rows to each
    '{speaker}_nonsyl.csv' file.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    speaker_data : dict
        maps speaker name to a list of row dicts matching the header below
    """
    for s, data in speaker_data.items():
        path = os.path.join(corpus_context.config.temporary_directory('csv'),
                            '{}_nonsyl.csv'.format(s))
        header = ['id', 'prev_id', 'break', 'onset_id', 'coda_id', 'begin', 'end', 'label', 'type_id']
        # Append mode: header rows were written by create_nonsyllabic_csvs.
        write_csv_file(path, header, data, 'a')
def subannotations_data_to_csv(corpus_context, type, data):
    """
    Write subannotation rows to '{type}_subannotations.csv'.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.AnnotatedContext`
        the corpus object
    type : str
        subannotation type name used in the output file name
        (note: parameter shadows the builtin ``type`` but is part of the
        public signature)
    data : list of dict
        rows to write; all rows are assumed to share the first row's keys
    """
    path = os.path.join(corpus_context.config.temporary_directory('csv'),
                        '{}_subannotations.csv'.format(type))
    # Column order is the sorted key set of the first row.
    header = sorted(data[0].keys())
    write_csv_file(path, header, data)
def lexicon_data_to_csvs(corpus_context, data, case_sensitive=False):
    """
    Write lexicon (word property) data to 'lexicon_import.csv'.

    Fix: the file is now opened with ``newline=''`` and ``encoding='utf8'``,
    matching ``write_csv_file`` and preventing blank rows on Windows.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    data : dict
        maps word label to a dict of property name -> value
    case_sensitive : boolean
        when False (the default) word labels are lower-cased on output
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'lexicon_import.csv'), 'w',
              newline='', encoding='utf8') as f:
        header = ['label'] + sorted(next(iter(data.values())).keys())
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(data.items()):
            if not case_sensitive:
                k = k.lower()
            v['label'] = k
            writer.writerow(v)
def feature_data_to_csvs(corpus_context, data):
    """
    Write phone feature data to 'feature_import.csv'.

    Fixes: opened with ``newline=''``/``encoding='utf8'`` (consistent with
    ``write_csv_file``; prevents blank rows on Windows) and uses standard
    un-parenthesized ``except``/``raise`` form.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    data : dict
        maps phone label to a dict of feature name -> value

    Raises
    ------
    AlphabetError
        if ``data`` is empty
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'feature_import.csv'), 'w',
              newline='', encoding='utf8') as f:
        try:
            header = ['label'] + sorted(next(iter(data.values())).keys())
        except StopIteration:
            # No phones matched the alphabet being imported.
            raise AlphabetError
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(data.items()):
            v['label'] = k
            writer.writerow(v)
def speaker_data_to_csvs(corpus_context, data):
    """
    Write speaker property data to 'speaker_import.csv'.

    Fix: the file is now opened with ``newline=''`` and ``encoding='utf8'``,
    matching ``write_csv_file`` and preventing blank rows on Windows.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    data : dict
        maps speaker name to a dict of property name -> value
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'speaker_import.csv'), 'w',
              newline='', encoding='utf8') as f:
        header = ['name'] + sorted(next(iter(data.values())).keys())
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(data.items()):
            v['name'] = k
            writer.writerow(v)
def discourse_data_to_csvs(corpus_context, data):
    """
    Write discourse property data to 'discourse_import.csv'.

    Fix: the file is now opened with ``newline=''`` and ``encoding='utf8'``,
    matching ``write_csv_file`` and preventing blank rows on Windows.

    Parameters
    ----------
    corpus_context: :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object
    data : dict
        maps discourse name to a dict of property name -> value
    """
    directory = corpus_context.config.temporary_directory('csv')
    with open(os.path.join(directory, 'discourse_import.csv'), 'w',
              newline='', encoding='utf8') as f:
        header = ['name'] + sorted(next(iter(data.values())).keys())
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for k, v in sorted(data.items()):
            v['name'] = k
            writer.writerow(v)
def create_utterance_csvs(corpus_context):
    """Create one empty per-speaker utterance CSV containing only a header row."""
    fieldnames = ['id', 'prev_id', 'begin_word_id', 'end_word_id']
    for speaker in corpus_context.speakers:
        out_path = os.path.join(corpus_context.config.temporary_directory('csv'),
                                '{}_utterance.csv'.format(speaker))
        with open(out_path, 'w', newline='', encoding='utf8') as out_file:
            csv.DictWriter(out_file, fieldnames, delimiter=',').writeheader()
def create_syllabic_csvs(corpus_context):
    """Create one empty per-speaker syllable CSV containing only a header row."""
    fieldnames = ['id', 'prev_id', 'vowel_id', 'onset_id', 'coda_id',
                  'begin', 'end', 'label', 'type_id']
    for speaker in corpus_context.speakers:
        out_path = os.path.join(corpus_context.config.temporary_directory('csv'),
                                '{}_syllable.csv'.format(speaker))
        with open(out_path, 'w', newline='', encoding='utf8') as out_file:
            csv.DictWriter(out_file, fieldnames, delimiter=',').writeheader()
def create_nonsyllabic_csvs(corpus_context):
    """Create one empty per-speaker non-syllabic CSV containing only a header row."""
    fieldnames = ['id', 'prev_id', 'break', 'onset_id', 'coda_id',
                  'begin', 'end', 'label', 'type_id']
    for speaker in corpus_context.speakers:
        out_path = os.path.join(corpus_context.config.temporary_directory('csv'),
                                '{}_nonsyl.csv'.format(speaker))
        with open(out_path, 'w', newline='', encoding='utf8') as out_file:
            csv.DictWriter(out_file, fieldnames, delimiter=',').writeheader()
| |
import itertools
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.util import testing as tm
@pytest.fixture
def single_level_multiindex():
    """A MultiIndex with a single level named 'first' and four labels."""
    level_values = ['foo', 'bar', 'baz', 'qux']
    return MultiIndex(levels=[level_values],
                      codes=[list(range(4))], names=['first'])
@pytest.fixture
def frame_random_data_integer_multi_index():
    """A 6x2 random DataFrame indexed by the product {0, 1} x {0, 1, 2}."""
    index = MultiIndex(levels=[[0, 1], [0, 1, 2]],
                       codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
    return DataFrame(np.random.randn(6, 2), index=index)
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexLoc(object):
    """Tests for ``.loc`` (and the deprecated ``.ix``) on MultiIndex objects.

    Fix applied: ``test_loc_multiindex_incomplete`` contained three
    copy-paste duplicated ``tm.assert_series_equal`` calls; the duplicates
    have been removed (each comparison is now asserted once).
    """

    def test_loc_getitem_series(self):
        # GH14730
        # passing a series as a key with a MultiIndex
        index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
        x = Series(index=index, data=range(9), dtype=np.float64)
        y = Series([1, 3])
        expected = Series(
            data=[0, 1, 2, 6, 7, 8],
            index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
            dtype=np.float64)
        result = x.loc[y]
        tm.assert_series_equal(result, expected)

        result = x.loc[[1, 3]]
        tm.assert_series_equal(result, expected)

        # GH15424
        y1 = Series([1, 3], index=[1, 2])
        result = x.loc[y1]
        tm.assert_series_equal(result, expected)

        empty = Series(data=[], dtype=np.float64)
        expected = Series([], index=MultiIndex(
            levels=index.levels, codes=[[], []], dtype=np.float64))
        result = x.loc[empty]
        tm.assert_series_equal(result, expected)

    def test_loc_getitem_array(self):
        # GH15434
        # passing an array as a key with a MultiIndex
        index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
        x = Series(index=index, data=range(9), dtype=np.float64)
        y = np.array([1, 3])
        expected = Series(
            data=[0, 1, 2, 6, 7, 8],
            index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
            dtype=np.float64)
        result = x.loc[y]
        tm.assert_series_equal(result, expected)

        # empty array:
        empty = np.array([])
        expected = Series([], index=MultiIndex(
            levels=index.levels, codes=[[], []], dtype=np.float64))
        result = x.loc[empty]
        tm.assert_series_equal(result, expected)

        # 0-dim array (scalar):
        scalar = np.int64(1)
        expected = Series(
            data=[0, 1, 2],
            index=['A', 'B', 'C'],
            dtype=np.float64)
        result = x.loc[scalar]
        tm.assert_series_equal(result, expected)

    def test_loc_multiindex(self):
        # .loc and legacy .ix must agree on MultiIndex frames.
        mi_labels = DataFrame(np.random.randn(3, 3),
                              columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
                              index=[['i', 'i', 'j'], ['X', 'X', 'Y']])

        mi_int = DataFrame(np.random.randn(3, 3),
                           columns=[[2, 2, 4], [6, 8, 10]],
                           index=[[4, 4, 8], [8, 10, 12]])

        # the first row
        rs = mi_labels.loc['i']
        with catch_warnings(record=True):
            xp = mi_labels.ix['i']
        tm.assert_frame_equal(rs, xp)

        # 2nd (last) columns
        rs = mi_labels.loc[:, 'j']
        with catch_warnings(record=True):
            xp = mi_labels.ix[:, 'j']
        tm.assert_frame_equal(rs, xp)

        # corner column
        rs = mi_labels.loc['j'].loc[:, 'j']
        with catch_warnings(record=True):
            xp = mi_labels.ix['j'].ix[:, 'j']
        tm.assert_frame_equal(rs, xp)

        # with a tuple
        rs = mi_labels.loc[('i', 'X')]
        with catch_warnings(record=True):
            xp = mi_labels.ix[('i', 'X')]
        tm.assert_frame_equal(rs, xp)

        rs = mi_int.loc[4]
        with catch_warnings(record=True):
            xp = mi_int.ix[4]
        tm.assert_frame_equal(rs, xp)

        # missing label ("2L?" also matches py2 long reprs)
        with pytest.raises(KeyError, match=r"^2L?$"):
            mi_int.loc[2]
        with catch_warnings(record=True):
            # GH 21593
            with pytest.raises(KeyError, match=r"^2L?$"):
                mi_int.ix[2]

    def test_loc_multiindex_indexer_none(self):
        # GH6788
        # multi-index indexer is None (meaning take all)
        attributes = ['Attribute' + str(i) for i in range(1)]
        attribute_values = ['Value' + str(i) for i in range(5)]

        index = MultiIndex.from_product([attributes, attribute_values])
        df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
        df = DataFrame(df, columns=index)
        result = df[attributes]
        tm.assert_frame_equal(result, df)

        # GH 7349
        # loc with a multi-index seems to be doing fallback
        df = DataFrame(np.arange(12).reshape(-1, 1),
                       index=MultiIndex.from_product([[1, 2, 3, 4],
                                                      [1, 2, 3]]))

        expected = df.loc[([1, 2], ), :]
        result = df.loc[[1, 2]]
        tm.assert_frame_equal(result, expected)

    def test_loc_multiindex_incomplete(self):
        # GH 7399
        # incomplete indexers
        s = Series(np.arange(15, dtype='int64'),
                   MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
        expected = s.loc[:, 'a':'c']

        result = s.loc[0:4, 'a':'c']
        tm.assert_series_equal(result, expected)

        result = s.loc[:4, 'a':'c']
        tm.assert_series_equal(result, expected)

        result = s.loc[0:, 'a':'c']
        tm.assert_series_equal(result, expected)

        # GH 7400
        # multiindexer gettitem with list of indexers skips wrong element
        s = Series(np.arange(15, dtype='int64'),
                   MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
        expected = s.iloc[[6, 7, 8, 12, 13, 14]]
        result = s.loc[2:4:2, 'a':'c']
        tm.assert_series_equal(result, expected)

    def test_get_loc_single_level(self, single_level_multiindex):
        # Scalar lookup on every label of a single-level MultiIndex
        # must not raise.
        single_level = single_level_multiindex
        s = Series(np.random.randn(len(single_level)),
                   index=single_level)
        for k in single_level.values:
            s[k]

    def test_loc_getitem_int_slice(self):
        # GH 3053
        # loc should treat integer slices like label slices
        index = MultiIndex.from_tuples([t for t in itertools.product(
            [6, 7, 8], ['a', 'b'])])
        df = DataFrame(np.random.randn(6, 6), index, index)
        result = df.loc[6:8, :]
        expected = df
        tm.assert_frame_equal(result, expected)

        index = MultiIndex.from_tuples([t
                                        for t in itertools.product(
                                            [10, 20, 30], ['a', 'b'])])
        df = DataFrame(np.random.randn(6, 6), index, index)
        result = df.loc[20:30, :]
        expected = df.iloc[2:]
        tm.assert_frame_equal(result, expected)

        # doc examples
        result = df.loc[10, :]
        expected = df.iloc[0:2]
        expected.index = ['a', 'b']
        tm.assert_frame_equal(result, expected)

        result = df.loc[:, 10]
        # expected = df.ix[:,10] (this fails)
        expected = df[10]
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        'indexer_type_1',
        (list, tuple, set, slice, np.ndarray, Series, Index))
    @pytest.mark.parametrize(
        'indexer_type_2',
        (list, tuple, set, slice, np.ndarray, Series, Index))
    def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2):
        # GH #19686
        # .loc should work with nested indexers which can be
        # any list-like objects (see `pandas.api.types.is_list_like`) or slices
        def convert_nested_indexer(indexer_type, keys):
            if indexer_type == np.ndarray:
                return np.array(keys)
            if indexer_type == slice:
                return slice(*keys)
            return indexer_type(keys)

        a = [10, 20, 30]
        b = [1, 2, 3]
        index = MultiIndex.from_product([a, b])
        df = DataFrame(
            np.arange(len(index), dtype='int64'),
            index=index, columns=['Data'])

        keys = ([10, 20], [2, 3])
        types = (indexer_type_1, indexer_type_2)

        # check indexers with all the combinations of nested objects
        # of all the valid types
        indexer = tuple(
            convert_nested_indexer(indexer_type, k)
            for indexer_type, k in zip(types, keys))

        result = df.loc[indexer, 'Data']
        expected = Series(
            [1, 2, 4, 5], name='Data',
            index=MultiIndex.from_product(keys))

        tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
    ([], False, None),  # empty ok
    (['A'], False, None),
    (['A', 'D'], False, None),
    (['D'], False, r"\['D'\] not in index"),  # not any values found
    (pd.IndexSlice[:, ['foo']], True, None),
    (pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
                                                            expected_error):
    """``.loc`` with partially-missing labels: a mix of present and missing
    labels is allowed, while an indexer with no matches raises KeyError."""
    # GH 7866
    # multi-index slicing with missing indexers
    idx = MultiIndex.from_product([['A', 'B', 'C'],
                                   ['foo', 'bar', 'baz']],
                                  names=['one', 'two'])
    s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()

    # Build the expected result for each parametrized case; unused when
    # an error is expected instead.
    if indexer == []:
        expected = s.iloc[[]]
    elif is_level1:
        expected = Series([0, 3, 6], index=MultiIndex.from_product(
            [['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
    else:
        exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
                                          names=['one', 'two'])
        expected = Series(np.arange(3, dtype='int64'),
                          index=exp_idx).sort_index()

    if expected_error is not None:
        with pytest.raises(KeyError, match=expected_error):
            s.loc[indexer]
    else:
        result = s.loc[indexer]
        tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
@pytest.mark.parametrize('indexer', [
    lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
    lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
])
def test_series_loc_getitem_fancy(
        multiindex_year_month_day_dataframe_random_data, indexer):
    """List-of-tuples selection via .loc/.ix behaves like a reindex."""
    s = multiindex_year_month_day_dataframe_random_data['A']
    # Positions 49:51 presumably hold the two requested (y, m, d) tuples
    # in the shared fixture — TODO confirm against the fixture definition.
    expected = s.reindex(s.index[49:51])
    result = indexer(s)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
    ([], slice(None)),
    (['foo'], [])
])
def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
    """An empty indexer on either column level selects zero columns."""
    # GH 8737
    # empty indexer
    multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
                                           ['alpha', 'beta']))
    df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
    df = df.sort_index(level=0, axis=1)

    # reindex([]) yields an empty index of the same (MultiIndex) type.
    expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])

    result = df.loc[:, columns_indexer]
    tm.assert_frame_equal(result, expected)
def test_loc_getitem_duplicates_multiindex_non_scalar_type_object():
    """Scalar .loc lookup of an object (function) cell works under MultiIndex columns."""
    # regression from < 0.14.0
    # GH 7914
    columns = MultiIndex.from_tuples([('functs', 'mean'),
                                      ('functs', 'median')])
    df = DataFrame([[np.mean, np.median], ['mean', 'median']],
                   columns=columns,
                   index=['function', 'name'])

    result = df.loc['function', ('functs', 'mean')]
    expected = np.mean
    assert result == expected
def test_loc_getitem_tuple_plus_slice():
    """``.loc[(r1, r2), :]`` matches scalar-tuple lookup ``.loc[r1, r2]``."""
    # GH 671
    df = DataFrame({'a': np.arange(10),
                    'b': np.arange(10),
                    'c': np.random.randn(10),
                    'd': np.random.randn(10)}
                   ).set_index(['a', 'b'])
    expected = df.loc[0, 0]
    result = df.loc[(0, 0), :]
    tm.assert_series_equal(result, expected)
def test_loc_getitem_int(frame_random_data_integer_multi_index):
    """``.loc[1]`` selects by the outer level and drops it from the result."""
    df = frame_random_data_integer_multi_index
    result = df.loc[1]
    # The fixture's last three rows carry 1 in the first index level.
    expected = df[-3:]
    expected.index = expected.index.droplevel(0)
    tm.assert_frame_equal(result, expected)
def test_loc_getitem_int_raises_exception(
        frame_random_data_integer_multi_index):
    """A missing outer-level integer label raises KeyError."""
    df = frame_random_data_integer_multi_index
    # "3L?" also matches the long-suffixed repr on legacy Python 2.
    with pytest.raises(KeyError, match=r"^3L?$"):
        df.loc[3]
def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
    """Setting a brand-new (row, col) cell via .loc, then reading it back."""
    df = multiindex_dataframe_random_data

    # test setup - check key not in dataframe
    with pytest.raises(KeyError, match=r"^11L?$"):
        df.loc[('bar', 'three'), 'B']

    # in theory should be inserting in a sorted space????
    df.loc[('bar', 'three'), 'B'] = 0
    expected = 0
    # sort_index so the enlarged frame can be sliced with .loc reliably
    result = df.sort_index().loc[('bar', 'three'), 'B']
    assert result == expected
| |
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from io import BytesIO
from zope.interface import implementer
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.web.iweb import IAgentEndpointFactory
from twisted.web.client import Agent, readBody, FileBodyProducer
from twisted.internet import reactor
from twisted.web.http_headers import Headers
import json
from json import dumps
from twisted.trial import unittest
import subprocess
from sh import cat
from sh import kill
from config.setupcfg import getdefaultconfig, setup_logging
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONFIG_FILE = '/etc/hpedockerplugin/hpe.conf'
CONFIG = ['--config-file', CONFIG_FILE]
TEST_DIR = os.path.abspath('../')
TWISTD_PID = TEST_DIR + '/twistd.pid'
hpe_sock_path = b"/run/docker/plugins/hpe/hpe.sock"
@implementer(IAgentEndpointFactory)
class HPEEndpointFactory(object):
    """Agent endpoint factory that always dials the HPE plugin's Unix socket."""

    def __init__(self):
        self.reactor = reactor

    def endpointForURI(self, uri):
        # Every URI is routed onto the plugin's well-known socket path;
        # the URI itself is ignored.
        return UNIXClientEndpoint(self.reactor, hpe_sock_path)
class HPEPLUGINTESTS(unittest.TestCase):
    """Integration tests that drive the HPE Docker volume plugin through the
    Docker plugin HTTP API over its Unix socket.

    Fixes applied: the Python-2-only ``print`` statement in
    ``_wait_for_pid_file`` now goes through the module logger (as the old
    TODOs requested); ``setUp``'s bare ``except:`` was narrowed to
    ``except Exception:``; dead commented-out code strings were removed.
    """

    def _wait_for_pid_file(self, filename, wait_time):
        """Poll (1s at a time, up to *wait_time* tries) for twistd's pid file,
        record its contents in ``self.twistd_pid``, or raise ValueError."""
        count = 0
        while not os.path.exists(filename):
            if count == wait_time:
                break
            time.sleep(1)
            count += 1
        if os.path.isfile(filename):
            self.twistd_pid = cat(filename)
            # Was a py2-only ``print`` statement; log instead (py2/py3 safe).
            LOG.debug("self.twistd_pid: %s", self.twistd_pid)
        else:
            raise ValueError("%s isn't a file!" % filename)

    def checkResponse(self, response, exp_result):
        """Read the response body and assert it equals *exp_result*.

        Returns the Deferred so callers can keep chaining.
        """
        d = readBody(response)
        d.addCallback(self.assertResponse, exp_result)
        return d

    def getResponse(self, response):
        """Return a Deferred that fires with the raw response body."""
        d = readBody(response)
        return d

    def assertResponse(self, body, exp_result):
        """Compare a fully-read body against the expected JSON string."""
        LOG.debug("Response Body %s", str(body))
        LOG.debug("Expected Results %s", str(exp_result))
        self.assertEqual(body, exp_result)

    def cbFailed(self, failure):
        """Errback: log the Twisted failure and fail the test."""
        LOG.error("Test Failed %s", str(failure))
        self.fail(msg='Test Failed')

    def setUp(self):
        """Configure logging and start the plugin service under twistd."""
        # Setup the default, hpe3parconfig, and hpelefthandconfig
        # configuration objects.
        hpedefaultconfig = getdefaultconfig(CONFIG)
        logging_level = hpedefaultconfig.logging
        setup_logging('test_hpe_plugin', logging_level)

        # Start HPE Docker Plugin
        bashcommand = "/usr/bin/twistd hpe_plugin_service"
        try:
            subprocess.check_output(['sh', '-c', bashcommand], cwd=TEST_DIR)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; failures behave the same.
            LOG.error("Test Setup Failed: Could not change dir")
            self.fail(msg='Test Failed')
        self._wait_for_pid_file(TWISTD_PID, 5)

    def tearDown(self):
        """Kill the twistd plugin process and busy-wait until it exits."""
        kill(str(self.twistd_pid))
        is_running = os.path.exists("/proc/%s" % str(self.twistd_pid))
        while is_running:
            is_running = os.path.exists("/proc/%s" % str(self.twistd_pid))
            time.sleep(0.25)

    def test_hpe_activate(self):
        """Plugin.Activate must advertise the VolumeDriver capability."""
        path = b"/Plugin.Activate"
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path)
        d.addCallback(self.checkResponse, json.dumps({u"Implements":
                                                      [u"VolumeDriver"]}))
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_create_volume(self):
        """VolumeDriver.Create with no options succeeds."""
        name = 'test-create-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name,
                u"Opts": None}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_create_volume_size_option(self):
        """VolumeDriver.Create honours the ``size`` option."""
        name = 'test-create-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name,
                u"Opts": {u"size": u"50"}}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addCallback(self._remove_volume_callback, name)
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_create_volume_provisioning_option(self):
        """VolumeDriver.Create honours a valid ``provisioning`` option."""
        name = 'test-create-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name,
                u"Opts": {u"provisioning": u"full"}}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addCallback(self._remove_volume_callback, name)
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_create_volume_invalid_provisioning_option(self):
        """An invalid ``provisioning`` value must be rejected with an error."""
        name = 'test-create-volume-fake'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name,
                u"Opts": {u"provisioning": u"fake"}}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({
            u"Err": "Invalid input received: Must specify a valid " +
                    "provisioning type ['thin', 'full', " +
                    "'dedup'], value 'fake' is invalid."}))
        d.addCallback(self._remove_volume_callback, name)
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_create_volume_invalid_option(self):
        """An unknown create option must be rejected with an error."""
        name = 'test-create-volume-fake'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name,
                u"Opts": {u"fake": u"fake"}}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({
            u"Err": "create volume failed, error is: fake is not a valid "
                    "option. Valid options are: ['size', 'provisioning', "
                    "'flash-cache']"}))
        d.addCallback(self._remove_volume_callback, name)
        d.addErrback(self.cbFailed)
        return d

    def _remove_volume_callback(self, body, name):
        # NOTE: body arg is the result from last deferred call.
        # Python complains about parameter mis-match if you don't include it
        return self._remove_volume(name)

    def _remove_volume(self, name):
        """Issue VolumeDriver.Remove for *name* and expect success."""
        path = b"/VolumeDriver.Remove"
        body = {u"Name": name}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)
        return d

    def test_hpe_remove_volume(self):
        """Removing the volume created by test_hpe_create_volume succeeds."""
        name = 'test-create-volume'
        return self._remove_volume(name)

    def _get_volume_mount_path(self, body, name):
        # NOTE: body arg is the result from last deferred call.
        # Python complains about parameter mis-match if you don't include it
        # In this test, we need it to compare expected results with Path
        # request

        # Compare path returned by mount (body) with Get Path request
        path = b"/VolumeDriver.Path"
        newbody = {u"Name": name}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, body)
        d.addErrback(self.cbFailed)
        return d

    def _mount_the_volume(self, body, name):
        # NOTE: body arg is the result from last deferred call.
        # Python complains about parameter mis-match if you don't include it

        # Mount the previously created volume
        path = b"/VolumeDriver.Mount"
        newbody = {u"Name": name}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.getResponse)
        # If we get a valid response from Path request then we assume
        # the mount passed.
        # TODO: Add additonal logic to verify the mountpath
        d.addCallback(self._get_volume_mount_path, name)
        return d

    def _unmount_the_volume(self, body, name):
        # NOTE: body arg is the result from last deferred call.
        # Python complains about parameter mis-match if you don't include it
        path = b"/VolumeDriver.Unmount"
        newbody = {u"Name": name}
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(newbody)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)
        return d

    def broken_test_hpe_mount_umount_volume(self):
        """Create → mount → unmount → remove round trip (currently disabled)."""
        name = 'test-mount-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name}

        # Create a volume to be mounted
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)

        # Mount the previously created volume
        d.addCallback(self._mount_the_volume, name)

        # UMount the previously created volume
        d.addCallback(self._unmount_the_volume, name)

        # Remove the previously created volume
        d.addCallback(self._remove_volume_callback, name)
        return d

    def test_hpe_get_volume(self):
        """VolumeDriver.Get returns the created volume's metadata."""
        name = 'test-get-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name}

        # Create a volume to be mounted
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)

        # Get the previously created volume
        expected = {u"Volume": {u"Status": {},
                                u"Mountpoint": '',
                                u"Name": name},
                    u"Err": ''}
        d.addCallback(self._get_volume, name, expected)

        # Remove the previously created volume
        d.addCallback(self._remove_volume_callback, name)
        return d

    def test_hpe_get_non_existent_volume(self):
        """VolumeDriver.Get on a missing volume returns only an empty Err."""
        name = 'test-get-volume'
        # Get the previously created volume
        expected = {u"Err": ''}
        d = self._get_volume({}, name, expected)
        return d

    def _get_volume(self, body, name, expected):
        """Issue VolumeDriver.Get for *name* and check against *expected*."""
        path = b"/VolumeDriver.Get"
        body = {u"Name": name}

        # Get a volume
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps(expected))
        d.addErrback(self.cbFailed)
        return d

    def broken_test_hpe_list_volume(self):
        """Create a volume then list it (currently disabled)."""
        name = 'test-list-volume'
        path = b"/VolumeDriver.Create"
        body = {u"Name": name}

        # Create a volume to be mounted
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": ''}))
        d.addErrback(self.cbFailed)

        # List volumes
        expected = {u"Err": '',
                    u"Volumes": [{u"Mountpoint": '',
                                  u"Name": name}]}
        d.addCallback(self._list_volumes, name, expected)

        # Remove the previously created volume
        d.addCallback(self._remove_volume_callback, name)
        return d

    def broken_test_hpe_list_volume_no_volumes(self):
        """Listing with no volumes returns an empty list (currently disabled)."""
        path = b"/VolumeDriver.List"
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps({})))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps({u"Err": '',
                                                      u"Volumes": []}))
        d.addErrback(self.cbFailed)
        return d

    def _list_volumes(self, body, name, expected):
        """Issue VolumeDriver.List and check against *expected*."""
        path = b"/VolumeDriver.List"
        body = {u"Name": name}

        # Get a volume
        headers = Headers({b"content-type": [b"application/json"]})
        body_producer = FileBodyProducer(BytesIO(dumps(body)))
        agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory())
        d = agent.request(b'POST', b"UNIX://localhost" + path, headers,
                          body_producer)
        d.addCallback(self.checkResponse, json.dumps(expected))
        d.addErrback(self.cbFailed)
        return d
| |
from django.conf.urls import patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import resolve
import pytest
from actionviews.base import TemplateResponseMixin
from actionviews.decorators import require_method
@pytest.fixture(params=list(range(1)))
def TestView(request, monkeypatch):
    """Build a minimal actionviews ``View`` subclass and point Django's
    urlconf resolver at its generated URLs for the duration of a test."""
    from actionviews.base import View

    class TestView(View):

        # ``self:''`` is the actionviews convention: the annotation on
        # ``self`` supplies the URL segment for this action (here: root).
        def do_index(self:''):
            return {'result': 'test'}

    # Replace get_urlconf with a throwaway module-like object exposing
    # urlpatterns built from the view's generated urls.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {'urlpatterns': patterns('', *TestView.urls)}))
    return [TestView][request.param]
@pytest.fixture
def django_request(request_factory):
    """A plain GET request to the site root."""
    factory = request_factory
    return factory.get('/')
@pytest.fixture
def django_request_post(request_factory):
    """A POST request to the site root."""
    factory = request_factory
    return factory.post('/')
@pytest.fixture
def django_request_options(request_factory):
    """An OPTIONS request to the site root."""
    factory = request_factory
    return factory.options('/')
def test_view(TestView, django_request):
    """A ``get`` handler can dispatch to the resolved action and return
    its context dict."""

    class TestGetView(TestView):

        def get(self, request):
            return self.action()

    callback = TestGetView.urls[0].callback
    assert callback(django_request) == {'result': 'test'}
def test_decorated_action_on_view(TestView, django_request):
    """Attributes set by an action decorator remain visible via ``self.action``."""

    def test_decorator(func):
        func.is_decorated = True
        return func

    class TestGetView(TestView):

        def get(self, request):
            assert self.action.is_decorated

        @test_decorator
        def do_index(self):
            return {'result': 'test'}

    TestGetView.urls[0].callback(django_request)
def test_default_template_name(TestView, django_request):
    """Template names default to '<ViewClass>/<action>.html'."""

    class TestGetView(TestView, TemplateResponseMixin):

        def get(self, request):
            assert self.get_template_names() == ['TestGetView/index.html']

    TestGetView.urls[0].callback(django_request)
def test_template_view(django_request, monkeypatch):
    """TemplateView renders the action's context through its template."""
    from actionviews.base import TemplateView

    class TestTemplateView(TemplateView):

        # ``self:''`` annotation: actionviews uses it as the URL segment.
        def do_index(self:''):
            return {'result': 'test'}

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestTemplateView.urls)}))
    view = TestTemplateView.urls[0].callback
    response = view(django_request)
    assert response.rendered_content == 'test'
def test_method_allowed(TestView, django_request_post, monkeypatch):
    """A POST to an action guarded by require_method('post') renders 200."""
    # NOTE(review): the TestView fixture is requested but unused in the
    # body; it does run its own urlconf monkeypatching — confirm whether
    # that side effect is still needed here.
    from actionviews.base import TemplateView

    class TestPostView(TemplateView):

        @require_method('post')
        def do_index(self:''):
            return {'result': 'test'}

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestPostView.urls)}))
    view = TestPostView.urls[0].callback
    response = view(django_request_post)
    assert response.status_code == 200
    assert response.rendered_content == 'test'
def test_method_not_allowed(django_request, monkeypatch):
    """A GET against a post-only action yields 405 Method Not Allowed."""
    from actionviews.base import TemplateView

    class TestPostView(TemplateView):

        @require_method('post')
        def do_index(self:''):
            return {'result': 'test'}

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestPostView.urls)}))
    view = TestPostView.urls[0].callback
    response = view(django_request)
    assert response.status_code == 405
def test_options_method(TestView, django_request_options):
    """OPTIONS requests get an empty 200 response listing allowed methods."""
    callback = TestView.urls[0].callback
    response = callback(django_request_options)

    assert response.status_code == 200
    assert response['Allow'] == 'OPTIONS'
    assert response['Content-Length'] == '0'
def test_child(monkeypatch, django_request):
    """A parent action declared with @child_view delegates rendering to the
    child view's action."""
    from actionviews.base import View, TemplateView
    from actionviews.decorators import child_view

    class ChildView(TemplateView):

        def do_index(self:''):
            return {'result': 'test'}

    class ParentView(View):

        @child_view(ChildView)
        def do_index(self:''):
            pass

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *ParentView.urls)}))
    view = resolve('/').func
    response = view(django_request)
    assert response.rendered_content == 'test'
def test_child_defaults_for_parent(monkeypatch, request_factory):
    """Keyword defaults of a parent action become URL kwargs that flow to
    the child view through the resolver."""
    from actionviews.base import View, TemplateView
    from actionviews.decorators import child_view

    class ChildView(TemplateView):

        def do_index(self):
            return {}

    class ParentView(View):

        @child_view(ChildView)
        def do_pindex(self, result='test'):
            return {'result': result}

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *ParentView.urls)}))
    resolver_match = resolve('/pindex/result/test/index/')
    response = resolver_match.func(
        request_factory.get('/pindex/result/test/index/'),
        **resolver_match.kwargs)
    assert response.rendered_content == 'test'
def test_raise_response_from_action(django_request, monkeypatch):
    """Raising ActionResponse(HttpResponse) short-circuits the view with
    that response."""
    from django.http.response import HttpResponse
    from actionviews.base import TemplateView
    from actionviews.exceptions import ActionResponse

    class TestView(TemplateView):

        def do_index(self:''):
            raise ActionResponse(HttpResponse())

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestView.urls)}))
    view = resolve('/').func
    response = view(django_request)
    assert response.status_code == 200
def test_raise_non_response_from_action(django_request, monkeypatch):
    """Raising ActionResponse with a non-response payload is treated as a
    configuration error."""
    from actionviews.base import TemplateView
    from actionviews.exceptions import ActionResponse

    class TestView(TemplateView):

        def do_index(self:''):
            raise ActionResponse({})

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestView.urls)}))
    view = resolve('/').func

    with pytest.raises(ImproperlyConfigured):
        view(django_request)
def test_return_response_from_action(django_request, monkeypatch):
    """Returning an HttpResponse from an action passes it straight through."""
    from django.http.response import HttpResponse
    from actionviews.base import TemplateView

    class TestView(TemplateView):

        def do_index(self:''):
            return HttpResponse()

    # Point Django's urlconf at the generated urls for this test only.
    monkeypatch.setattr(
        'django.core.urlresolvers.get_urlconf',
        lambda: type(
            'urlconf', (), {
                'urlpatterns': patterns('', *TestView.urls)}))
    view = resolve('/').func
    response = view(django_request)
    assert response.status_code == 200
| |
# Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
import ddt
from oslo_utils import strutils
from oslo_utils import timeutils
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import group_specs as v3_group_specs
from cinder.api.v3 import group_types as v3_group_types
from cinder.api.v3.views import group_types as views_types
from cinder import context
from cinder import exception
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.volume import group_types
IN_USE_GROUP_TYPE = fake.GROUP_TYPE3_ID
def stub_group_type(id):
    """Build a fake group-type dict for *id* with five canned spec entries."""
    specs = {"key%d" % n: "value%d" % n for n in range(1, 6)}
    return {
        'id': id,
        'name': 'group_type_%s' % id,
        'description': 'group_type_desc_%s' % id,
        'group_specs': specs,
    }
def return_group_types_get_all_types(context, filters=None, marker=None,
                                     limit=None, sort_keys=None,
                                     sort_dirs=None, offset=None,
                                     list_result=False):
    """Fake group_types.get_all_group_types returning three stub types.

    Returns a name->type dict by default, or just the type dicts when
    list_result is True.  All paging/sorting arguments are ignored.
    """
    all_types = {'group_type_%d' % n: stub_group_type(n) for n in (1, 2, 3)}
    return list(all_types.values()) if list_result else all_types
def return_empty_group_types_get_all_types(context, filters=None, marker=None,
                                           limit=None, sort_keys=None,
                                           sort_dirs=None, offset=None,
                                           list_result=False):
    """Fake get_all_group_types that finds no types at all."""
    return [] if list_result else {}
def return_group_types_get_group_type(context, id):
    """Fake get_group_type; raises GroupTypeNotFound for the sentinel id."""
    if id != fake.WILL_NOT_BE_FOUND_ID:
        return stub_group_type(id)
    raise exception.GroupTypeNotFound(group_type_id=id)
def return_group_types_get_default():
    """Fake get_default_group_type returning stub type 1."""
    default_type = stub_group_type(1)
    return default_type
def return_group_types_get_default_not_found():
    """Fake get_default_group_type when no default type is configured."""
    return dict()
def return_group_types_with_groups_destroy(context, id):
    """Fake group_types.destroy; the in-use type refuses deletion."""
    if id != IN_USE_GROUP_TYPE:
        return
    raise exception.GroupTypeInUse(group_type_id=id)
@ddt.ddt
class GroupTypesApiTest(test.TestCase):
    """Unit tests for the v3 group-types controller and its view builder."""
    def _create_group_type(self, group_type_name, group_specs=None,
                           is_public=True, projects=None):
        """Create a group type through the group_types API; return its id."""
        return group_types.create(self.ctxt, group_type_name, group_specs,
                                  is_public, projects).get('id')
    def setUp(self):
        """Build admin/non-admin contexts and four group types.

        type_id1/type_id2 are public, type_id3 is private to PROJECT_ID,
        and type_id0 is the pre-existing default cgsnapshot type.
        """
        super(GroupTypesApiTest, self).setUp()
        self.controller = v3_group_types.GroupTypesController()
        self.specs_controller = v3_group_specs.GroupTypeSpecsController()
        self.ctxt = context.RequestContext(user_id=fake.USER_ID,
                                           project_id=fake.PROJECT_ID,
                                           is_admin=True)
        self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID,
                                                project_id=fake.PROJECT2_ID,
                                                is_admin=False)
        self.type_id1 = self._create_group_type('group_type1',
                                                {'key1': 'value1'})
        self.type_id2 = self._create_group_type('group_type2',
                                                {'key2': 'value2'})
        self.type_id3 = self._create_group_type('group_type3',
                                                {'key3': 'value3'}, False,
                                                [fake.PROJECT_ID])
        self.type_id0 = group_types.get_default_cgsnapshot_type()['id']
    # Each ddt string below is passed as the raw is_public value; both
    # truthy and falsy spellings must be accepted and normalized.
    @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
              'y', 'yes')
    @mock.patch.object(group_types, "get_group_type_by_name")
    @mock.patch.object(group_types, "create")
    @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
    @mock.patch("cinder.api.views.types.ViewBuilder.show")
    def test_create_group_type_with_valid_is_public_in_string(
            self, is_public, mock_show, mock_cache_resource,
            mock_create, mock_get):
        boolean_is_public = strutils.bool_from_string(is_public)
        req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        body = {"group_type": {"is_public": is_public, "name": "group_type1",
                               "description": None}}
        self.controller.create(req, body=body)
        # The string must have been coerced to a real boolean by create().
        mock_create.assert_called_once_with(
            self.ctxt, 'group_type1', {},
            boolean_is_public, description=None)
    @mock.patch.object(group_types, "get_group_type_by_name")
    @mock.patch.object(group_types, "create")
    @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
    @mock.patch("cinder.api.views.types.ViewBuilder.show")
    def test_create_group_type_with_group_specs_null(
            self, mock_show, mock_cache_resource,
            mock_create, mock_get):
        req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        body = {"group_type": {"name": "group_type1",
                               "group_specs": None}}
        self.controller.create(req, body=body)
        # A null group_specs is forwarded as None (is_public defaults True).
        mock_create.assert_called_once_with(
            self.ctxt, 'group_type1', None, True, description=None)
    @ddt.data(fake.GROUP_TYPE_ID, IN_USE_GROUP_TYPE)
    def test_group_type_destroy(self, grp_type_id):
        """Delete succeeds for an unused type, 400s for an in-use one."""
        grp_type = {'id': grp_type_id, 'name': 'grp' + grp_type_id}
        self.mock_object(group_types, 'get_group_type',
                         return_value=grp_type)
        self.mock_object(group_types, 'destroy',
                         return_group_types_with_groups_destroy)
        mock_notify_info = self.mock_object(
            v3_group_types.GroupTypesController,
            '_notify_group_type_info')
        mock_notify_error = self.mock_object(
            v3_group_types.GroupTypesController,
            '_notify_group_type_error')
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % (
            fake.PROJECT_ID, grp_type_id),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        if grp_type_id == IN_USE_GROUP_TYPE:
            # The fake destroy raises GroupTypeInUse for this id; the
            # controller must translate that into HTTP 400 and notify.
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.controller.delete,
                              req, grp_type_id)
            mock_notify_error.assert_called_once_with(
                self.ctxt, 'group_type.delete', mock.ANY,
                group_type=grp_type)
        else:
            self.controller.delete(req, grp_type_id)
            mock_notify_info.assert_called_once_with(
                self.ctxt, 'group_type.delete', grp_type)
    def test_group_types_index(self):
        self.mock_object(group_types, 'get_all_group_types',
                         return_group_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
                                      use_admin_context=True,
                                      version=mv.GROUP_TYPE)
        res_dict = self.controller.index(req)
        self.assertEqual(3, len(res_dict['group_types']))
        expected_names = ['group_type_1', 'group_type_2', 'group_type_3']
        actual_names = map(lambda e: e['name'], res_dict['group_types'])
        self.assertEqual(set(expected_names), set(actual_names))
        for entry in res_dict['group_types']:
            self.assertEqual('value1', entry['group_specs']['key1'])
    def test_group_types_index_no_data(self):
        self.mock_object(group_types, 'get_all_group_types',
                         return_empty_group_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        res_dict = self.controller.index(req)
        self.assertEqual(0, len(res_dict['group_types']))
    def test_group_types_index_with_limit(self):
        req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(1, len(res['group_types']))
        self.assertEqual(self.type_id3, res['group_types'][0]['id'])
        # The pagination link must carry the limit and the last-seen marker.
        expect_next_link = ('http://localhost/v3/%s/group_types?limit=1'
                            '&marker=%s' %
                            (fake.PROJECT_ID, res['group_types'][0]['id']))
        self.assertEqual(expect_next_link, res['group_type_links'][0]['href'])
    def test_group_types_index_with_offset(self):
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?offset=1' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(3, len(res['group_types']))
    def test_group_types_index_with_offset_out_of_range(self):
        url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID
        req = fakes.HTTPRequest.blank(url, version=mv.GROUP_TYPE)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)
    def test_group_types_index_with_limit_and_offset(self):
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(2, len(res['group_types']))
        self.assertEqual(self.type_id2, res['group_types'][0]['id'])
        self.assertEqual(self.type_id1, res['group_types'][1]['id'])
    def test_group_types_index_with_limit_and_marker(self):
        req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1'
                                      '&marker=%s' %
                                      (fake.PROJECT_ID,
                                       self.type_id2),
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(1, len(res['group_types']))
        self.assertEqual(self.type_id1, res['group_types'][0]['id'])
    def test_group_types_index_with_valid_filter(self):
        """is_public=True still returns all 4 types for an admin context."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?is_public=True' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(self.type_id3, res['group_types'][0]['id'])
        self.assertEqual(self.type_id2, res['group_types'][1]['id'])
        self.assertEqual(self.type_id1, res['group_types'][2]['id'])
        self.assertEqual(self.type_id0, res['group_types'][3]['id'])
    def test_group_types_index_with_invalid_filter(self):
        """An unsupported filter key (id) is ignored, not an error."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        self.assertEqual(4, len(res['group_types']))
    def test_group_types_index_with_sort_keys(self):
        # Expected order is descending id (reverse=True below).
        req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort(reverse=True)
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
        self.assertEqual(expect_result[2], res['group_types'][2]['id'])
        self.assertEqual(expect_result[3], res['group_types'][3]['id'])
    def test_group_types_index_with_sort_and_limit(self):
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort(reverse=True)
        self.assertEqual(2, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
    def test_group_types_index_with_sort_keys_and_sort_dirs(self):
        # Explicit ascending direction via 'sort=id:asc'.
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID,
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.index(req)
        expect_result = [self.type_id0, self.type_id1, self.type_id2,
                         self.type_id3]
        expect_result.sort()
        self.assertEqual(4, len(res['group_types']))
        self.assertEqual(expect_result[0], res['group_types'][0]['id'])
        self.assertEqual(expect_result[1], res['group_types'][1]['id'])
        self.assertEqual(expect_result[2], res['group_types'][2]['id'])
        self.assertEqual(expect_result[3], res['group_types'][3]['id'])
    @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on',
              'y', 'yes')
    @mock.patch.object(group_types, "get_group_type")
    @mock.patch.object(group_types, "update")
    @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource")
    @mock.patch("cinder.api.views.types.ViewBuilder.show")
    def test_update_group_type_with_valid_is_public_in_string(
            self, is_public, mock_show, mock_cache_resource,
            mock_update, mock_get):
        type_id = str(uuid.uuid4())
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types/%s' % (fake.PROJECT_ID, type_id),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        boolean_is_public = strutils.bool_from_string(is_public)
        body = {"group_type": {"is_public": is_public, "name": "group_type1"}}
        self.controller.update(req, type_id, body=body)
        mock_update.assert_called_once_with(
            self.ctxt, type_id, 'group_type1', None,
            is_public=boolean_is_public)
    def test_update_group_type_with_name_null(self):
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
            version=mv.GROUP_TYPE)
        req.environ['cinder.context'] = self.ctxt
        body = {"group_type": {"name": None}}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, fake.GROUP_TYPE_ID, body=body)
    @ddt.data({"group_type": {"name": None,
                              "description": "description"}},
              {"group_type": {"name": "test",
                              "is_public": True}},
              {"group_type": {"description": None,
                              "is_public": True}})
    def test_update_group_type(self, body):
        """Partial updates: only the supplied fields change."""
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID),
            version=mv.GROUP_TYPE)
        group_type_1 = group_types.create(self.ctxt, 'group_type')
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.update(req, group_type_1.get('id'), body=body)
        expected_name = body['group_type'].get('name')
        if expected_name is not None:
            self.assertEqual(expected_name, res['group_type']['name'])
        expected_is_public = body['group_type'].get('is_public')
        if expected_is_public is not None:
            self.assertEqual(expected_is_public,
                             res['group_type']['is_public'])
        self.assertEqual(body['group_type'].get('description'),
                         res['group_type']['description'])
    def test_group_types_show(self):
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        type_id = str(uuid.uuid4())
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID
                                      + type_id,
                                      version=mv.GROUP_TYPE)
        res_dict = self.controller.show(req, type_id)
        self.assertEqual(1, len(res_dict))
        self.assertEqual(type_id, res_dict['group_type']['id'])
        type_name = 'group_type_' + type_id
        self.assertEqual(type_name, res_dict['group_type']['name'])
    def test_group_types_show_pre_microversion(self):
        """show is rejected on a microversion older than GROUP_TYPE."""
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        type_id = uuid.uuid4()
        req = fakes.HTTPRequest.blank(
            '/v3/%s/group_types/%s' % (fake.PROJECT_ID, type_id),
            version=mv.get_prior_version(mv.GROUP_TYPE))
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, req, type_id)
    def test_group_types_show_not_found(self):
        self.mock_object(group_types, 'get_group_type',
                         return_group_types_get_group_type)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' %
                                      (fake.PROJECT_ID,
                                       fake.WILL_NOT_BE_FOUND_ID),
                                      version=mv.GROUP_TYPE)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, fake.WILL_NOT_BE_FOUND_ID)
    def test_get_default(self):
        self.mock_object(group_types, 'get_default_group_type',
                         return_group_types_get_default)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.method = 'GET'
        res_dict = self.controller.show(req, 'default')
        self.assertEqual(1, len(res_dict))
        self.assertEqual('group_type_1', res_dict['group_type']['name'])
        self.assertEqual('group_type_desc_1',
                         res_dict['group_type']['description'])
    def test_get_default_not_found(self):
        self.mock_object(group_types, 'get_default_group_type',
                         return_group_types_get_default_not_found)
        req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' %
                                      fake.PROJECT_ID,
                                      version=mv.GROUP_TYPE)
        req.method = 'GET'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 'default')
    def test_view_builder_show(self):
        """Non-admin view omits group_specs and internal bookkeeping."""
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            deleted=False,
            created_at=now,
            updated_at=now,
            group_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v3",
                                          version=mv.GROUP_TYPE)
        output = view_builder.show(request, raw_group_type)
        self.assertIn('group_type', output)
        expected_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            id=42,
        )
        self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_admin(self):
        """Admin view additionally includes group_specs."""
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            deleted=False,
            created_at=now,
            updated_at=now,
            group_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
                                          version=mv.GROUP_TYPE)
        output = view_builder.show(request, raw_group_type)
        self.assertIn('group_type', output)
        expected_group_type = dict(
            name='new_type',
            description='new_type_desc',
            is_public=True,
            group_specs={},
            id=42,
        )
        self.assertDictEqual(expected_group_type, output['group_type'])
    # NOTE(review): the double-underscore prefix hides this method from
    # test discovery (and name-mangles it), so it never runs; the name
    # also says "qos_specs" in a group_types test -- looks like a
    # deliberately-disabled copy from the volume-types tests. Confirm
    # whether it should be deleted or re-enabled.
    def __test_view_builder_show_qos_specs_id_policy(self):
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[False, True]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_group_specs_policy(self):
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[True, False]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                group_specs={},
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                group_specs={},
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_show_pass_all_policy(self):
        # NOTE(review): side_effect=[True, False] is identical to the
        # group_specs_policy test above; for a "pass all" scenario
        # [True, True] was presumably intended -- confirm.
        with mock.patch.object(context.RequestContext,
                               'authorize',
                               side_effect=[True, False]):
            view_builder = views_types.ViewBuilder()
            now = timeutils.utcnow().isoformat()
            raw_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                deleted=False,
                created_at=now,
                updated_at=now,
                group_specs={},
                deleted_at=None,
                id=42,
            )
            request = fakes.HTTPRequest.blank("/v3",
                                              version=mv.GROUP_TYPE)
            output = view_builder.show(request, raw_group_type)
            self.assertIn('group_type', output)
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                group_specs={},
                is_public=True,
                id=42,
            )
            self.assertDictEqual(expected_group_type, output['group_type'])
    def test_view_builder_list(self):
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_types = []
        for i in range(0, 10):
            raw_group_types.append(
                dict(
                    name='new_type',
                    description='new_type_desc',
                    is_public=True,
                    deleted=False,
                    created_at=now,
                    updated_at=now,
                    group_specs={},
                    deleted_at=None,
                    id=42 + i
                )
            )
        request = fakes.HTTPRequest.blank("/v3",
                                          version=mv.GROUP_TYPE)
        output = view_builder.index(request, raw_group_types)
        self.assertIn('group_types', output)
        for i in range(0, 10):
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                id=42 + i
            )
            self.assertDictEqual(expected_group_type,
                                 output['group_types'][i])
    def test_view_builder_list_admin(self):
        view_builder = views_types.ViewBuilder()
        now = timeutils.utcnow().isoformat()
        raw_group_types = []
        for i in range(0, 10):
            raw_group_types.append(
                dict(
                    name='new_type',
                    description='new_type_desc',
                    is_public=True,
                    deleted=False,
                    created_at=now,
                    updated_at=now,
                    group_specs={},
                    deleted_at=None,
                    id=42 + i
                )
            )
        request = fakes.HTTPRequest.blank("/v3", use_admin_context=True,
                                          version=mv.GROUP_TYPE)
        output = view_builder.index(request, raw_group_types)
        self.assertIn('group_types', output)
        for i in range(0, 10):
            expected_group_type = dict(
                name='new_type',
                description='new_type_desc',
                is_public=True,
                group_specs={},
                id=42 + i
            )
            self.assertDictEqual(expected_group_type,
                                 output['group_types'][i])
| |
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Tejal Desai'
import sys
import unittest
from mock import patch
import fake_filesystem as pyfakefs
import twitter.common.fs
from twitter.common.fs import HDFSHelper
class MockCommandUtil:
  """Canned stand-in for the CommandUtil used by HDFSHelper.

  `execute` dispatches on cmd[4] (the dfs operation) and cmd[5] (its first
  argument) and returns whatever shape the helper expects for that case;
  any unrecognised command is echoed back as the joined command line so
  tests can assert the exact hadoop CLI string that was built.
  NOTE(review): return types intentionally vary per branch (tuple, int,
  bool, str) to match how HDFSHelper consumes each operation.
  """
  @staticmethod
  def execute(cmd, get_output=True):
    # ls/lsr on 'path': fake listing with one directory and two files
    # (one file name contains a space, to exercise output parsing).
    if (cmd[4] == '-lsr' or cmd[4] == '-ls') and cmd[5] =='path':
      return (0,"\n".join(["Found 1 items",
                           "drwxr-xr-x - tdesai staff 68 2012-08-06 13:51 hadoop_dir/test_dir",
                           "-rwxrwxrwx 1 tdesai staff 6 2012-08-06 14:01 tejal.txt",
                           "-rwxrwxrwx 1 tdesai staff 6 2012-08-06 14:01 tejal txt"]))
    # ls/lsr on 'non_existing': hadoop-style non-zero exit status.
    if (cmd[4] == '-lsr' or cmd[4] == '-ls') and cmd[5] =='non_existing':
      return (255,"ls: File doesnot exists")
    # ls/lsr on 'empty': success but no output.
    if (cmd[4] == '-lsr' or cmd[4] == '-ls') and cmd[5] =='empty':
      return (0,None)
    # -test: truthy only for the exact expected existence check.
    if cmd[4] == '-test':
      return " ".join(cmd) == \
          'hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -test -e hadoop_dir'
    # -copyToLocal: simulate a successful fetch of 'somefile' by writing
    # the expected contents into the caller-supplied temp file; 'non_exist'
    # fails with status 1.
    if cmd[4] == '-copyToLocal':
      if get_output:
        tmp_file = cmd[6]
        if " ".join(cmd) == \
            "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -copyToLocal " + \
            "somefile " + tmp_file:
          with open(tmp_file, "w") as f:
            f.write("read_test")
          return 0
        elif " ".join(cmd) == \
            "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -copyToLocal " + \
            "non_exist " + tmp_file:
          return 1
    # -copyFromLocal (write path): verify the temp file the helper wrote
    # holds the expected payload AND the command line matches.
    if cmd[4] == '-copyFromLocal':
      if cmd[5] != 'text_file':
        tmp_file = cmd[5]
        with open(tmp_file, "r") as f:
          text1 = f.read()
        return (text1 == "write_text" and
                " ".join(cmd) == " ".join(["hadoop", "--config", "/etc/hadoop/hadoop-conf-tst-smf1",
                                           "dfs", "-copyFromLocal", tmp_file, "somefile"]))
    #For rest all cases return the command
    return " ".join(cmd)
  @staticmethod
  def execute_and_get_output(cmd):
    # Same dispatch, output-capturing flavour.
    return MockCommandUtil.execute(cmd, True)
  @staticmethod
  def execute_suppress_stdout(cmd):
    # Same dispatch, stdout-suppressed flavour.
    return MockCommandUtil.execute(cmd, get_output=False)
class HdfsTest(unittest.TestCase):
  """Tests for HDFSHelper wired to MockCommandUtil instead of real hadoop.

  MockCommandUtil echoes unrecognised commands back as the joined command
  line, so most tests simply assert that HDFSHelper assembled the expected
  hadoop CLI string.
  """
  _config_dir = "/etc/hadoop/hadoop-conf-tst-smf1"
  _site_config = "%s/site.xml" % _config_dir
  _original_cwd = None
  def _make_helper(self):
    """Build an HDFSHelper backed by MockCommandUtil (shared by all tests)."""
    return HDFSHelper(HdfsTest._config_dir, command_class=MockCommandUtil)
  def setUp(self):
    """Swap the os module used by twitter.common.fs.hdfs for a pyfakefs one."""
    fake_fs = pyfakefs.FakeFilesystem()
    fake_os = pyfakefs.FakeOsModule(fake_fs)
    fake_fs.CreateFile(HdfsTest._site_config, contents="this is not a real file.")
    fake_fs.CreateFile("src", contents="heh. before pyfakefs this was unintentionally a dir.")
    self.original_os = twitter.common.fs.hdfs.os
    twitter.common.fs.hdfs.os = fake_os
  def tearDown(self):
    """Restore the real os module."""
    twitter.common.fs.hdfs.os = self.original_os
  def test_get_config_behavior(self):
    """A nonexistent config dir or a plain file path raises ValueError."""
    self.assertRaises(ValueError, HDFSHelper, "/this/does/not/exist",
                      command_class=MockCommandUtil)
    self.assertRaises(ValueError, HDFSHelper, HdfsTest._site_config,
                      command_class=MockCommandUtil)
  def test_get_config(self):
    """The config dir passed in is exposed via .config."""
    self.assertEqual(self._make_helper().config, HdfsTest._config_dir)
  def test_get(self):
    cmd = self._make_helper().get(['src'], "dst")
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -get src dst"
    self.assertEqual(cmd, expected_cmd)
  def test_put(self):
    cmd = self._make_helper().put('src', 'dst')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -put src dst"
    self.assertEqual(cmd, expected_cmd)
  def test_cat(self):
    cmd = self._make_helper().cat('text_file', 'local')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -cat " + \
        "text_file"
    self.assertEqual(cmd, expected_cmd)
  def test_hdfs_ls(self):
    """ls parses the canned listing into [name, size] pairs."""
    hdfs_helper = self._make_helper()
    expected_output_dir = [['hadoop_dir/test_dir', 68]]
    expected_output = [['tejal.txt', 6], ['tejal txt', 6]]
    # Directories only.
    self.assertEqual(hdfs_helper.ls('path', True), expected_output_dir)
    # Files only (default).
    self.assertEqual(hdfs_helper.ls('path'), expected_output)
    # Empty path yields a falsy result.
    self.assertFalse(hdfs_helper.ls('empty', True))
    # Non-zero hadoop exit status surfaces as InternalError.
    self.assertRaises(HDFSHelper.InternalError, hdfs_helper.ls,
                      'non_existing', True)
  def test_hdfs_lsr(self):
    """lsr behaves like ls but recursive, same parsed shape."""
    hdfs_helper = self._make_helper()
    expected_output = [['tejal.txt', 6], ['tejal txt', 6]]
    expected_output_dir = [['hadoop_dir/test_dir', 68]]
    self.assertEqual(hdfs_helper.lsr('path'), expected_output)
    self.assertEqual(hdfs_helper.lsr('path', True), expected_output_dir)
  def test_exists(self):
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(0, self._make_helper().exists('hadoop_dir'))
  def test_read(self):
    """read returns the fetched contents, or None when the file is absent."""
    hdfs_helper = self._make_helper()
    read_text = hdfs_helper.read('somefile')
    self.assertEqual("read_test", read_text)
    read_text = hdfs_helper.read('non_exist')
    self.assertIsNone(read_text)
  def test_write(self):
    # MockCommandUtil verifies both payload and command line, returning True.
    self.assertEqual(True, self._make_helper().write('somefile', "write_text"))
  def test_mkdir(self):
    cmd = self._make_helper().mkdir('dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -mkdir dest"
    self.assertEqual(cmd, expected_cmd)
  def test_rm(self):
    cmd = self._make_helper().rm('dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -rm dest"
    self.assertEqual(cmd, expected_cmd)
  def test_cp(self):
    cmd = self._make_helper().cp('src', 'dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -cp src dest"
    self.assertEqual(cmd, expected_cmd)
  def test_mv(self):
    cmd = self._make_helper().mv('src', 'dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs -mv src dest"
    self.assertEqual(cmd, expected_cmd)
  def test_copy_from_local(self):
    cmd = self._make_helper().copy_from_local('text_file', 'dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs " + \
        "-copyFromLocal text_file dest"
    self.assertEqual(cmd, expected_cmd)
  def test_copy_to_local(self):
    cmd = self._make_helper().copy_to_local('text_file', 'dest')
    expected_cmd = "hadoop --config /etc/hadoop/hadoop-conf-tst-smf1 dfs " + \
        "-copyToLocal text_file dest"
    self.assertEqual(cmd, expected_cmd)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
  """Produces a dataset iterator and events to control the order of elements.

  Args:
    num_elements: the number of input elements
    num_parallel_calls: the degree of map parallelism

  Returns:
    A dataset iterator (represented as `get_next` op) and events that can be
    used to control the order of output elements.
  """
  # Set up threading events used to sequence when items are produced that
  # are subsequently interleaved. These events allow us to deterministically
  # simulate slowdowns and force sloppiness.
  coordination_events = {i: threading.Event() for i in range(num_elements)}
  def map_py_fn(x):
    # Block until the test signals that element x may proceed; clear so the
    # same event can gate x again on a later pass.
    coordination_events[x].wait()
    coordination_events[x].clear()
    return x * x
  def map_fn(x):
    return script_ops.py_func(map_py_fn, [x], x.dtype)
  # Disable determinism so map outputs may be produced out of order.
  options = dataset_ops.Options()
  options.experimental_deterministic = False
  dataset = dataset_ops.Dataset.range(num_elements).map(
      map_fn, num_parallel_calls).with_options(options)
  return dataset, coordination_events
# TODO(jsimsa): Add tests for `map_with_legacy_function`.
@test_util.run_all_in_graph_and_eager_modes
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
  def _buildMapDataset(self, components, count):
    """Return from_tensor_slices(components).map(square).repeat(count)."""
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
        _map_fn).repeat(count)
    # Element-wise squaring must preserve the per-element static shapes.
    self.assertEqual(
        [c.shape[1:] for c in components],
        [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
    return dataset
  def testMapDataset(self):
    """Tests a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    # Test single-threaded access to the iterator.
    get_next = self.getNext(self._buildMapDataset(components, 14))
    # Each of the 14 repetitions must yield all 7 elements, in order, with
    # every component squared by the map function.
    for _ in range(14):
      for i in range(7):
        result = self.evaluate(get_next())
        for component, result_component in zip(components, result):
          self.assertAllEqual(component[i]**2, result_component)
    # After 14 * 7 elements the iterator must be exhausted.
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerMapDatasetMultithreaded(self):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(self._buildMapDataset(components, 18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=num_parallel_calls).prefetch(
output_buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
def do_test(num_parallel_calls, output_buffer_size):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._buildParallelMapDataset(components, 14, num_parallel_calls,
output_buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for num_parallel_calls_val, output_buffer_size_val in [(1, 1), (1, 2), (2,
2),
(2, 4), (8, 8),
(8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
# TODO(b/117581999): add eager coverage, different threads run in graph
# context.
@test_util.run_v1_only("b/120545219")
def testSkipEagerParallelMapDatasetMultithreaded(self):
def do_test(num_parallel_calls, output_buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._buildParallelMapDataset(components, 18, num_parallel_calls,
output_buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
  def testParallelMapError(self):
    """Tests that an error in a parallel map surfaces at the failing element
    and does not terminate the rest of the pipeline."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_parallel_calls=2))
    get_next = self.getNext(dataset)
    # The first three elements are finite and pass the numerics check.
    for _ in range(3):
      self.evaluate(get_next())
    # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())
    # The failure is per-element: the 5th element is still produced, after
    # which the dataset reports end-of-sequence.
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureIterator(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = input_sentences.map(lambda x: string_ops.string_split([x]).values
).map(table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_v1_only("b/123904513")
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@test_util.run_v1_only("b/120545219")
def testSkipEagerCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
-1).map(lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testSeededStatefulOperatorIsProperlyStateful(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(
10).map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testStatefulMapKeepsStateAcrossIterators(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: random_ops.random_uniform((), seed=11)).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
def testStatefulOperationInShortCircuit(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10).map(increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
def testMapDict(self):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2}).map(
lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(count):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
def testUseStepContainerInMap(self):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
def testCaseAndCondInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(
row).map(lambda x: control_map_fn(x, num))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
return self.getNext(dataset)
row = np.arange(6)
for num in [2, 3, 4]:
get_next = build_dataset(row, num)
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testCaseAndCondInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
# pylint: disable=g-long-lambda
dataset = dataset_ops.Dataset.from_tensors(
row).map(lambda elems: map_fn.map_fn(
lambda x: control_map_fn(x, num), elems))
# pylint: enable=g-long-lambda
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testNestedListMapDataset(self):
dataset = dataset_ops.Dataset.from_tensors(
[0, 1, 2]).repeat(10).map(lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
def do_test(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
# Simple test that prefetch yields the expected values in the
# expected order.
for i in range(100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in [1, 10, 100, 1000]:
do_test(buffer_size)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
def do_test_ev(buffer_size):
dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(
buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
for buffer_size in range(1, set_event_during_invocation):
do_test_ev(buffer_size)
def testReturnList(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10).map(_sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
def testSparseMapShapeInference(self):
if not context.executing_eagerly():
self.skipTest("SparseTensor shape inference requires eager mode")
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = dataset.map(lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
def testSparseMapShapeInferencePartial(self):
if not context.executing_eagerly():
self.skipTest("SparseTensor shape inference requires eager mode")
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = dataset.map(lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
def testTensorArray(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10).map(_tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
def testTensorArrayChain(self):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10).map(_tensor_array).map(_check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
def testRagged(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5).map(_ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
def testRaggedChain(self):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10).map(_ragged).map(_concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
@test_util.run_v1_only("b/123904513")
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105).map(
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConstantOutput(self):
dataset = dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
@test_util.run_v1_only("map_with_legacy_function v1 only")
def testWarnOnLookupTableLegacyFunction(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map_with_legacy_function(
collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
# map_fun doesn't use seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(
2).map(lambda x: random_ops.random_shuffle(x, seed=37))
found_warning = False
for warning in w:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertFalse(found_warning)
def testNestedDatasetMap(self):
# TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove
# the `get_single_element()` call.
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(
dataset_ops.Dataset.from_tensor_slices).map(
lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Map", lambda dataset, func:
dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
("ParallelMap", lambda dataset, func:
dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
use_inter_op_parallelism=False)),
)
def testNoInterOpParallelism(self, make_dataset_fn):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = make_dataset_fn(dataset, _map_fn)
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
@parameterized.named_parameters(
("SequentialIdentity", None, lambda x: x, None),
("SequentialReplicate", None, lambda x: (x, x), None),
("SequentialSwap", (None, None), lambda x, y: (y, x), None),
("SequentialProject", (None, None), lambda x, y: x, None),
("ParallelIdentity", None, lambda x: x, 10),
("ParallelReplicate", None, lambda x: (x, x), 10),
("ParallelSwap", (None, None), lambda x, y: (y, x), 10),
("ParallelProject", (None, None), lambda x, y: x, 10),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat().map(
map_fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = map_fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = map_fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 10),
)
def testShortCircuitCapturedInput(self, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat().map(
lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@parameterized.named_parameters(
("1", 1, 1),
("2", 10, 1),
("3", 10, 10),
("4", 100, 1),
("5", 100, 10),
("6", 100, 100),
)
def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 10, 10),
("2", 100, 10),
("3", 100, 100),
)
def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
  @parameterized.named_parameters(
      ("Map", None),
      ("ParallelMap", 12),
  )
  def testPreserveCardinality(self, num_parallel_calls):
    """Tests that a StopIteration raised inside a py_func map function is
    reported as InvalidArgumentError rather than ending the dataset early."""

    def py_fn(_):
      raise StopIteration()

    dataset = dataset_ops.DatasetV2.from_tensors(0).map(
        lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
        num_parallel_calls=num_parallel_calls)
    get_next = self.getNext(dataset)
    # The input has one element, so the StopIteration must not be interpreted
    # as end-of-sequence (which would silently change the cardinality); it
    # must surface as an error.
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())
# NOTE: collection test is specific to graph mode only, no eager coverage.
@test_util.run_v1_only("graph specific test")
def testSkipEagerCollectionCopy(self):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
dataset.map(func)
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 12),
)
@test_util.run_v1_only("graph-mode specific test")
def testSkipEagerMapCancellation(self, num_parallel_calls):
# Checks that a cancellation of is threaded through to map transformation.
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
def fn(_):
return queue.dequeue()
dataset = dataset_ops.Dataset.range(1).map(
fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
# TODO(shivaniagarwal): separate out `map` and `map_with_legacy_function` tests
# as later would not work in v2.
@test_util.run_all_in_graph_and_eager_modes
class MapWithCapturedVariableTests(test_base.DatasetTestBase,
                                   parameterized.TestCase):
  """Tests for `map` functions that capture variables or constants."""
  # TODO(b/126553094): map doesn't work with a variable defined inside the
  # function in eager mode; Graph tensors possibly leak out of the function
  # building context from the function graph in eager mode, as variables are
  # created in init_scope.
  @test_util.run_v1_only("b/126553094")
  def testSkipEagerCreateVariableInsideFunctionWithGetter(self):
    # The mapped function creates (or reuses, via AUTO_REUSE) a resource
    # variable through the variable_scope getter and increments it per element.
    def func(_):
      with variable_scope.variable_scope(
          "variable", reuse=variable_scope.AUTO_REUSE):
        counter_var = variable_scope.get_variable(
            "counter", (), dtypes.int32, use_resource=True)
      return counter_var.assign_add(1)
    # NOTE: In the legacy function, resource is captured by value for variable
    # getter, so assign_add is not available and tracing fails.
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
    with self.assertRaisesWithPredicateMatch(
        AttributeError, "'Tensor' object has no attribute 'assign_add'"):
      dataset.map_with_legacy_function(func)
    dataset = dataset.map(func)
    self.evaluate(variables.global_variables_initializer())
    get_next = self.getNext(dataset, requires_initialization=True)
    # One increment per element: values 1..10, then exhaustion.
    for i in range(10):
      self.assertEqual(i + 1, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @parameterized.named_parameters(
      ("MapLegacyFunction",
       lambda dataset, func: dataset.map_with_legacy_function(func)),
      ("Map", lambda dataset, func: dataset.map(func)),
  )
  @test_util.run_v1_only("map_with_legacy_function is only available in v1.")
  def testCaptureVariable(self, transformation_function):
    # A variable captured from the outer scope is mutated once per element.
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
    dataset = transformation_function(
        dataset, lambda _: counter_var.assign_add(1))
    get_next = self.getNext(dataset, requires_initialization=True)
    self.evaluate(counter_var.initializer)
    # The variable and the produced element advance in lockstep.
    for i in range(10):
      self.assertEqual(i, self.evaluate(counter_var))
      self.assertEqual(i + 1, self.evaluate(get_next()))
    self.assertEqual(10, self.evaluate(counter_var))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertEqual(10, self.evaluate(counter_var))
  # NOTE: no need to explicitly initialize variables in eager mode.
  @parameterized.named_parameters(
      ("MapLegacyFunction",
       lambda dataset, func: dataset.map_with_legacy_function(func)),
      ("Map", lambda dataset, func: dataset.map(func)),
  )
  @test_util.run_v1_only("this test is meant to run in graph mode only.")
  def testSkipEagerCaptureUninitializedVariableError(self,
                                                     transformation_function):
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
    dataset = transformation_function(
        dataset, lambda _: counter_var.assign_add(1))
    get_next = self.getNext(dataset, requires_initialization=True)
    # Reading the captured variable without running its initializer fails.
    with self.assertRaises(errors.NotFoundError):
      self.evaluate(get_next())
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @parameterized.named_parameters(
      ("MapLegacyFunction",
       lambda dataset, func: dataset.map_with_legacy_function(func)),
      ("Map", lambda dataset, func: dataset.map(func)),
  )
  @test_util.run_v1_only("b/121264236")
  def testSkipEagerCaptureConstantsWithConflictingDevices(
      self, transformation_function):
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.cached_session(config=config):
      # Constants pinned to two different devices are captured together.
      with ops.device("/device:CPU:0"):
        a = constant_op.constant(3.0)
      with ops.device("/device:CPU:1"):
        b = constant_op.constant(5.0)
      def func(_):
        return math_ops.add(a, b)
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = transformation_function(dataset, func)
      expected_output = [8.0] * 10
      self.assertDatasetProduces(dataset, expected_output=expected_output)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @test_util.run_v1_only("b/121264236")
  def testSkipEagerRefVariablesWithMultipleDevices(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.cached_session(config=config):
      def func(_):
        with ops.device("/device:CPU:0"):
          a = variables.VariableV1(3.0)
        with ops.device("/device:CPU:1"):
          b = variables.VariableV1(5.0)
        return math_ops.add(a, b)
      # NOTE: Use the legacy function implementation as eager function will
      # convert RefVariables to ResourceVariables.
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = dataset.map_with_legacy_function(func)
      self.evaluate(variables.global_variables_initializer())
      expected_output = [8.0] * 10
      self.assertDatasetProduces(
          dataset,
          expected_output=expected_output,
          requires_initialization=True)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @test_util.run_v1_only("b/121264236")
  def testSkipEagerResourceVariablesWithMultipleDevices(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    def func(_):
      # Two resource variables on different devices feed one add.
      with variable_scope.variable_scope(
          "variable", reuse=variable_scope.AUTO_REUSE):
        with ops.device("/device:CPU:0"):
          a_var = variable_scope.get_variable(
              "a", (), dtypes.int32, use_resource=True)
          a_var = math_ops.add(a_var, 1)
        with ops.device("/device:CPU:1"):
          b_var = variable_scope.get_variable(
              "b", (), dtypes.int32, use_resource=True)
      return math_ops.add(a_var, b_var)
    g_1 = ops.Graph()
    with self.session(config=config, graph=g_1):
      # The MapDataset node ends up with two ResourceVariable inputs, one on
      # device CPU:0 and the other on device CPU:1.
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = dataset.map(func)
      self.evaluate(variables.global_variables_initializer())
      expected_output = [1] * 10
      self.assertDatasetProduces(
          dataset,
          expected_output=expected_output,
          requires_initialization=True)
    g_2 = ops.Graph()
    with self.session(config=config, graph=g_2):
      # In old-Defun variable is captured as value, hence there is no colocation
      # error.
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = dataset.map_with_legacy_function(func)
      self.evaluate(variables.global_variables_initializer())
      expected_output = [1] * 10
      self.assertDatasetProduces(
          dataset,
          expected_output=expected_output,
          requires_initialization=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pathlib import Path, PurePath
from textwrap import dedent
import pytest
from pants.backend.codegen.protobuf.python import python_protobuf_module_mapper
from pants.backend.codegen.protobuf.target_types import ProtobufLibrary
from pants.backend.python.dependency_inference.module_mapper import (
FirstPartyPythonModuleMapping,
PythonModule,
PythonModuleOwners,
ThirdPartyPythonModuleMapping,
)
from pants.backend.python.dependency_inference.module_mapper import rules as module_mapper_rules
from pants.backend.python.target_types import PythonLibrary, PythonRequirementLibrary
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.mark.parametrize(
    "stripped_path,expected",
    [
        (PurePath("top_level.py"), "top_level"),
        (PurePath("dir", "subdir", "__init__.py"), "dir.subdir"),
        (PurePath("dir", "subdir", "app.py"), "dir.subdir.app"),
        (
            PurePath("src", "python", "project", "not_stripped.py"),
            "src.python.project.not_stripped",
        ),
    ],
)
def test_create_module_from_path(stripped_path: PurePath, expected: str) -> None:
    """A stripped source path converts to the matching dotted module name."""
    module = PythonModule.create_from_stripped_path(stripped_path)
    assert module == PythonModule(expected)
def test_first_party_modules_mapping() -> None:
    """A module lookup matches the exact module and one trailing member
    (function/class), but not deeper submodules; ambiguous modules are
    reported separately from unambiguous owners."""
    root_addr = Address("", relative_file_path="root.py")
    util_addr = Address("src/python/util", relative_file_path="strutil.py")
    test_addr = Address("tests/python/project_test", relative_file_path="test.py")
    mapping = FirstPartyPythonModuleMapping(
        mapping=FrozenDict(
            {"root": (root_addr,), "util.strutil": (util_addr,), "project_test.test": (test_addr,)}
        ),
        ambiguous_modules=FrozenDict(
            {"ambiguous": (root_addr, util_addr), "util.ambiguous": (util_addr, test_addr)}
        ),
    )
    # Exact module and one-member-deep lookups resolve; deeper paths do not.
    assert mapping.addresses_for_module("root") == ((root_addr,), ())
    assert mapping.addresses_for_module("root.func") == ((root_addr,), ())
    assert mapping.addresses_for_module("root.submodule.func") == ((), ())
    assert mapping.addresses_for_module("util.strutil") == ((util_addr,), ())
    assert mapping.addresses_for_module("util.strutil.ensure_text") == ((util_addr,), ())
    # A parent package of a mapped module is not itself owned.
    assert mapping.addresses_for_module("util") == ((), ())
    assert mapping.addresses_for_module("project_test.test") == ((test_addr,), ())
    assert mapping.addresses_for_module("project_test.test.TestDemo") == ((test_addr,), ())
    assert mapping.addresses_for_module("project_test.test.TestDemo.method") == ((), ())
    assert mapping.addresses_for_module("project_test") == ((), ())
    assert mapping.addresses_for_module("project.test") == ((), ())
    # Ambiguous modules follow the same depth rules, in the second slot.
    assert mapping.addresses_for_module("ambiguous") == ((), (root_addr, util_addr))
    assert mapping.addresses_for_module("ambiguous.func") == ((), (root_addr, util_addr))
    assert mapping.addresses_for_module("ambiguous.submodule.func") == ((), ())
    assert mapping.addresses_for_module("util.ambiguous") == ((), (util_addr, test_addr))
    assert mapping.addresses_for_module("util.ambiguous.Foo") == ((), (util_addr, test_addr))
    assert mapping.addresses_for_module("util.ambiguous.Foo.method") == ((), ())
def test_third_party_modules_mapping() -> None:
    """Third-party lookups resolve a module and any depth of submodule to the
    owning requirement; unknown modules yield None."""
    colors_addr = Address("", target_name="ansicolors")
    pants_addr = Address("", target_name="pantsbuild")
    submodule_addr = Address("", target_name="submodule")
    mapping = ThirdPartyPythonModuleMapping(
        mapping=FrozenDict(
            {"colors": colors_addr, "pants": pants_addr, "req.submodule": submodule_addr}
        ),
        ambiguous_modules=FrozenDict({"ambiguous": (colors_addr, pants_addr)}),
    )
    # Any submodule depth under a mapped module resolves to the same address.
    assert mapping.address_for_module("colors") == (colors_addr, ())
    assert mapping.address_for_module("colors.red") == (colors_addr, ())
    assert mapping.address_for_module("pants") == (pants_addr, ())
    assert mapping.address_for_module("pants.task") == (pants_addr, ())
    assert mapping.address_for_module("pants.task.task") == (pants_addr, ())
    assert mapping.address_for_module("pants.task.task.Task") == (pants_addr, ())
    assert mapping.address_for_module("req.submodule") == (submodule_addr, ())
    assert mapping.address_for_module("req.submodule.foo") == (submodule_addr, ())
    # Siblings and parents of a mapped module are not owned.
    assert mapping.address_for_module("req.another") == (None, ())
    assert mapping.address_for_module("req") == (None, ())
    assert mapping.address_for_module("unknown") == (None, ())
    assert mapping.address_for_module("unknown.pants") == (None, ())
    # Ambiguous modules are surfaced in the second slot at any depth.
    assert mapping.address_for_module("ambiguous") == (None, (colors_addr, pants_addr))
    assert mapping.address_for_module("ambiguous.foo") == (None, (colors_addr, pants_addr))
    assert mapping.address_for_module("ambiguous.foo.bar") == (None, (colors_addr, pants_addr))
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with source stripping, the Python module mapper, the
    Protobuf plugin mapper, and the query rules used by the tests below."""
    return RuleRunner(
        rules=[
            *stripped_source_files.rules(),
            *module_mapper_rules(),
            *python_protobuf_module_mapper.rules(),
            QueryRule(FirstPartyPythonModuleMapping, []),
            QueryRule(ThirdPartyPythonModuleMapping, []),
            QueryRule(PythonModuleOwners, [PythonModule]),
        ],
        target_types=[PythonLibrary, PythonRequirementLibrary, ProtobufLibrary],
    )
def test_map_first_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
    """End-to-end first-party mapping: subtarget generation, multi-owner
    ambiguity, implementation+stub pairs, and plugin (Protobuf) mappings."""
    rule_runner.set_options(
        ["--source-root-patterns=['src/python', 'tests/python', 'build-support']"]
    )
    # Two modules belonging to the same target. We should generate subtargets for each file.
    rule_runner.create_files("src/python/project/util", ["dirutil.py", "tarutil.py"])
    rule_runner.add_to_build_file("src/python/project/util", "python_library()")
    # A module with two owners, meaning that neither should be resolved.
    rule_runner.create_file("src/python/two_owners.py")
    rule_runner.add_to_build_file("src/python", "python_library()")
    rule_runner.create_file("build-support/two_owners.py")
    rule_runner.add_to_build_file("build-support", "python_library()")
    # A package module. Because there's only one source file belonging to the target, we should
    # not generate subtargets.
    rule_runner.create_file("tests/python/project_test/demo_test/__init__.py")
    rule_runner.add_to_build_file("tests/python/project_test/demo_test", "python_library()")
    # A module with both an implementation and a type stub. Even though the module is the same, we
    # special-case it to be legal for both file targets to be inferred.
    rule_runner.create_files("src/python/stubs", ["stub.py", "stub.pyi"])
    rule_runner.add_to_build_file("src/python/stubs", "python_library()")
    # Check that plugin mappings work. Note that we duplicate one of the files with a normal
    # python_library(), which means neither the Protobuf nor Python targets should be used.
    rule_runner.create_files("src/python/protos", ["f1.proto", "f2.proto", "f2_pb2.py"])
    rule_runner.add_to_build_file(
        "src/python/protos",
        dedent(
            """\
            protobuf_library(name='protos')
            python_library(name='py')
            """
        ),
    )
    # If a module is ambiguous within a particular implementation, which means that it's not used
    # in that implementation's final mapping, it should still trigger ambiguity with another
    # implementation. Here, we have ambiguity with the Protobuf targets, but the Python file has
    # no ambiguity with other Python files; the Protobuf ambiguity needs to result in Python
    # being ambiguous.
    rule_runner.create_files("src/python/protos_ambiguous", ["f.proto", "f_pb2.py"])
    rule_runner.add_to_build_file(
        "src/python/protos_ambiguous",
        dedent(
            """\
            protobuf_library(name='protos1')
            protobuf_library(name='protos2')
            python_library(name='py')
            """
        ),
    )
    result = rule_runner.request(FirstPartyPythonModuleMapping, [])
    assert result == FirstPartyPythonModuleMapping(
        mapping=FrozenDict(
            {
                "project.util.dirutil": (
                    Address("src/python/project/util", relative_file_path="dirutil.py"),
                ),
                "project.util.tarutil": (
                    Address("src/python/project/util", relative_file_path="tarutil.py"),
                ),
                "project_test.demo_test": (
                    Address(
                        "tests/python/project_test/demo_test", relative_file_path="__init__.py"
                    ),
                ),
                "protos.f1_pb2": (
                    Address(
                        "src/python/protos", relative_file_path="f1.proto", target_name="protos"
                    ),
                ),
                "stubs.stub": (
                    Address("src/python/stubs", relative_file_path="stub.py"),
                    Address("src/python/stubs", relative_file_path="stub.pyi"),
                ),
            }
        ),
        ambiguous_modules=FrozenDict(
            {
                "protos.f2_pb2": (
                    Address(
                        "src/python/protos", relative_file_path="f2.proto", target_name="protos"
                    ),
                    Address("src/python/protos", relative_file_path="f2_pb2.py", target_name="py"),
                ),
                "protos_ambiguous.f_pb2": (
                    Address(
                        "src/python/protos_ambiguous",
                        relative_file_path="f.proto",
                        target_name="protos1",
                    ),
                    Address(
                        "src/python/protos_ambiguous",
                        relative_file_path="f.proto",
                        target_name="protos2",
                    ),
                    Address(
                        "src/python/protos_ambiguous",
                        relative_file_path="f_pb2.py",
                        target_name="py",
                    ),
                ),
                "two_owners": (
                    Address("build-support", relative_file_path="two_owners.py"),
                    Address("src/python", relative_file_path="two_owners.py"),
                ),
            }
        ),
    )
def test_map_third_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
    """Third-party mapping honors explicit module_mapping, falls back to the
    normalized requirement name, handles direct (URL) references, and records
    modules claimed by two requirements as ambiguous."""
    rule_runner.add_to_build_file(
        "3rdparty/python",
        dedent(
            """\
            python_requirement_library(
                name='ansicolors',
                requirements=['ansicolors==1.21'],
                module_mapping={'ansicolors': ['colors']},
            )
            python_requirement_library(
                name='req1',
                requirements=['req1', 'two_owners'],
            )
            python_requirement_library(
                name='un_normalized',
                requirements=['Un-Normalized-Project>3', 'two_owners'],
            )
            python_requirement_library(
                name='direct_references',
                requirements=[
                    'pip@ git+https://github.com/pypa/pip.git', 'local_dist@ file:///path/to/dist.whl',
                ],
            )
            """
        ),
    )
    result = rule_runner.request(ThirdPartyPythonModuleMapping, [])
    assert result == ThirdPartyPythonModuleMapping(
        mapping=FrozenDict(
            {
                "colors": Address("3rdparty/python", target_name="ansicolors"),
                "local_dist": Address("3rdparty/python", target_name="direct_references"),
                "pip": Address("3rdparty/python", target_name="direct_references"),
                "req1": Address("3rdparty/python", target_name="req1"),
                "un_normalized_project": Address("3rdparty/python", target_name="un_normalized"),
            }
        ),
        ambiguous_modules=FrozenDict(
            {
                "two_owners": (
                    Address("3rdparty/python", target_name="req1"),
                    Address("3rdparty/python", target_name="un_normalized"),
                ),
            }
        ),
    )
def test_map_module_to_address(rule_runner: RuleRunner) -> None:
    """End-to-end owner resolution for a single module, covering third-party
    requirements, first-party files/stubs, packages, and every ambiguity
    combination across the two implementations."""
    rule_runner.set_options(["--source-root-patterns=['source_root1', 'source_root2', '/']"])
    def assert_owners(
        module: str, *, expected: list[Address], expected_ambiguous: list[Address] | None = None
    ) -> None:
        # Resolve `module` and compare both the unambiguous and ambiguous owners.
        owners = rule_runner.request(PythonModuleOwners, [PythonModule(module)])
        assert list(owners.unambiguous) == expected
        assert list(owners.ambiguous) == (expected_ambiguous or [])
    # First check that we can map 3rd-party modules without ambiguity.
    rule_runner.add_to_build_file(
        "3rdparty/python",
        dedent(
            """\
            python_requirement_library(
                name='ansicolors',
                requirements=['ansicolors==1.21'],
                module_mapping={'ansicolors': ['colors']},
            )
            """
        ),
    )
    assert_owners("colors.red", expected=[Address("3rdparty/python", target_name="ansicolors")])
    # Now test that we can handle first-party type stubs that go along with that third party
    # requirement. Note that `colors.pyi` is at the top-level of the source root so that it strips
    # to the module `colors`.
    rule_runner.create_file("source_root1/colors.pyi")
    rule_runner.add_to_build_file("source_root1", "python_library()")
    assert_owners(
        "colors.red",
        expected=[
            Address("3rdparty/python", target_name="ansicolors"),
            Address("source_root1", relative_file_path="colors.pyi"),
        ],
    )
    # But don't allow a first-party implementation with the same module name.
    Path(rule_runner.build_root, "source_root1/colors.pyi").unlink()
    rule_runner.create_file("source_root1/colors.py")
    assert_owners(
        "colors.red",
        expected=[],
        expected_ambiguous=[
            Address("3rdparty/python", target_name="ansicolors"),
            Address("source_root1", relative_file_path="colors.py"),
        ],
    )
    # Check a first party module using a module path.
    rule_runner.create_file("source_root1/project/app.py")
    rule_runner.create_file("source_root1/project/file2.py")
    rule_runner.add_to_build_file("source_root1/project", "python_library()")
    assert_owners(
        "project.app", expected=[Address("source_root1/project", relative_file_path="app.py")]
    )
    # Now check with a type stub.
    rule_runner.create_file("source_root1/project/app.pyi")
    assert_owners(
        "project.app",
        expected=[
            Address("source_root1/project", relative_file_path="app.py"),
            Address("source_root1/project", relative_file_path="app.pyi"),
        ],
    )
    # Check a package path
    rule_runner.create_file("source_root2/project/subdir/__init__.py")
    rule_runner.add_to_build_file("source_root2/project/subdir", "python_library()")
    assert_owners(
        "project.subdir",
        expected=[Address("source_root2/project/subdir", relative_file_path="__init__.py")],
    )
    # Test a module with no owner (stdlib). This also smoke tests that we can handle when
    # there is no parent module.
    assert_owners("typing", expected=[])
    # Test a module with a single owner with a top-level source root of ".". Also confirm we
    # can handle when the module includes a symbol (like a class name) at the end.
    rule_runner.create_file("script.py")
    rule_runner.add_to_build_file("", "python_library(name='script')")
    assert_owners(
        "script.Demo", expected=[Address("", relative_file_path="script.py", target_name="script")]
    )
    # Ambiguous modules should be recorded.
    rule_runner.create_files("source_root1/ambiguous", ["f1.py", "f2.py", "f3.py"])
    rule_runner.add_to_build_file(
        "source_root1/ambiguous",
        dedent(
            """\
            # Ambiguity purely within third-party deps.
            python_requirement_library(name='thirdparty1', requirements=['foo'])
            python_requirement_library(name='thirdparty2', requirements=['foo'])
            # Ambiguity purely within first-party deps.
            python_library(name="firstparty1", sources=["f1.py"])
            python_library(name="firstparty2", sources=["f1.py"])
            # Ambiguity within third-party, which should result in ambiguity for first-party too.
            # These all share the module `ambiguous.f2`.
            python_requirement_library(
                name='thirdparty3', requirements=['bar'], module_mapping={'bar': ['ambiguous.f2']}
            )
            python_requirement_library(
                name='thirdparty4', requirements=['bar'], module_mapping={'bar': ['ambiguous.f2']}
            )
            python_library(name="firstparty3", sources=["f2.py"])
            # Ambiguity within first-party, which should result in ambiguity for third-party too.
            # These all share the module `ambiguous.f3`.
            python_library(name="firstparty4", sources=["f3.py"])
            python_library(name="firstparty5", sources=["f3.py"])
            python_requirement_library(
                name='thirdparty5', requirements=['baz'], module_mapping={'baz': ['ambiguous.f3']}
            )
            """
        ),
    )
    assert_owners(
        "foo",
        expected=[],
        expected_ambiguous=[
            Address("source_root1/ambiguous", target_name="thirdparty1"),
            Address("source_root1/ambiguous", target_name="thirdparty2"),
        ],
    )
    assert_owners(
        "ambiguous.f1",
        expected=[],
        expected_ambiguous=[
            Address(
                "source_root1/ambiguous", relative_file_path="f1.py", target_name="firstparty1"
            ),
            Address(
                "source_root1/ambiguous", relative_file_path="f1.py", target_name="firstparty2"
            ),
        ],
    )
    assert_owners(
        "ambiguous.f2",
        expected=[],
        expected_ambiguous=[
            Address("source_root1/ambiguous", target_name="thirdparty3"),
            Address("source_root1/ambiguous", target_name="thirdparty4"),
            Address(
                "source_root1/ambiguous", relative_file_path="f2.py", target_name="firstparty3"
            ),
        ],
    )
    assert_owners(
        "ambiguous.f3",
        expected=[],
        expected_ambiguous=[
            Address("source_root1/ambiguous", target_name="thirdparty5"),
            Address(
                "source_root1/ambiguous", relative_file_path="f3.py", target_name="firstparty4"
            ),
            Address(
                "source_root1/ambiguous", relative_file_path="f3.py", target_name="firstparty5"
            ),
        ],
    )
| |
import math as python_lib_Math
import math as Math
import builtins as python_lib_Builtins
import inspect as python_lib_Inspect
class Enum:
    """Haxe runtime representation of an enum value: a constructor tag, its
    declaration index, and the constructor arguments (or None)."""
    _hx_class_name = "Enum"
    _hx_fields = ["tag", "index", "params"]
    _hx_methods = ["__str__"]
    def __init__(self, tag, index, params):
        self.tag = tag
        self.index = index
        self.params = params
    def __str__(self):
        # Parameterless constructors print as the bare tag name.
        if self.params is None:
            return self.tag
        rendered = ",".join([python_Boot.toString1(p, '') for p in self.params])
        return (HxOverrides.stringOrNull(self.tag) + "(" +
                HxOverrides.stringOrNull(rendered) + ")")
class Script:
    """Program entry point generated from Haxe: reads `la_weather.csv`, strips
    the header row, and prints the first and last weather values."""
    _hx_class_name = "Script"
    _hx_statics = ["main", "getWeatherWithoutHeader", "getWeatherData", "findInArray"]
    @staticmethod
    def main():
        # Print the first and last entries of the header-stripped weather column.
        weather = Script.getWeatherWithoutHeader()
        print(str((weather[0] if 0 < len(weather) else None)))
        print(str(python_internal_ArrayImpl._get(weather, (len(weather) - 1))))
    @staticmethod
    def getWeatherWithoutHeader():
        # Extract column 1 of every CSV row, then drop the first (header) value.
        weather_data = Script.getWeatherData()
        weather = list()
        _g = 0
        while (_g < len(weather_data)):
            entry = (weather_data[_g] if _g >= 0 and _g < len(weather_data) else None)
            _g = (_g + 1)
            weather.append((entry[1] if 1 < len(entry) else None))
        return weather[1:len(weather)]
    @staticmethod
    def getWeatherData():
        # Load the CSV, normalize line endings, and split into per-row field lists.
        data = sys_io_File.getContent("la_weather.csv")
        data = StringTools.replace(data,"\r","")
        rows = data.split("\n")
        weather_data = []
        _g = 0
        while (_g < len(rows)):
            row = (rows[_g] if _g >= 0 and _g < len(rows) else None)
            _g = (_g + 1)
            comma_list = row.split(",")
            weather_data.append(comma_list)
        return weather_data
    @staticmethod
    def findInArray(value,array):
        # Return True iff `value` equals some element of `array` (Haxe equality).
        # BUG FIX: the generated loop body ended with an unconditional `break`,
        # so only array[0] was ever compared; the break is removed so the whole
        # array is scanned.
        _g = 0
        while (_g < len(array)):
            entry = (array[_g] if _g >= 0 and _g < len(array) else None)
            _g = (_g + 1)
            if HxOverrides.eq(value,entry):
                return True
        return False
class StringTools:
    """Subset of the Haxe StringTools API used by this program."""
    _hx_class_name = "StringTools"
    _hx_statics = ["replace"]
    @staticmethod
    def replace(s, sub, by):
        # An empty separator splits the string into individual characters,
        # matching Haxe's String.split("") semantics.
        pieces = list(s) if (sub == "") else s.split(sub)
        return by.join([python_Boot.toString1(piece, '') for piece in pieces])
class haxe_io_Eof:
    """Sentinel raised/returned by the Haxe runtime to signal end-of-file."""
    _hx_class_name = "haxe.io.Eof"
    _hx_methods = ["toString"]
    def toString(self):
        # Fixed textual representation used by the Haxe runtime.
        return "Eof"
# Runtime support emitted by the Haxe->Python compiler: stringification,
# reflection over generated classes, and keyword-mangling helpers.
class python_Boot:
    _hx_class_name = "python.Boot"
    _hx_statics = ["keywords", "toString1", "fields", "simpleField", "getInstanceFields", "getSuperClass", "getClassFields", "prefixLength", "unhandleKeywords"]
    # Haxe Std.string(): render any value; `s` is the accumulated indentation
    # used both for nesting and as a recursion-depth guard (cut off at 5).
    @staticmethod
    def toString1(o,s):
        if (o is None):
            return "null"
        if isinstance(o,str):
            return o
        if (s is None):
            s = ""
        if (len(s) >= 5):
            return "<...>"
        if isinstance(o,bool):
            if o:
                return "true"
            else:
                return "false"
        if isinstance(o,int):
            return str(o)
        if isinstance(o,float):
            try:
                # Whole-number floats print without a fractional part.
                if (o == int(o)):
                    def _hx_local_1():
                        def _hx_local_0():
                            v = o
                            return Math.floor((v + 0.5))
                        return str(_hx_local_0())
                    return _hx_local_1()
                else:
                    return str(o)
            except Exception as _hx_e:
                _hx_e1 = _hx_e
                e = _hx_e1
                return str(o)
        if isinstance(o,list):
            # Arrays render as "[a,b,...]", recursing with extra indentation.
            o1 = o
            l = len(o1)
            st = "["
            s = (("null" if s is None else s) + "\t")
            _g = 0
            while (_g < l):
                i = _g
                _g = (_g + 1)
                prefix = ""
                if (i > 0):
                    prefix = ","
                st = (("null" if st is None else st) + HxOverrides.stringOrNull(((("null" if prefix is None else prefix) + HxOverrides.stringOrNull(python_Boot.toString1((o1[i] if i >= 0 and i < len(o1) else None),s))))))
            st = (("null" if st is None else st) + "]")
            return st
        try:
            # A user-defined toString() takes precedence over everything below.
            if hasattr(o,"toString"):
                return o.toString()
        except Exception as _hx_e:
            _hx_e1 = _hx_e
            pass
        if (python_lib_Inspect.isfunction(o) or python_lib_Inspect.ismethod(o)):
            return "<function>"
        if hasattr(o,"__class__"):
            # Anonymous structures render as "{ field : value, ... }".
            if isinstance(o,_hx_AnonObject):
                toStr = None
                try:
                    fields = python_Boot.fields(o)
                    fieldsStr = None
                    _g1 = []
                    _g11 = 0
                    while (_g11 < len(fields)):
                        f = (fields[_g11] if _g11 >= 0 and _g11 < len(fields) else None)
                        _g11 = (_g11 + 1)
                        x = ((("" + ("null" if f is None else f)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f),(("null" if s is None else s) + "\t"))))
                        _g1.append(x)
                    fieldsStr = _g1
                    toStr = (("{ " + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr]))) + " }")
                except Exception as _hx_e:
                    _hx_e1 = _hx_e
                    e2 = _hx_e1
                    return "{ ... }"
                if (toStr is None):
                    return "{ ... }"
                else:
                    return toStr
            # Enum values render as "Tag(p1,p2)" or the bare tag.
            if isinstance(o,Enum):
                o2 = o
                l1 = len(o2.params)
                hasParams = (l1 > 0)
                if hasParams:
                    paramsStr = ""
                    _g2 = 0
                    while (_g2 < l1):
                        i1 = _g2
                        _g2 = (_g2 + 1)
                        prefix1 = ""
                        if (i1 > 0):
                            prefix1 = ","
                        paramsStr = (("null" if paramsStr is None else paramsStr) + HxOverrides.stringOrNull(((("null" if prefix1 is None else prefix1) + HxOverrides.stringOrNull(python_Boot.toString1((o2.params[i1] if i1 >= 0 and i1 < len(o2.params) else None),s))))))
                    return (((HxOverrides.stringOrNull(o2.tag) + "(") + ("null" if paramsStr is None else paramsStr)) + ")")
                else:
                    return o2.tag
            # Haxe-generated classes: "ClassName( f : v, ... )" for instances,
            # "#ClassName( ... )" for the class object itself.
            if hasattr(o,"_hx_class_name"):
                if (o.__class__.__name__ != "type"):
                    fields1 = python_Boot.getInstanceFields(o)
                    fieldsStr1 = None
                    _g3 = []
                    _g12 = 0
                    while (_g12 < len(fields1)):
                        f1 = (fields1[_g12] if _g12 >= 0 and _g12 < len(fields1) else None)
                        _g12 = (_g12 + 1)
                        x1 = ((("" + ("null" if f1 is None else f1)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f1),(("null" if s is None else s) + "\t"))))
                        _g3.append(x1)
                    fieldsStr1 = _g3
                    toStr1 = (((HxOverrides.stringOrNull(o._hx_class_name) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr1]))) + " )")
                    return toStr1
                else:
                    fields2 = python_Boot.getClassFields(o)
                    fieldsStr2 = None
                    _g4 = []
                    _g13 = 0
                    while (_g13 < len(fields2)):
                        f2 = (fields2[_g13] if _g13 >= 0 and _g13 < len(fields2) else None)
                        _g13 = (_g13 + 1)
                        x2 = ((("" + ("null" if f2 is None else f2)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f2),(("null" if s is None else s) + "\t"))))
                        _g4.append(x2)
                    fieldsStr2 = _g4
                    toStr2 = (((("#" + HxOverrides.stringOrNull(o._hx_class_name)) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr2]))) + " )")
                    return toStr2
            if (o == str):
                return "#String"
            if (o == list):
                return "#Array"
            if callable(o):
                return "function"
            try:
                if hasattr(o,"__repr__"):
                    return o.__repr__()
            except Exception as _hx_e:
                _hx_e1 = _hx_e
                pass
            if hasattr(o,"__str__"):
                return o.__str__([])
            if hasattr(o,"__name__"):
                return o.__name__
            return "???"
        else:
            return str(o)
    # Reflect.fields(): list an object's field names, demangling "_hx_"
    # prefixes on anonymous objects.
    @staticmethod
    def fields(o):
        a = []
        if (o is not None):
            if hasattr(o,"_hx_fields"):
                fields = o._hx_fields
                return list(fields)
            if isinstance(o,_hx_AnonObject):
                d = o.__dict__
                keys = d.keys()
                handler = python_Boot.unhandleKeywords
                for k in keys:
                    a.append(handler(k))
            elif hasattr(o,"__dict__"):
                a1 = []
                d1 = o.__dict__
                keys1 = d1.keys()
                for k in keys1:
                    a.append(k)
        return a
    # Reflect.field(): read a field by its Haxe name, applying the same
    # "_hx_" mangling the compiler uses for Python keywords and dunder-like
    # names; returns None for missing fields.
    @staticmethod
    def simpleField(o,field):
        if (field is None):
            return None
        field1 = None
        if field in python_Boot.keywords:
            field1 = ("_hx_" + field)
        elif ((((len(field) > 2) and ((ord(field[0]) == 95))) and ((ord(field[1]) == 95))) and ((ord(field[(len(field) - 1)]) != 95))):
            field1 = ("_hx_" + field)
        else:
            field1 = field
        if hasattr(o,field1):
            return getattr(o,field1)
        else:
            return None
    # Type.getInstanceFields(): fields plus methods, merged with superclass
    # fields (duplicates filtered via scMap).
    @staticmethod
    def getInstanceFields(c):
        f = None
        if hasattr(c,"_hx_fields"):
            f = c._hx_fields
        else:
            f = []
        if hasattr(c,"_hx_methods"):
            a = c._hx_methods
            f = (f + a)
        sc = python_Boot.getSuperClass(c)
        if (sc is None):
            return f
        else:
            scArr = python_Boot.getInstanceFields(sc)
            scMap = set(scArr)
            res = []
            _g = 0
            while (_g < len(f)):
                f1 = (f[_g] if _g >= 0 and _g < len(f) else None)
                _g = (_g + 1)
                if (not f1 in scMap):
                    scArr.append(f1)
            return scArr
    # Type.getSuperClass(): generated classes carry their parent in _hx_super.
    @staticmethod
    def getSuperClass(c):
        if (c is None):
            return None
        try:
            if hasattr(c,"_hx_super"):
                return c._hx_super
            return None
        except Exception as _hx_e:
            _hx_e1 = _hx_e
            pass
        return None
    # Type.getClassFields(): static field names recorded by the compiler.
    @staticmethod
    def getClassFields(c):
        if hasattr(c,"_hx_statics"):
            x = c._hx_statics
            return list(x)
        else:
            return []
    # Reverse the "_hx_" keyword mangling when exposing field names to Haxe.
    @staticmethod
    def unhandleKeywords(name):
        if (HxString.substr(name,0,python_Boot.prefixLength) == "_hx_"):
            real = HxString.substr(name,python_Boot.prefixLength,None)
            if real in python_Boot.keywords:
                return real
        return name
class _hx_AnonObject:
_hx_class_name = "_hx_AnonObject"
class python_internal_ArrayImpl:
    """Haxe Array intrinsics: bounds-checked reads return None out of range."""
    _hx_class_name = "python.internal.ArrayImpl"
    _hx_statics = ["_get"]
    @staticmethod
    def _get(x, idx):
        # Haxe array access never raises; out-of-range yields null (None).
        if 0 <= idx < len(x):
            return x[idx]
        return None
class HxOverrides:
    """Shims that reproduce Haxe semantics on top of Python operators."""
    _hx_class_name = "HxOverrides"
    _hx_statics = ["eq", "stringOrNull"]
    @staticmethod
    def eq(a, b):
        # Haxe compares arrays by reference, everything else by value.
        if isinstance(a, list) or isinstance(b, list):
            return a is b
        return a == b
    @staticmethod
    def stringOrNull(s):
        # Haxe renders a null String as the literal "null".
        return "null" if s is None else s
class HxString:
    """Haxe String.substr semantics implemented with Python slicing."""
    _hx_class_name = "HxString"
    _hx_statics = ["substr"]
    @staticmethod
    def substr(s, startIndex, _hx_len = None):
        # None length means "to the end of the string"; zero means empty.
        if _hx_len is None:
            return s[startIndex:]
        if _hx_len == 0:
            return ""
        return s[startIndex:(startIndex + _hx_len)]
class sys_io_File:
    """Minimal sys.io.File port: whole-file text reads."""
    _hx_class_name = "sys.io.File"
    _hx_statics = ["getContent"]
    @staticmethod
    def getContent(path):
        # UTF-8 text with newline='' (no newline translation), matching the
        # arguments of the original builtins.open(...) call; the context
        # manager closes the handle exactly as the explicit close() did.
        with open(path, "r", -1, "utf-8", None, "") as stream:
            return stream.read(-1)
# Module initialisation emitted by the Haxe compiler: populate the static
# fields of the Math shim and python_Boot, then run the program.
Math.NEGATIVE_INFINITY = float("-inf")
Math.POSITIVE_INFINITY = float("inf")
Math.NaN = float("nan")
Math.PI = python_lib_Math.pi
# Python keywords; Haxe identifiers that collide with these are renamed with
# a "_hx_" prefix (see python_Boot.simpleField / unhandleKeywords).
python_Boot.keywords = set(["and", "del", "from", "not", "with", "as", "elif", "global", "or", "yield", "assert", "else", "if", "pass", "None", "break", "except", "import", "raise", "True", "class", "exec", "in", "return", "False", "continue", "finally", "is", "try", "def", "for", "lambda", "while"])
python_Boot.prefixLength = len("_hx_")
# Program entry point.
Script.main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import abstractmethod, ABCMeta
from pyspark import since, keyword_only
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol, \
HasFeaturesCol
from pyspark.ml.common import inherit_doc
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator',
'MulticlassClassificationEvaluator', 'ClusteringEvaluator']
@inherit_doc
class Evaluator(Params):
    """
    Base class for evaluators that compute metrics from predictions.
    .. versionadded:: 1.4.0
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def _evaluate(self, dataset):
        """
        Evaluates the output.
        :param dataset: a dataset that contains labels/observations and
                        predictions
        :return: metric
        """
        raise NotImplementedError()
    @since("1.4.0")
    def evaluate(self, dataset, params=None):
        """
        Evaluates the output with optional parameters.
        :param dataset: a dataset that contains labels/observations and
                        predictions
        :param params: an optional param map that overrides embedded
                       params
        :return: metric
        """
        params = params if params is not None else dict()
        if not isinstance(params, dict):
            raise ValueError("Params must be a param map but got %s." % type(params))
        # A non-empty param map is applied to a copy so that the embedded
        # params of this evaluator instance stay untouched.
        target = self.copy(params) if params else self
        return target._evaluate(dataset)
    @since("1.5.0")
    def isLargerBetter(self):
        """
        Indicates whether the metric returned by :py:meth:`evaluate` should be maximized
        (True, default) or minimized (False).
        A given evaluator may support multiple metrics which may be maximized or minimized.
        """
        return True
@inherit_doc
class JavaEvaluator(JavaParams, Evaluator):
    """
    Base class for :py:class:`Evaluator`s that wrap Java/Scala
    implementations.
    """
    __metaclass__ = ABCMeta
    def _evaluate(self, dataset):
        """
        Evaluates the output.
        :param dataset: a dataset that contains labels/observations and predictions.
        :return: evaluation metric
        """
        # Push Python-side param values down to the JVM object before
        # delegating the actual computation to it.
        self._transfer_params_to_java()
        java_evaluator = self._java_obj
        return java_evaluator.evaluate(dataset._jdf)
    def isLargerBetter(self):
        # The JVM-side evaluator knows the orientation of the currently
        # selected metric, so defer to it after syncing params.
        self._transfer_params_to_java()
        return self._java_obj.isLargerBetter()
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol,
                                    JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for binary classification, which expects two input columns: rawPrediction and label.
    The rawPrediction column can be of type double (binary 0/1 prediction, or probability of label
    1) or of type vector (length-2 vector of raw predictions, scores, or label probabilities).
    >>> from pyspark.ml.linalg import Vectors
    >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
    ...    [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
    >>> evaluator.evaluate(dataset)
    0.70...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
    0.83...
    >>> bce_path = temp_path + "/bce"
    >>> evaluator.save(bce_path)
    >>> evaluator2 = BinaryClassificationEvaluator.load(bce_path)
    >>> str(evaluator2.getRawPredictionCol())
    'raw'
    .. versionadded:: 1.4.0
    """
    # Tunable param declaration; the actual value lives in the instance's
    # param map (default set in __init__ via _setDefault).
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (areaUnderROC|areaUnderPR)",
                       typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
                 metricName="areaUnderROC"):
        """
        __init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
                 metricName="areaUnderROC")
        """
        super(BinaryClassificationEvaluator, self).__init__()
        # Companion JVM evaluator that performs the actual computation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
        self._setDefault(metricName="areaUnderROC")
        # @keyword_only captured the constructor kwargs in _input_kwargs.
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.4.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.4.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @keyword_only
    @since("1.4.0")
    def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
                  metricName="areaUnderROC"):
        """
        setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
                  metricName="areaUnderROC")
        Sets params for binary classification evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
                          JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for Regression, which expects two input
    columns: prediction and label.
    >>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
    ...   (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = RegressionEvaluator(predictionCol="raw")
    >>> evaluator.evaluate(dataset)
    2.842...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
    0.993...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
    2.649...
    >>> re_path = temp_path + "/re"
    >>> evaluator.save(re_path)
    >>> evaluator2 = RegressionEvaluator.load(re_path)
    >>> str(evaluator2.getPredictionCol())
    'raw'
    .. versionadded:: 1.4.0
    """
    # Tunable param declaration; default ("rmse") is set in __init__.
    metricName = Param(Params._dummy(), "metricName",
                       """metric name in evaluation - one of:
                       rmse - root mean squared error (default)
                       mse - mean squared error
                       r2 - r^2 metric
                       mae - mean absolute error.""",
                       typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="rmse"):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="rmse")
        """
        super(RegressionEvaluator, self).__init__()
        # Companion JVM evaluator that performs the actual computation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
        self._setDefault(metricName="rmse")
        # @keyword_only captured the constructor kwargs in _input_kwargs.
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.4.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.4.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @keyword_only
    @since("1.4.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="rmse"):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="rmse")
        Sets params for regression evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
                                        JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for Multiclass Classification, which expects two input
    columns: prediction and label.
    >>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
    ...     (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
    ...
    >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    >>> evaluator.evaluate(dataset)
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
    0.66...
    >>> mce_path = temp_path + "/mce"
    >>> evaluator.save(mce_path)
    >>> evaluator2 = MulticlassClassificationEvaluator.load(mce_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    .. versionadded:: 1.5.0
    """
    # Tunable param declaration; default ("f1") is set in __init__.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation "
                       "(f1|weightedPrecision|weightedRecall|accuracy)",
                       typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="f1"):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="f1")
        """
        super(MulticlassClassificationEvaluator, self).__init__()
        # Companion JVM evaluator that performs the actual computation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid)
        self._setDefault(metricName="f1")
        # @keyword_only captured the constructor kwargs in _input_kwargs.
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.5.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.5.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @keyword_only
    @since("1.5.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="f1"):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="f1")
        Sets params for multiclass classification evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class ClusteringEvaluator(JavaEvaluator, HasPredictionCol, HasFeaturesCol,
                          JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for Clustering results, which expects two input
    columns: prediction and features. The metric computes the Silhouette
    measure using the squared Euclidean distance.
    The Silhouette is a measure for the validation of the consistency
    within clusters. It ranges between 1 and -1, where a value close to
    1 means that the points in a cluster are close to the other points
    in the same cluster and far from the points of the other clusters.
    >>> from pyspark.ml.linalg import Vectors
    >>> featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
    ...     [([0.0, 0.5], 0.0), ([0.5, 0.0], 0.0), ([10.0, 11.0], 1.0),
    ...     ([10.5, 11.5], 1.0), ([1.0, 1.0], 0.0), ([8.0, 6.0], 1.0)])
    >>> dataset = spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
    ...
    >>> evaluator = ClusteringEvaluator(predictionCol="prediction")
    >>> evaluator.evaluate(dataset)
    0.9079...
    >>> ce_path = temp_path + "/ce"
    >>> evaluator.save(ce_path)
    >>> evaluator2 = ClusteringEvaluator.load(ce_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    .. versionadded:: 2.3.0
    """
    # Tunable param declarations; defaults are set in __init__ via
    # _setDefault ("silhouette" / "squaredEuclidean").
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (silhouette)",
                       typeConverter=TypeConverters.toString)
    distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " +
                            "Supported options: 'squaredEuclidean' and 'cosine'.",
                            typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, predictionCol="prediction", featuresCol="features",
                 metricName="silhouette", distanceMeasure="squaredEuclidean"):
        """
        __init__(self, predictionCol="prediction", featuresCol="features", \
                 metricName="silhouette", distanceMeasure="squaredEuclidean")
        """
        super(ClusteringEvaluator, self).__init__()
        # Companion JVM evaluator that performs the actual computation.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.ClusteringEvaluator", self.uid)
        self._setDefault(metricName="silhouette", distanceMeasure="squaredEuclidean")
        # @keyword_only captured the constructor kwargs in _input_kwargs.
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("2.3.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("2.3.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @keyword_only
    @since("2.3.0")
    def setParams(self, predictionCol="prediction", featuresCol="features",
                  metricName="silhouette", distanceMeasure="squaredEuclidean"):
        """
        setParams(self, predictionCol="prediction", featuresCol="features", \
                  metricName="silhouette", distanceMeasure="squaredEuclidean")
        Sets params for clustering evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)
    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure`
        """
        return self.getOrDefault(self.distanceMeasure)
if __name__ == "__main__":
    # Run the doctests embedded in the evaluator class docstrings against a
    # throwaway local SparkSession and scratch directory.
    import doctest
    import tempfile
    import pyspark.ml.evaluation
    from pyspark.sql import SparkSession
    globs = pyspark.ml.evaluation.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.evaluation tests")\
        .getOrCreate()
    globs['spark'] = spark
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # Fix: stop the SparkSession in ``finally`` so it is shut down even
        # when a doctest raises (previously ``spark.stop()`` was inside the
        # ``try`` body and was skipped on error, leaving the JVM running).
        spark.stop()
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
| |
import ast
import errno
import os.path
import re
import stat
from fridge.cas import ContentAddressableStorage
import fridge.fs
from fridge.time import utc2timestamp, timestamp2utc, utc_time
class DataObject(object):
    """Base class for simple value objects.

    Subclasses declare their fields in ``__slots__``; the constructor
    accepts them positionally or by keyword, and equality, inequality and
    ``repr`` are derived from the slot values.
    """

    __slots__ = []

    def __init__(self, *args, **kwargs):
        """Initialize every slot from positional and keyword arguments.

        :raises TypeError: on surplus positional arguments, duplicate or
            unknown keyword arguments, or a missing field.
        """
        if len(args) > len(self.__slots__):
            raise TypeError("Takes {} arguments, but got {}.".format(
                len(self.__slots__), len(args)))
        for i, name in enumerate(self.__slots__):
            if i < len(args):
                if name in kwargs:
                    raise TypeError("Multiple arguments for {}.".format(name))
                setattr(self, name, args[i])
            else:
                if name not in kwargs:
                    raise TypeError("Argument {} missing.".format(name))
                setattr(self, name, kwargs.pop(name))
        if len(kwargs) > 0:
            # Fix: the message was never formatted (the format argument was
            # passed to TypeError as a second positional arg) and
            # ``kwargs.keys()[0]`` raises on Python 3 because dict views are
            # not indexable. next(iter(...)) works on both 2 and 3.
            raise TypeError("Unknown keyword argument {}.".format(
                next(iter(kwargs))))

    def __eq__(self, other):
        # Objects with differing slot layouts are never equal; missing
        # attributes on ``other`` also compare unequal.
        if hasattr(other, '__slots__') and self.__slots__ != other.__slots__:
            return False
        for name in self.__slots__:
            try:
                if getattr(self, name) != getattr(other, name):
                    return False
            except AttributeError:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '{cls}({args})'.format(
            cls=self.__class__.__name__,
            args=', '.join('{name}={value!r}'.format(
                name=n, value=getattr(self, n)) for n in self.__slots__))
class Serializable(object):
    """Mixin interface for objects with a textual wire format."""

    class DeserializationError(RuntimeError):
        """Raised when :meth:`parse` receives malformed input."""
        pass

    @classmethod
    def parse(cls, serialized):
        """Reconstruct an instance from its serialized form."""
        raise NotImplementedError()

    def serialize(self):
        """Return the textual representation of this object."""
        raise NotImplementedError()
class Stat(DataObject):
    """File metadata value object: mode bits, size, access and modification times."""
    __slots__ = ['st_mode', 'st_size', 'st_atime', 'st_mtime']
class SnapshotItem(DataObject, Serializable):
    """One file in a snapshot: content checksum, path, and stat metadata.

    Wire format (one line):
    ``<checksum> <octal mode> <size> <atime> <mtime> <path repr>``
    with times stored as UTC values.
    """

    __slots__ = ['checksum', 'path', 'status']
    _SPLIT_REGEX = re.compile(r'\s+')

    @classmethod
    def parse(cls, serialized):
        """Parse one serialized line into a :class:`SnapshotItem`."""
        # Split into at most six fields; the last is the repr of the path,
        # which may itself contain (escaped) whitespace.
        fields = cls._SPLIT_REGEX.split(serialized, 5)
        checksum, mode, size, atime, mtime, path_repr = fields
        file_status = Stat(
            st_mode=int(mode, 8) | stat.S_IFREG,
            st_size=int(size),
            st_atime=utc2timestamp(float(atime)),
            st_mtime=utc2timestamp(float(mtime)))
        return cls(checksum, ast.literal_eval(path_repr), file_status)

    def serialize(self):
        """Serialize to a single line; inverse of :meth:`parse`."""
        # pylint: disable=no-member
        template = ('{key:s} {mode:0>4o} {size:d} {atime:.3f} {mtime:.3f} ' +
                    '{path!r}')
        return template.format(
            key=self.checksum,
            mode=stat.S_IMODE(self.status.st_mode),
            size=self.status.st_size,
            atime=timestamp2utc(self.status.st_atime),
            mtime=timestamp2utc(self.status.st_mtime),
            path=self.path)
class Commit(DataObject, Serializable):
    """A commit: timestamp, snapshot key, free-form message, parent key.

    Serialized as ``<key> <value>`` header lines, separated from the
    message body by a blank line.
    """

    __slots__ = ['timestamp', 'snapshot', 'message', 'parent']
    _NEWLINE_REGEX = re.compile(r'\r*\n\r*')
    _SPLIT_MESSAGE_REGEX = re.compile('(?:{}){{2}}'.format(
        _NEWLINE_REGEX.pattern))
    _SPLIT_REGEX = re.compile(r'\s+')

    @classmethod
    def parse(cls, serialized):
        """Parse a serialized commit.

        :raises Commit.DeserializationError: on duplicate header keys or a
            header set that does not match the commit fields.
        """
        serialized, message = cls._SPLIT_MESSAGE_REGEX.split(serialized, 1)
        lines = cls._NEWLINE_REGEX.split(serialized)
        kwargs = {'message': message}
        for line in lines:
            split = cls._SPLIT_REGEX.split(line.strip(), 1)
            if len(split) > 1:
                kw, value = split
            else:
                # Header key without a value (e.g. an empty parent).
                kw = split[0]
                value = None
            if kw in kwargs:
                raise cls.DeserializationError("Duplicate key.")
            if kw == 'timestamp':
                value = float(value)
            kwargs[kw] = value
        try:
            return cls(**kwargs)
        except TypeError as err:
            # Fix: Python 3 exceptions have no ``.message`` attribute, which
            # made this line raise AttributeError instead of the intended
            # DeserializationError; str(err) works on Python 2 and 3.
            raise cls.DeserializationError(str(err))

    def serialize(self):
        """Serialize this commit; inverse of :meth:`parse`."""
        # pylint: disable=no-member
        return (
            u'timestamp {timestamp:.3f}\nparent {parent}\n' +
            u'snapshot {snapshot}\n\n{message}').format(
                timestamp=self.timestamp, parent=self.parent or '',
                snapshot=self.snapshot, message=self.message)
class Branch(DataObject, Serializable):
    """A branch pointer; its only payload is the commit key it points at."""

    __slots__ = ['commit']

    @classmethod
    def parse(cls, serialized):
        """The serialized form of a branch is simply its commit key."""
        return cls(commit=serialized)

    def serialize(self):
        """Return the commit key as the on-disk representation."""
        # pylint: disable=no-member
        return u'{c}'.format(c=self.commit)
class Reference(DataObject, Serializable):
    """The repository head: either a direct commit key or a branch name.

    Serialized as ``'<type>: <ref>'``.
    """

    __slots__ = ['type', 'ref']

    COMMIT = u'commit'
    BRANCH = u'branch'

    @classmethod
    def parse(cls, serialized):
        """Parse ``'<type>: <ref>'``, tolerating surrounding whitespace."""
        pieces = [piece.strip() for piece in serialized.split(':')]
        tp, ref = pieces
        return cls(type=tp, ref=ref)

    def serialize(self):
        """Inverse of :meth:`parse`."""
        # pylint: disable=no-member
        return u'{t}: {r}'.format(t=self.type, r=self.ref)
class Diff(object):
    """Result of comparing the working tree against a snapshot: lists of
    removed, updated and added paths (all initially empty)."""

    def __init__(self):
        self.removed, self.updated, self.added = [], [], []
class FridgeCore(object):
    """Low-level repository operations on a ``.fridge`` directory.

    Blobs, snapshots and commits each live in their own
    content-addressable store; branches and the head reference are kept
    as plain files. The filesystem module is injectable for testing.
    """
    def __init__(
            self, path, fs=fridge.fs, cas_factory=ContentAddressableStorage):
        # :param path: repository root directory.
        # :param fs: filesystem module providing open/exists/mkdir/...
        # :param cas_factory: factory for the content-addressable stores.
        self._path = path
        self._fs = fs
        self._blobs = cas_factory(os.path.join(path, '.fridge', 'blobs'), fs)
        self._snapshots = cas_factory(os.path.join(
            path, '.fridge', 'snapshots'), fs)
        self._commits = cas_factory(os.path.join(
            path, '.fridge', 'commits'), fs)
        self._branch_dir = os.path.join(self._path, '.fridge', 'branches')
    @classmethod
    def init(cls, path, fs=fridge.fs, cas_factory=ContentAddressableStorage):
        """Create a fresh repository at *path* with an empty ``master`` branch as head."""
        fs.mkdir(os.path.join(path, '.fridge'))
        obj = cls(path, fs, cas_factory)
        obj.set_branch(u'master', '')
        obj.set_head(Reference(Reference.BRANCH, u'master'))
        return obj
    def add_blob(self, path):
        """Store the file at *path* in the blob store and return its content key."""
        key = self._blobs.store(path)
        return key
    @staticmethod
    def serialize_snapshot(snapshot):
        """Serialize a list of :class:`SnapshotItem`, one item per line."""
        return u'\n'.join(item.serialize() for item in snapshot)
    def add_snapshot(self, snapshot):
        """Write *snapshot* via a temp file into the snapshot store; return its key."""
        tmp_file = os.path.join(self._path, '.fridge', 'tmp')
        with self._fs.open(tmp_file, 'w') as f:
            f.write(self.serialize_snapshot(snapshot))
        return self._snapshots.store(tmp_file)
    def add_commit(self, snapshot_key, message):
        """Record a commit of *snapshot_key* with the current head as parent; return its key."""
        # pylint: disable=no-member
        commit = self.resolve_ref(self.get_head())
        c = Commit(utc_time(), snapshot_key, message, commit)
        tmp_file = os.path.join(self._path, '.fridge', 'tmp')
        with self._fs.open(tmp_file, 'w') as f:
            f.write(c.serialize())
        return self._commits.store(tmp_file)
    def is_commit(self, key):
        """Return True if *key* names a stored commit."""
        return self._fs.exists(self._commits.get_path(key))
    @staticmethod
    def parse_snapshot(serialized_snapshot):
        """Inverse of :meth:`serialize_snapshot`."""
        return [SnapshotItem.parse(line)
                for line in serialized_snapshot.split('\n')]
    def read_snapshot(self, key):
        """Load and parse the snapshot stored under *key*."""
        with self._fs.open(self._snapshots.get_path(key)) as f:
            return self.parse_snapshot(f.read())
    def read_commit(self, key):
        """Load and parse the commit stored under *key*."""
        with self._fs.open(self._commits.get_path(key)) as f:
            return Commit.parse(f.read())
    def set_head(self, head):
        """Persist *head* (a :class:`Reference`) to ``.fridge/head``."""
        path = os.path.join(self._path, '.fridge', 'head')
        with self._fs.open(path, 'w') as f:
            f.write(head.serialize())
    def get_head(self):
        """Read the current head :class:`Reference` from ``.fridge/head``."""
        path = os.path.join(self._path, '.fridge', 'head')
        with self._fs.open(path, 'r') as f:
            return Reference.parse(f.read())
    def get_head_key(self):
        """Return the commit key the head currently resolves to."""
        return self.resolve_ref(self.get_head())
    def set_branch(self, name, commit):
        """Point branch *name* at *commit*, creating the branches dir on demand."""
        try:
            self._fs.makedirs(self._branch_dir)
        except OSError as e:
            # An already-existing branches directory is fine; anything
            # else is re-raised.
            if e.errno != errno.EEXIST:
                raise
        path = os.path.join(self._branch_dir, name)
        with self._fs.open(path, 'w') as f:
            f.write(Branch(commit).serialize())
    def is_branch(self, name):
        """Return True if a branch called *name* exists."""
        branch_path = os.path.join(self._branch_dir, name)
        return self._fs.exists(branch_path)
    def resolve_branch(self, name):
        """Return the commit key that branch *name* points to."""
        branch_path = os.path.join(self._branch_dir, name)
        with self._fs.open(branch_path, 'r') as f:
            # pylint: disable=no-member
            return Branch.parse(f.read()).commit
    def resolve_ref(self, ref):
        """Resolve a :class:`Reference` to a commit key.

        Falls through (returns None) for unknown reference types.
        """
        if ref.type == Reference.COMMIT:
            return ref.ref
        elif ref.type == Reference.BRANCH:
            return self.resolve_branch(ref.ref)
    def checkout_blob(self, key, path):
        """Copy the blob stored under *key* to *path* in the working tree."""
        source_path = self._blobs.get_path(key)
        self._fs.copy(source_path, path)
class Fridge(object):
    """High-level user operations (commit, branch, checkout, log, diff)
    implemented on top of :class:`FridgeCore`."""
    def __init__(self, fridge_core, fs=fridge.fs):
        self._core = fridge_core
        self._fs = fs
    def _list_files(self):
        """Yield every file path in the working tree, skipping ``.fridge``."""
        for dirpath, dirnames, filenames in self._fs.walk('.'):
            if '.fridge' in dirnames:
                # Pruning dirnames in-place stops walk() from descending.
                dirnames.remove('.fridge')
            for filename in filenames:
                yield os.path.join(dirpath, filename)
    def refparse(self, ref):
        """Turn a user-supplied string into an unambiguous :class:`Reference`.

        :raises UnknownReferenceError: if *ref* names neither a branch nor
            a commit.
        :raises AmbiguousReferenceError: if it names both.
        """
        potential_types = []
        if self._core.is_branch(ref):
            potential_types.append(Reference.BRANCH)
        if self._core.is_commit(ref):
            potential_types.append(Reference.COMMIT)
        if len(potential_types) < 1:
            raise UnknownReferenceError()
        elif len(potential_types) > 1:
            raise AmbiguousReferenceError()
        else:
            return Reference(potential_types[0], ref)
    def commit(self, message=""):
        """Snapshot the working tree and record a commit on the head.

        :raises NothingToCommitError: if the tree matches the head snapshot.
        """
        if self.is_clean():
            raise NothingToCommitError()
        snapshot = []
        for path in self._list_files():
            # NOTE: this local deliberately shadows the ``stat`` module,
            # which is not referenced again inside this method.
            stat = self._fs.stat(path)
            checksum = self._core.add_blob(path)
            snapshot.append(SnapshotItem(checksum, path, stat))
        snapshot_hash = self._core.add_snapshot(snapshot)
        commit_hash = self._core.add_commit(snapshot_hash, message)
        head = self._core.get_head()
        if head.type == Reference.COMMIT:
            # Detached head: move it directly to the new commit.
            head.ref = commit_hash
            self._core.set_head(head)
        elif head.type == Reference.BRANCH:
            self._core.set_branch(head.ref, commit_hash)
        else:
            raise AssertionError("Invalid head type '{t}'.".format(
                t=head.type))
        # Re-checkout the new head so the on-disk metadata matches the
        # snapshot that was just recorded (checkout restores mode/times).
        self.checkout()
    def branch(self, name):
        """Create branch *name* at the current head and switch to it.

        :raises BranchExistsError: if the branch already exists.
        """
        if self._core.is_branch(name):
            raise BranchExistsError()
        self._core.set_branch(name, self._core.get_head_key())
        self._core.set_head(Reference(Reference.BRANCH, name))
    def checkout(self, ref=None):
        """Restore the working tree to *ref* (default: the current head)."""
        head_key = self._core.get_head_key()
        if ref is None:
            key = head_key
        else:
            ref = self.refparse(ref)
            self._core.set_head(ref)
            key = self._core.resolve_ref(ref)
        commit = self._core.read_commit(key)
        head_commit = self._core.read_commit(head_key)
        snapshot = self._core.read_snapshot(commit.snapshot)
        head_snapshot = self._core.read_snapshot(head_commit.snapshot)
        # FIXME do not delete or overwrite non-restorable files
        for item in head_snapshot:
            try:
                self._fs.unlink(item.path)
            except OSError as e:
                # A file that is already gone does not need deleting.
                if e.errno != errno.ENOENT:
                    raise
        for item in snapshot:
            self._core.checkout_blob(item.checksum, item.path)
            # Restore the permissions and timestamps recorded in the snapshot.
            self._fs.chmod(item.path, stat.S_IMODE(item.status.st_mode))
            self._fs.utime(
                item.path, (item.status.st_atime, item.status.st_mtime))
    def log(self):
        """Return ``[(key, Commit), ...]`` from the head back to the root commit."""
        head = self._core.get_head_key()
        commits = [(head, self._core.read_commit(head))]
        while commits[-1][1].parent is not None:
            key = commits[-1][1].parent
            commits.append((key, self._core.read_commit(key)))
        return commits
    def diff(self):
        """Compare the working tree against the head snapshot.

        :return: a :class:`Diff` of added/removed/updated relative paths.
        """
        head_key = self._core.get_head_key()
        if head_key == '':
            # Fresh repository without any commit yet.
            snapshot = []
        else:
            commit = self._core.read_commit(head_key)
            snapshot = self._core.read_snapshot(commit.snapshot)
        d = Diff()
        for item in snapshot:
            if self._fs.exists(item.path):
                # FIXME possibility for strict check via SHA or compare
                # NOTE: this local shadows the ``stat`` module inside the loop.
                stat = self._fs.stat(item.path)
                eq_size = stat.st_size == item.status.st_size
                eq_mtime = stat.st_mtime == item.status.st_mtime
                eq_mode = stat.st_mode == item.status.st_mode
                if not (eq_size and eq_mtime and eq_mode):
                    d.updated.append(os.path.relpath(item.path))
            else:
                d.removed.append(os.path.relpath(item.path))
        known_files = [item.path for item in snapshot]
        for path in self._list_files():
            if path not in known_files:
                d.added.append(os.path.relpath(path))
        return d
    def is_clean(self):
        """Return True when the working tree matches the head snapshot."""
        d = self.diff()
        return len(d.added) + len(d.removed) + len(d.updated) == 0
class FridgeError(RuntimeError):
    """Root of the fridge exception hierarchy."""
    pass
class FridgeReferenceError(FridgeError):
    """Base class for errors while resolving user-supplied references."""
    pass
class AmbiguousReferenceError(FridgeReferenceError):
    """A reference string matches both a branch and a commit."""
    pass
class BranchExistsError(FridgeError):
    """Attempted to create a branch that already exists."""
    pass
class NothingToCommitError(FridgeError):
    """Commit was requested while the working tree is clean."""
    pass
class UnknownReferenceError(FridgeReferenceError):
    """A reference string matches neither a branch nor a commit."""
    pass
| |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import datetime
import json
import re
import click
from tabulate import tabulate
from platformio.clients.account import AccountClient, AccountNotAuthorized
# Root command group for `pio account`; subcommands register themselves
# below via @cli.command(...). No docstring on purpose: click would use it
# as the group's help text and change the CLI output.
@click.group("account", short_help="Manage PlatformIO account")
def cli():
    # Intentionally empty: click dispatches to the registered subcommands.
    pass
def validate_username(value, field="username"):
    """Validate a username-like value and return it stripped.

    Accepts 1-38 characters: alphanumerics separated by single hyphens,
    never starting or ending with a hyphen (case-insensitive).

    :param field: name used in the error message (e.g. "username").
    :raises click.BadParameter: if the value does not match.
    """
    candidate = str(value).strip()
    if re.match(r"^[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,37}$", candidate, flags=re.I):
        return candidate
    raise click.BadParameter(
        "Invalid %s format. "
        "%s must contain only alphanumeric characters "
        "or single hyphens, cannot begin or end with a hyphen, "
        "and must not be longer than 38 characters."
        % (field.lower(), field.capitalize())
    )
def validate_email(value):
    """Validate an email address and return it stripped.

    :raises click.BadParameter: if the value does not look like an email.
    """
    candidate = str(value).strip()
    if re.match(r"^[a-z\d_.+-]+@[a-z\d\-]+\.[a-z\d\-.]+$", candidate, flags=re.I):
        return candidate
    raise click.BadParameter("Invalid email address")
def validate_password(value):
    """Validate password strength and return the stripped value.

    Requires at least 8 characters including a digit and a lowercase
    letter.

    :raises click.BadParameter: if the password is too weak.
    """
    candidate = str(value).strip()
    if re.match(r"^(?=.*[a-z])(?=.*\d).{8,}$", candidate):
        return candidate
    raise click.BadParameter(
        "Invalid password format. "
        "Password must contain at least 8 characters"
        " including a number and a lowercase letter"
    )
@cli.command("register", short_help="Create new PlatformIO Account")
@click.option(
"-u",
"--username",
prompt=True,
callback=lambda _, __, value: validate_username(value),
)
@click.option(
"-e", "--email", prompt=True, callback=lambda _, __, value: validate_email(value)
)
@click.option(
"-p",
"--password",
prompt=True,
hide_input=True,
confirmation_prompt=True,
callback=lambda _, __, value: validate_password(value),
)
@click.option("--firstname", prompt=True)
@click.option("--lastname", prompt=True)
def account_register(username, email, password, firstname, lastname):
client = AccountClient()
client.registration(username, email, password, firstname, lastname)
return click.secho(
"An account has been successfully created. "
"Please check your mail to activate your account and verify your email address.",
fg="green",
)
@cli.command("login", short_help="Log in to PlatformIO Account")
@click.option("-u", "--username", prompt="Username or email")
@click.option("-p", "--password", prompt=True, hide_input=True)
def account_login(username, password):
client = AccountClient()
client.login(username, password)
return click.secho("Successfully logged in!", fg="green")
@cli.command("logout", short_help="Log out of PlatformIO Account")
def account_logout():
client = AccountClient()
client.logout()
return click.secho("Successfully logged out!", fg="green")
@cli.command("password", short_help="Change password")
@click.option("--old-password", prompt=True, hide_input=True)
@click.option("--new-password", prompt=True, hide_input=True, confirmation_prompt=True)
def account_password(old_password, new_password):
client = AccountClient()
client.change_password(old_password, new_password)
return click.secho("Password successfully changed!", fg="green")
@cli.command("token", short_help="Get or regenerate Authentication Token")
@click.option("-p", "--password", prompt=True, hide_input=True)
@click.option("--regenerate", is_flag=True)
@click.option("--json-output", is_flag=True)
def account_token(password, regenerate, json_output):
client = AccountClient()
auth_token = client.auth_token(password, regenerate)
if json_output:
return click.echo(json.dumps({"status": "success", "result": auth_token}))
return click.secho("Personal Authentication Token: %s" % auth_token, fg="green")
@cli.command("forgot", short_help="Forgot password")
@click.option("--username", prompt="Username or email")
def account_forgot(username):
client = AccountClient()
client.forgot_password(username)
return click.secho(
"If this account is registered, we will send the "
"further instructions to your email.",
fg="green",
)
@cli.command("update", short_help="Update profile information")
@click.option("--current-password", prompt=True, hide_input=True)
@click.option("--username")
@click.option("--email")
@click.option("--firstname")
@click.option("--lastname")
def account_update(current_password, **kwargs):
client = AccountClient()
profile = client.get_profile()
new_profile = profile.copy()
if not any(kwargs.values()):
for field in profile:
new_profile[field] = click.prompt(
field.replace("_", " ").capitalize(), default=profile[field]
)
if field == "email":
validate_email(new_profile[field])
if field == "username":
validate_username(new_profile[field])
else:
new_profile.update({key: value for key, value in kwargs.items() if value})
client.update_profile(new_profile, current_password)
click.secho("Profile successfully updated!", fg="green")
username_changed = new_profile["username"] != profile["username"]
email_changed = new_profile["email"] != profile["email"]
if not username_changed and not email_changed:
return None
try:
client.logout()
except AccountNotAuthorized:
pass
if email_changed:
return click.secho(
"Please check your mail to verify your new email address and re-login. ",
fg="yellow",
)
return click.secho("Please re-login.", fg="yellow")
@cli.command("destroy", short_help="Destroy account")
def account_destroy():
client = AccountClient()
click.confirm(
"Are you sure you want to delete the %s user account?\n"
"Warning! All linked data will be permanently removed and can not be restored."
% client.get_logged_username(),
abort=True,
)
client.destroy_account()
try:
client.logout()
except AccountNotAuthorized:
pass
return click.secho(
"User account has been destroyed.",
fg="green",
)
@cli.command("show", short_help="PlatformIO Account information")
@click.option("--offline", is_flag=True)
@click.option("--json-output", is_flag=True)
def account_show(offline, json_output):
client = AccountClient()
info = client.get_account_info(offline)
if json_output:
return click.echo(json.dumps(info))
click.echo()
if info.get("profile"):
print_profile(info["profile"])
if info.get("packages"):
print_packages(info["packages"])
if info.get("subscriptions"):
print_subscriptions(info["subscriptions"])
return click.echo()
def print_profile(profile):
    """Pretty-print the "Profile" section of the account info."""
    click.secho("Profile", fg="cyan", bold=True)
    click.echo("=" * len("Profile"))
    # Emit only the fields that are present and non-empty, in fixed order.
    rows = [
        (label, profile[key])
        for label, key in (
            ("Username:", "username"),
            ("Email:", "email"),
            ("First name:", "firstname"),
            ("Last name:", "lastname"),
        )
        if profile.get(key)
    ]
    click.echo(tabulate(rows, tablefmt="plain"))
def print_packages(packages):
    """Pretty-print the "Packages" section of the account info."""
    click.echo()
    click.secho("Packages", fg="cyan")
    click.echo("=" * len("Packages"))
    for package in packages:
        click.echo()
        click.secho(package.get("name"), bold=True)
        click.echo("-" * len(package.get("name")))
        if package.get("description"):
            click.echo(package.get("description"))
        expire = "-"
        if "subscription" in package:
            # A subscription expires either at its end date or, for
            # recurring plans, at the next billing date.
            raw_expiry = (
                package["subscription"].get("end_at")
                or package["subscription"].get("next_bill_at")
            )
            expire = datetime.datetime.strptime(
                raw_expiry, "%Y-%m-%dT%H:%M:%SZ"
            ).strftime("%Y-%m-%d")
        data = [("Expire:", expire)]
        # "service.*" entries may be plain strings or dicts with a title.
        services = []
        for key, entry in package.items():
            if not key.startswith("service."):
                continue
            services.append(entry.get("title") if isinstance(entry, dict) else entry)
        if services:
            data.append(("Services:", ", ".join(services)))
        click.echo(tabulate(data, tablefmt="plain"))
def print_subscriptions(subscriptions):
    """Pretty-print the "Subscriptions" section of the account info.

    Refactor: the identical strptime/strftime conversion appeared three
    times; it is extracted into a local helper.
    """

    def format_ts(value):
        # API timestamps are ISO-8601 Zulu strings; render them as
        # "YYYY-MM-DD HH:MM:SS".
        return datetime.datetime.strptime(
            value, "%Y-%m-%dT%H:%M:%SZ"
        ).strftime("%Y-%m-%d %H:%M:%S")

    click.echo()
    click.secho("Subscriptions", fg="cyan")
    click.echo("=" * len("Subscriptions"))
    for subscription in subscriptions:
        click.echo()
        click.secho(subscription.get("product_name"), bold=True)
        click.echo("-" * len(subscription.get("product_name")))
        data = [("State:", subscription.get("status"))]
        begin_at = format_ts(subscription.get("begin_at"))
        data.append(("Start date:", begin_at or "-"))
        end_at = subscription.get("end_at")
        if end_at:
            data.append(("End date:", format_ts(end_at) or "-"))
        next_bill_at = subscription.get("next_bill_at")
        if next_bill_at:
            data.append(("Next payment:", format_ts(next_bill_at) or "-"))
        data.append(
            ("Edit:", click.style(subscription.get("update_url"), fg="blue") or "-")
        )
        data.append(
            ("Cancel:", click.style(subscription.get("cancel_url"), fg="blue") or "-")
        )
        click.echo(tabulate(data, tablefmt="plain"))
| |
# -*- coding: utf-8 -*-
"""
These are mainly for testing/demoing the library.
"""
import networkx as nx
def test_graph():
    """
    Build the small demo graph used by the first tests.

    Node attributes are attached with ``add_node`` (which updates the
    node's attribute dict) instead of assigning to ``g.node[n]`` — the
    ``Graph.node`` accessor was removed in networkx 2.x, so the old code
    crashed there.  For freshly added nodes the result is identical.

    :returns: networkx.Graph
    """
    g = nx.Graph([
        (1, 2, {'type': 'works_at'}),
        (1, 3, {'type': 'lives_in'}),
        (2, 3, {'type': 'located_in'}),
        (3, 4, {'type': 'connected_to'}),
        (4, 5, {'type': 'connected_to'}),
        (10, 4, {'type': 'connected_to'}),
        (5, 6, {'type': 'lives_in'}),
        (7, 3, {'type': 'lives_in'}),
        (8, 5, {'type': 'works_at'}),
        (7, 2, {'type': 'works_at'}),
        (8, 4, {'type': 'lives_in'}),
        (7, 4, {'type': 'works_at'}),
        (9, 4, {'type': 'lives_in'}),
        (9, 10, {'type': 'works_at'}),
        (11, 3, {'type': 'lives_in'}),
        (12, 5, {'type': 'lives_in'}),
        (12, 13, {'type': 'works_at'}),
        (13, 5, {'type': 'located_in'}),
        (13, 14, {'type': 'works_at'})
    ])
    node_attrs = {
        1: {'type': 'Person', 'label': 'davebshow'},
        2: {'type': 'Institution', 'label': 'western'},
        3: {'type': 'City', 'label': 'london'},
        4: {'type': 'Institution', 'label': 'the matrix'},
        5: {'type': 'City', 'label': 'toronto'},
        6: {'type': 'Person', 'label': 'gandalf'},
        7: {'type': 'Person', 'label': 'versae'},
        8: {'type': 'Person', 'label': 'neo'},
        9: {'type': 'Person', 'label': 'r2d2'},
        10: {'type': 'City', 'label': 'alderon'},
        11: {'type': 'Person', 'label': 'curly'},
        12: {'type': 'Person', 'label': 'adam'},
        13: {'type': 'Institution', 'label': 'canland'},
        14: {'type': 'Person', 'label': 'bro'},
    }
    for node, attrs in node_attrs.items():
        g.add_node(node, **attrs)
    return g
# ETL config: traverse Person -(any)- wild -(any)- Person subgraphs, then
# "project" the pattern onto a direct p1-p2 edge using the 'jaccard' method
# over Institution/City nodes, copying the intermediate node's label into
# 'name' and deleting the intermediate ("wild") node.
project_etl = {
    "extractor": {
        "networkx": {
            "type": "subgraph",
            "node_type_attr": "type",
            "edge_type_attr": "type",
            "traversal": [
                {"node": { "alias": "p1", "type": "Person"}},
                {"edge": {}},
                {"node": {"alias": "wild"}},
                {"edge": {}},
                {"node": {"alias": "p2", "type": "Person"}}
            ]
        }
    },
    "transformers": [
        {
            "project": {
                "pattern": [
                    {"node": {"alias": "p1"}},
                    {"edge": {}},
                    {"node": {"alias": "p2"}}
                ],
                "set": [
                    {"key": "name", "value_lookup": "wild.label"}
                ],
                "method": {
                    "jaccard": {
                        "args": ["Institution", "City"]
                    }
                },
                "delete": {
                    "alias": ["wild"]
                }
            }
        }
    ],
    "loader": {
        "nx2nx": {}
    }
}
# ETL config: traverse City-Institution pairs in the whole graph and
# "transfer" data from the City onto the Institution (its label becomes
# 'city', edges to Person nodes are transferred), then delete the City.
transfer_etl = {
    "extractor": {
        "networkx": {
            "type": "graph",
            "node_type_attr": "type",
            "edge_type_attr": "type",
            "traversal": [
                {
                    "node": {
                        "alias": "c",
                        "type": "City"
                    }
                },
                {
                    "edge": {}
                },
                {
                    "node": {
                        "alias": "i",
                        "type": "Institution"
                    }
                }
            ]
        }
    },
    "transformers": [
        {
            "transfer": {
                "pattern": [
                    {"node": {"alias": "c"}},
                    {"edge": {}},
                    {"node": {"alias": "i"}}
                ],
                "set": [
                    {"key": "city", "value_lookup": "c.label"}
                ],
                "method": {
                    "edges": {
                        "args": ["Person"]
                    }
                },
                "delete": {
                    "alias": ["c"]
                }
            }
        }
    ],
    "loader": {
        "nx2nx": {}
    }
}
# ETL config: "combine" each matched City + Institution pair into a single
# new 'GeoInst' node carrying both labels, deleting the two originals.
combine_etl = {
    "extractor": {
        "networkx": {
            "type": "graph",
            "node_type_attr": "type",
            "edge_type_attr": "type",
            "traversal": [
                {"node": {"type": "City", "alias": "c"}},
                {"edge": {}},
                {"node": {"type": "Institution", "alias": "i"}},
            ]
        }
    },
    "transformers": [
        {
            "combine": {
                "pattern": [
                    {"node": {"alias": "c"}},
                    {"edge": {}},
                    {"node": {"alias": "i"}}
                ],
                "set": [
                    {
                        "key": "type",
                        "value":"GeoInst",
                        "value_lookup": ""
                    },
                    {
                        "key": "city_name",
                        "value":"",
                        "value_lookup": "c.label"
                    },
                    {
                        "key": "inst_name",
                        "value":"",
                        "value_lookup": "i.label"
                    }
                ],
                "delete": {"alias": ["c", "i"]}
            }
        }
    ],
    "loader": {
        "nx2nx": {}
    }
}
# ETL config with TWO chained transformers on a Person-Institution-City
# subgraph: first transfer Institution attrs onto the City, then project
# the remaining p1-c pattern onto a direct edge (jaccard over Institutions),
# deleting the Institution nodes.
multi_transform_etl = {
    "extractor": {
        "networkx": {
            "traversal": [
                {"node": {"alias": "p1", "type": "Person"}},
                {"edge": {}},
                {"node": {"alias": "i", "type": "Institution"}},
                {"edge": {}},
                {"node": {"alias": "c", "type": "City"}}
            ],
            "type": "subgraph"
        }
    },
    "loader": {
        "nx2nx": {}
    },
    "transformers": [
        {
            "transfer": {
                "delete": {
                    "alias": []
                },
                "method": {
                    "attrs": {
                        "args": []
                    }
                },
                "pattern": [
                    {"node": {"alias": "i"}},
                    {"edge": {}},
                    {"node": {"alias": "c"}}
                ],
                "set": [
                    {"key": "inst", "value_lookup": "i.label"}
                ]
            }
        },
        {
            "project": {
                "delete": {
                    "alias": [
                        "i"
                    ]
                },
                "method": {
                    "jaccard": {
                        "args": [
                            "Institution"
                        ]
                    }
                },
                "pattern": [
                    {"node": {"alias": "p1"}},
                    {"edge": {}},
                    {"node": {"alias": "c"}}
                ],
                "set": [{}]
            }
        }
    ]
}
# ETL config: pull ingredient co-occurrence data out of Neo4j (anything
# connected through a Recipe node) and load it into a networkx graph,
# keying nodes by their 'UniqueId' property.
neo4j2nx_etl = {
    "extractor": {
        "neo4j": {
            "query": "match (n)--(r:Recipe)--(m) return n, r, m",
            "uri": "http://localhost:7474/db/data/"
        }
    },
    "transformers": [
        {
            "node": {
                "pattern": [{"node": {"alias": "n", "unique": "UniqueId"}}],
                "set": [
                    {"key": "name", "value_lookup": "n.UniqueId"},
                    {"key": "type", "value": "Ingredient"}
                ]
            },
        },
        {
            "node": {
                "pattern": [{"node": {"alias": "m", "unique": "UniqueId"}}],
                "set": [
                    {"key": "name", "value_lookup": "m.UniqueId"},
                    {"key": "type", "value": "Ingredient"}
                ]
            },
        },
        {
            "edge": {
                "pattern": [
                    {"node": {"alias": "n", "unique": "UniqueId"}},
                    {"edge": {}},
                    {"node": {"alias": "m", "unique": "UniqueId"}}
                ],
                "set": [
                    {"key": "name", "value_lookup": "r.UniqueId"}
                ],
            }
        }
    ],
    "loader": {
        "neo4j2nx": {}
    }
}
# Same Neo4j recipe extraction as neo4j2nx_etl (no explicit uri here),
# but loading into a comma-delimited edge list file instead of networkx.
neo4j2edgelist_etl = {
    "extractor": {
        "neo4j": {
            "query": "match (n)--(r:Recipe)--(m) return n, r, m"
        }
    },
    "transformers": [
        {
            "node": {
                "pattern": [{"node": {"alias": "n", "unique": "UniqueId"}}],
                "set": [
                    {"key": "name", "value_lookup": "n.UniqueId"},
                    {"key": "type", "value": "Ingredient"}
                ]
            },
        },
        {
            "node": {
                "pattern": [{"node": {"alias": "m", "unique": "UniqueId"}}],
                "set": [
                    {"key": "name", "value_lookup": "m.UniqueId"},
                    {"key": "type", "value": "Ingredient"}
                ]
            },
        },
        {
            "edge": {
                "pattern": [
                    {"node": {"alias": "n", "unique": "UniqueId"}},
                    {"edge": {}},
                    {"node": {"alias": "m", "unique": "UniqueId"}}
                ],
                "set": [
                    {"key": "name", "value_lookup": "r.UniqueId"}
                ],
            }
        }
    ],
    "loader": {
        "neo4j2edgelist": {"delim": ",", "filename": "demo.csv", "newline": "\n"}
    }
}
# ETL config: stream a space-delimited edge list (KONECT Flickr group
# memberships) into Neo4j as (User)-[:IN]->(Group), batching statements
# per request/transaction and declaring indexes on UniqueId.
edgelist2neo4j_etl = {
    "extractor": {
        "edgelist": {
            "filename": "data/flickr-groupmemberships/out.flickr-groupmemberships",
            "delim": " ",
            "pattern": [
                {"node": {"alias": "n"}},
                {"edge": {}},
                {"node": {"alias": "m"}}
            ]
        }
    },
    "transformers": [
        {
            "edge": {
                "pattern": [
                    {"node": {"alias": "n", "label": "User"}},
                    {"edge": {"label": "IN"}},
                    {"node": {"alias": "m", "label": "Group"}}
                ]
            }
        }
    ],
    "loader": {
        "edgelist2neo4j": {
            "uri": "http://localhost:7474/db/data",
            "stmt_per_req": 500,
            "req_per_tx": 25,
            # NOTE: "indicies" [sic] — misspelled key is what the loader expects.
            "indicies": [
                {"label": "User", "attr": "UniqueId"},
                {"label": "Group", "attr": "UniqueId"}
            ]
        }
    }
}
def draw_simple_graph(graph, node_type_attr='type',
                      edge_label_attr='weight', show_edge_labels=True,
                      label_attrs=None):
    """
    Utility function to draw a labeled, colored graph with Matplotlib.

    :param graph: networkx.Graph
    :param node_type_attr: Str. Node attribute holding the node's type.
    :param edge_label_attr: Str. Edge attribute used for edge labels.
    :param show_edge_labels: Bool. Draw edge labels when True.
    :param label_attrs: List of node-attribute names aggregated into the
        node labels (defaults to ['label']).
    """
    # BUGFIX: was a mutable default argument (label_attrs=['label']),
    # shared between calls and mutable by callers.
    if label_attrs is None:
        label_attrs = ['label']
    lbls = labels(graph, label_attrs=label_attrs)
    clrs = colors(graph, node_type_attr=node_type_attr)
    pos = nx.spring_layout(graph, weight=None)
    if show_edge_labels:
        e_labels = edge_labels(graph, edge_label_attr=edge_label_attr)
    else:
        e_labels = {}
    nx.draw_networkx(graph, pos=pos, node_color=clrs)
    nx.draw_networkx_edge_labels(graph, pos=pos, edge_labels=e_labels)
    nx.draw_networkx_labels(graph, pos=pos, labels=lbls)
def labels(graph, label_attrs=None):
    """
    Utility function that aggregates node attributes as
    labels for drawing a graph in an IPython Notebook.

    :param graph: networkx.Graph
    :param label_attrs: List of attribute names to include in each label
        (defaults to ['label']).
    :returns: Dict. Nodes as keys, labels as values.
    """
    # BUGFIX: label_attrs=['label'] was a shared mutable default; the old
    # bare `except:` fallback appended bytes (str.encode) to a unicode
    # string, which is a TypeError on Python 3 — str.format handles
    # unicode natively, so the fallback is dropped.
    if label_attrs is None:
        label_attrs = ['label']
    labels_dict = {}
    for node, attrs in graph.nodes(data=True):
        label = u''.join(u'{0}: {1}\n'.format(k, v)
                         for k, v in attrs.items() if k in label_attrs)
        labels_dict[node] = label
    return labels_dict
def edge_labels(graph, edge_label_attr='weight'):
    """
    Map each edge of `graph` to the value of one of its attributes, for
    labeling edges when drawing in an IPython Notebook.

    :param graph: networkx.Graph
    :param edge_label_attr: Str. Edge attribute to use as the label;
        edges lacking it get an empty-string label.
    :returns: Dict. (u, v) edge tuples as keys, labels as values.
    """
    return {(u, v): attrs.get(edge_label_attr, '')
            for u, v, attrs in graph.edges(data=True)}
def colors(graph, node_type_attr='type'):
    """
    Utility function that generates colors for node
    types for drawing a graph in an IPython Notebook.

    :param graph: networkx.Graph
    :returns: List. One float per node (in graph.nodes order); nodes with
        the same type share the same value.  Suitable for the
        ``node_color`` argument of networkx drawing functions.
    """
    # FIX: the docstring claimed a dict was returned (a list is), and the
    # local list was named `colors`, shadowing this function.
    type_to_color = {}
    color_list = []
    counter = 1
    for node, attrs in graph.nodes(data=True):
        node_type = attrs[node_type_attr]
        if node_type not in type_to_color:
            # First occurrence of a type: assign it the next color value.
            type_to_color[node_type] = float(counter)
            color_list.append(float(counter))
            counter += 1
        else:
            color_list.append(type_to_color[node_type])
    return color_list
def remove_edges(g, min_weight):
    """
    Remove edges with 'weight' below `min_weight`, then prune nodes left
    with degree zero.

    :param g: networkx.Graph (mutated in place).
    :param min_weight: numeric cutoff.
    :returns: the same (mutated) graph, for chaining.
    """
    # BUGFIX: materialize the edges first — removing edges while iterating
    # a live edge view raises RuntimeError on networkx >= 2.
    for u, v, attrs in list(g.edges(data=True)):
        if attrs['weight'] < min_weight:
            g.remove_edge(u, v)
    # BUGFIX: dict() accepts both the dict returned by networkx 1.x
    # degree() and the DegreeView of networkx 2.x (which has no .items()),
    # and snapshots degrees before nodes are removed.
    for node, degree in dict(g.degree()).items():
        if degree == 0:
            g.remove_node(node)
    return g
def proj_density(g, start_val, interval, num_proj):
    """
    Sweep `num_proj` edge-weight cutoffs (starting at `start_val`, spaced
    by `interval`) and record the density of the pruned graph at each one.

    :param g: networkx.Graph (copied for each projection; not mutated).
    :returns: Tuple of two lists: (cutoffs, densities).
    """
    cutoffs = []
    dens = []
    cutoff = start_val
    for _ in range(num_proj):
        pruned = remove_edges(g.copy(), cutoff)
        dens.append(nx.density(pruned))
        cutoffs.append(cutoff)
        cutoff += interval
    return cutoffs, dens
| |
import inspect
from functools import partial
from typing import Any, Callable, Dict, Optional, Tuple, List, Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (Activation, Add, AvgPool2D,
BatchNormalization, Concatenate)
from tensorflow.keras.layers import Conv2D as _Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import DepthwiseConv2D as _DepthwiseConv2D
from tensorflow.keras.layers import (Dropout, Flatten, GlobalAvgPool2D,
GlobalMaxPool2D, Lambda, Layer, MaxPool2D,
Reshape, UpSampling2D,
ZeroPadding2D, InputLayer)
from tensorflow.python.keras.applications.imagenet_utils import correct_pad
from tensorflow.python.keras.layers import Wrapper
from typing_extensions import Literal
# Keras Conv2D/DepthwiseConv2D default to padding='valid'; these module-wide
# aliases make 'same' padding the default used throughout this file.
Conv2D = partial(_Conv2D, padding='same')
DepthwiseConv2D = partial(_DepthwiseConv2D, padding='same')
def last_layer(inputs: tf.Tensor) -> Layer:
  """Return the layer recorded in the ``_keras_history`` of `inputs`.

  Raises
  ------
  ValueError
      If `inputs` carries no ``_keras_history`` attribute, i.e. it was
      not produced by a Keras layer.
  """
  try:
    history = inputs._keras_history
  except AttributeError:
    raise ValueError(f'inputs of type {inputs} has no _keras_history') from None
  return history.layer
# ===========================================================================
# Helpers
# ===========================================================================
class Skip(Wrapper):
  """Skip-connection wrapper.

  Computes ``layer(inputs) + coef * inputs``; the identity term is
  omitted entirely when ``coef == 0``.
  """
  def __init__(self, layer, coef=1.0, name=None, **kwargs):
    # Default to the wrapped layer's name so the wrapper is transparent
    # in summaries and checkpoints.
    if name is None:
      name = layer.name
    super().__init__(layer, name=name, **kwargs)
    self.input_spec = self.layer.input_spec
    # Record which keyword arguments the wrapped layer.call accepts, so
    # call() can drop anything else (e.g. 'training'/'mask' handed down
    # by Keras to layers that do not declare them).
    spec = inspect.getfullargspec(layer.call)
    args = set(spec.args + spec.kwonlyargs)
    self._call_args = args
    self.coef = float(coef)
  def get_config(self):
    cfg = super().get_config()
    cfg['coef'] = self.coef
    return cfg
  def call(self, inputs, **kwargs):
    # Forward only the kwargs the wrapped layer's call() understands.
    kwargs = {k: v for k, v in kwargs.items() if k in self._call_args}
    outputs = self.layer.call(inputs, **kwargs)
    if self.coef != 0.:
      outputs = outputs + self.coef * inputs
    return outputs
  def __repr__(self):
    return self.__str__()
  def __str__(self):
    return f'<Skip "{self.name}" coef:{self.coef}>'
class RemoveMCMCdim(Layer):
  """Merge the two leading dimensions of the input into one.

  E.g. ``(n_mcmc, batch, ...) -> (n_mcmc * batch, ...)``; the inverse of
  `RestoreMCMCdim`.
  """
  def call(self, x: tf.Tensor, **kwargs):
    old_shape = tf.shape(x)
    merged = tf.concat([[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0)
    return tf.reshape(x, merged)
class RestoreMCMCdim(Layer):
  """Split the leading dimension back into ``(n_mcmc, batch // n_mcmc)``.

  The inverse of `RemoveMCMCdim` for a known number of MCMC samples.
  """
  def __init__(self, n_mcmc, **kwargs):
    super().__init__(**kwargs)
    self.n_mcmc = int(n_mcmc)
  def call(self, x: tf.Tensor, **kwargs):
    old_shape = tf.shape(x)
    new_shape = tf.concat(
        [[self.n_mcmc, old_shape[0] // self.n_mcmc], old_shape[1:]], axis=0)
    return tf.reshape(x, new_shape)
class Resampling2D(Layer):
  """Support upsampling and downsampling.

  Downsampling modes: 'max', 'avg' (strided pooling) and 'global'
  (global average pool, reshaped back to a 1x1 spatial map in call()).
  Upsampling modes: 'pad' (zero padding) and 'nearest'/'bilinear'
  (interpolated UpSampling2D).
  """
  def __init__(
      self,
      size: Tuple[int, int] = (2, 2),
      mode: Literal['max', 'avg', 'global',
                    'pad', 'nearest', 'bilinear'] = 'avg',
      **kwargs):
    super().__init__(**kwargs)
    self.downsampling = False
    if mode in ('max', 'avg', 'global'):
      self.downsampling = True
    self.size = size
    self.mode = mode
    # Created lazily in build() once the input shape is known.
    self.pool = None
    self.reshape = None
  def get_config(self) -> Dict[str, Any]:
    # NOTE(review): super().get_config() is not merged in, so base-layer
    # options (name, dtype, ...) are lost on serialization — confirm intended.
    return dict(size=self.size, mode=self.mode)
  def build(self, input_shape):
    ## downsampling
    if self.downsampling:
      if self.mode == 'max':
        self.pool = MaxPool2D(self.size, padding='same')
      elif self.mode == 'avg':
        self.pool = AvgPool2D(self.size, padding='same')
      elif self.mode == 'global':
        self.pool = GlobalAvgPool2D()
      else:
        raise NotImplementedError(f'No downsampling mode={self.mode}')
    ## upsampling
    else:
      if self.mode == 'pad':
        # Normalize size to a 2-element list before computing the padding.
        if not isinstance(self.size, (tuple, list)):
          self.size = [self.size]
        if len(self.size) == 1:
          self.size = list(self.size) * 2
        # this doesn't take into account odd number
        self.pool = ZeroPadding2D(padding=[
            (i - 1) * s // 2 for i, s in zip(self.size, input_shape[1:])
        ])
      else:
        self.pool = UpSampling2D(size=self.size, interpolation=self.mode)
    # Used by call() to restore a 1x1 spatial map after global pooling.
    self.reshape = Reshape((1, 1, input_shape[-1]))
    return super().build(input_shape)
  def call(self, inputs, **kwargs):
    x = self.pool(inputs)
    if self.mode == 'global':
      x = self.reshape(x)
    return x
class SigmoidGating(Layer):
  """Split the channel axis in two halves and sigmoid-gate one by the other.

  The first half is the value branch, the second half provides the gate
  logits; output has half as many channels as the input.
  """
  def call(self, inputs, **kwargs):
    value, gate_logits = tf.split(inputs, 2, axis=-1)
    return keras.layers.multiply([tf.nn.sigmoid(gate_logits), value])
class SqueezeExcitation(Layer):
  """Squeeze-and-Excitation block.

  Globally pools the feature map, squeezes the channels by `se_ratio`
  through a 1x1 convolution, projects back with a sigmoid 1x1 convolution,
  and rescales the inputs channel-wise by the resulting gates.

  Parameters
  ----------
  se_ratio : float
      channel shrink ratio for the squeeze convolution; values outside
      (0, 1] skip the global-pooling step in call().
  pool_mode : {'max', 'avg'}
      which global pooling to use for the squeeze.
  activation : Callable
      activation of the squeeze convolution.
  conv_kw : Dict[str, Any] (optional)
      extra keyword arguments forwarded to both 1x1 convolutions.
  """
  def __init__(self,
               se_ratio: float = 0.25,
               pool_mode: Literal['max', 'avg'] = 'avg',
               activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.swish,
               conv_kw: Optional[Dict[str, Any]] = None,
               name: str = 'squeeze_excitation',
               **kwargs):
    super().__init__(name=name, **kwargs)
    if conv_kw is None:
      conv_kw = {}
    # BUGFIX: this was stored as `self.conv_k` while build() reads
    # `self.conv_kw`, so building the layer raised AttributeError.
    self.conv_kw = conv_kw
    self.se_ratio = se_ratio
    self.pool_mode = pool_mode
    self.activation = activation
  def get_config(self):
    return dict(se_ratio=self.se_ratio, pool_mode=self.pool_mode,
                activation=self.activation)
  def build(self, input_shape):
    self.filters_in = input_shape[-1]
    self.filters = max(1, int(self.filters_in * self.se_ratio))
    if self.pool_mode == 'avg':
      self.pool = GlobalAvgPool2D(name=f'{self.name}_pool')
    elif self.pool_mode == 'max':
      self.pool = GlobalMaxPool2D(name=f'{self.name}_pool')
    else:
      raise NotImplementedError(f'Invalid pool={self.pool_mode}')
    self.reshape = Reshape((1, 1, self.filters_in), name=f'{self.name}_reshape')
    self.conv1 = Conv2D(self.filters,
                        1,
                        activation=self.activation,
                        name=f'{self.name}_conv',
                        **self.conv_kw)
    self.conv2 = Conv2D(self.filters_in,
                        1,
                        activation='sigmoid',
                        name=f'{self.name}_proj',
                        **self.conv_kw)
    return super().build(input_shape)
  def call(self, inputs, training=None, **kwargs):
    x = inputs
    # se_ratio outside (0, 1] disables the pooling path: gates are computed
    # directly from the raw feature map.
    if 0 < self.se_ratio <= 1:
      x = self.pool(x)
      x = self.reshape(x)
    x = self.conv1(x)
    x = self.conv2(x)
    # Channel-wise rescaling of the original inputs by the sigmoid gates.
    return tf.multiply(x, inputs)
class SkipAndForget(Layer):
  """Add skip connection then gradually forget the connection during training.

  During training, inputs must be a pair ``(x, skip)``; the layer returns
  ``x + skip_gate * skip`` where the gate decays linearly from 1 to 0 over
  `max_step` training calls.  At inference only the main branch is used.
  """
  def __init__(self, max_step: int = 10000, name: str = 'skip_and_forget'):
    super().__init__(name=name)
    self.max_step = tf.constant(max_step, dtype=self.dtype)
    # Non-trainable counter, incremented once per training-mode call.
    self.step = tf.Variable(0., dtype=self.dtype, trainable=False)
  @property
  def skip_gate(self) -> tf.Tensor:
    # Linear decay 1 -> 0, clipped at zero once step >= max_step.
    return tf.maximum((self.max_step - self.step) / self.max_step, 0.)
  def call(self, inputs, training=None, **kwargs):
    if training:
      # inputs is the pair (main_branch, skip_branch) in training mode.
      x, skip = inputs
      x = x + self.skip_gate * skip
      self.step.assign_add(1.)
      return x
    else:
      # Inference: drop the skip branch if one was supplied.
      if isinstance(inputs, (tuple, list)):
        inputs = inputs[0]
      return inputs
class ResidualSequential(keras.Sequential):
  """`keras.Sequential` whose output is merged back with its input.

  skip_mode='add' sums input and output, 'concat' concatenates them on the
  last axis, any other value disables the skip connection.  The skip branch
  is scaled by `skip_ratio` before merging.
  """
  def __init__(self,
               layers: Optional[List[Layer]] = None,
               skip_mode: Literal['add', 'concat', 'none'] = 'add',
               skip_ratio: float = 1.0,
               name: Optional[str] = None):
    super().__init__(layers=layers, name=name)
    # When True, call() attaches the per-layer (layer, output) pairs to
    # the result tensor as `_last_outputs` for inspection.
    self.track_outputs = False
    self.skip_ratio = tf.convert_to_tensor(skip_ratio, dtype=self.dtype)
    self.skip_mode = skip_mode
    if skip_mode == 'add':
      self.merger = Add()
    elif skip_mode == 'concat':
      self.merger = Concatenate(axis=-1)
    else:
      self.merger = None
  def summary(self, line_length=None, positions=None, print_fn=None):
    # Delegate to odin's text renderer instead of Keras' table summary.
    from odin.backend.keras_helpers import layer2text
    return layer2text(self)
  def __repr__(self):
    # Compact text dump: header fields plus each layer's config, minus
    # the verbose initializer/regularizer/constraint entries.
    text = f'Name: {self.name}\n'
    text += f'skip_mode: {self.skip_mode}\n'
    text += f'skip_ratio: {self.skip_ratio}\n'
    text += f'track_outputs: {self.track_outputs}\n'
    for layer in self.layers:
      layer: Layer
      text += f'{layer.__class__.__name__}:\n '
      for k, v in layer.get_config().items():
        if any(i in k for i in ('_initializer', '_regularizer', '_constraint')):
          continue
        text += f'{k}:{v} '
      text += '\n'
    return text[:-1]
  def call(self, inputs, training=None, mask=None):
    skip_inputs = inputs
    # === 1. normal Sequential network
    outputs = inputs  # handle the corner case where self.layers is empty
    last_outputs = []
    for layer in self.layers:
      # During each iteration, `inputs` are the inputs to `layer`, and `outputs`
      # are the outputs of `layer` applied to `inputs`. At the end of each
      # iteration `inputs` is set to `outputs` to prepare for the next layer.
      kwargs = {}
      argspec = self._layer_call_argspecs[layer].args
      if 'mask' in argspec:
        kwargs['mask'] = mask
      if 'training' in argspec:
        kwargs['training'] = training
      outputs = layer(inputs, **kwargs)
      last_outputs.append((layer, outputs))
      if len(tf.nest.flatten(outputs)) != 1:
        raise ValueError('Sequential layer only support single outputs')
      # `outputs` will be the inputs to the next layer.
      inputs = outputs
      mask = getattr(outputs, '_keras_mask', None)
    # === 2. skip connection
    if self.merger is not None:
      outputs = self.merger([self.skip_ratio * skip_inputs, outputs])
    if self.track_outputs:
      outputs._last_outputs = tuple(last_outputs)
    return outputs
class MaskedConv2D(keras.layers.Conv2D):
  """Masked convolution 2D, type 'A' mask doesn't include the center entry,
  while type 'B' include the center of the kernel.

  The mask zeroes every kernel entry at or after the center position (in
  raster order), so each output pixel only sees "previous" pixels; it is
  applied both at initialization and as a kernel constraint so the masked
  weights stay zero throughout training.

  References
  ----------
  Aaron van den Oord, et al. Conditional Image Generation with
  PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
  https://arxiv.org/abs/1606.05328
  """
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               mask_type: Literal['A', 'B'] = 'A',
               padding='same',
               data_format=None,
               dilation_rate=(1, 1),
               groups=1,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Collect all named constructor arguments and forward everything except
    # the mask type to the base Conv2D.
    args = dict(locals())
    [args.pop(key) for key in ('self', '__class__', 'kwargs', 'mask_type')]
    super(MaskedConv2D, self).__init__(**args, **kwargs)
    # Build the (h, w) mask: all rows strictly above the center row are
    # fully visible.
    h, w = self.kernel_size
    mask = np.zeros((h, w))
    mask[:h // 2, :] = 1.
    if 'a' in mask_type.lower():
      # Type 'A': in the center row, only entries left of the center
      # (center pixel itself excluded).
      mask[h // 2, :w // 2] = 1.
    elif 'b' in mask_type.lower():
      # Type 'B': same, but the center entry is included.
      mask[h // 2, :w // 2 + 1] = 1
    else:
      raise ValueError(f'mask_type must be "A" or "B", but given "{mask_type}"')
    # Broadcast over the (in_channels, out_channels) kernel dimensions.
    mask = tf.convert_to_tensor(mask[:, :, np.newaxis, np.newaxis],
                                dtype=self.dtype,
                                name='kernel_mask')
    self.kernel_mask = mask
    # initializer: zero the masked weights from the very first step
    old_initializer = self.kernel_initializer
    def init_and_apply_mask(*a, **k):
      return old_initializer(*a, **k) * mask
    self.kernel_initializer = init_and_apply_mask
    # constraint: re-apply the mask after every optimizer update so the
    # masked weights stay zero during training
    old_constraint = self.kernel_constraint
    def mask_constrain(w):
      if old_constraint is not None:
        w = old_constraint(w)
      return w * mask
    self.kernel_constraint = mask_constrain
# ===========================================================================
# Main layers
# ===========================================================================
def skip_and_forget(
    inputs: Optional[tf.Tensor] = None,
    max_step: int = 10000,
    name: str = 'skip_and_forget',
) -> Union[tf.Tensor, SkipAndForget]:
  """Create a `SkipAndForget` layer (a skip connection that linearly fades
  out over `max_step` training steps); apply it when `inputs` is given,
  otherwise return the layer itself."""
  layer = SkipAndForget(max_step=max_step, name=name)
  return layer if inputs is None else layer(inputs)
def dense(
    inputs: Optional[tf.Tensor] = None,
    units: int = 256,
    name: str = 'dense',
    **kwargs,
) -> Union[tf.Tensor, Layer]:
  """Fully-connected layer factory; applies the layer when `inputs` is
  given, otherwise returns the unbuilt layer."""
  layer = Dense(units=units, name=name, **kwargs)
  return layer if inputs is None else layer(inputs)
def normalize_image(
    inputs: Optional[tf.Tensor] = None,
    name: str = 'normalize_image',
) -> Union[tf.Tensor, Lambda]:
  """Rescale pixel values from [0, 255] to [-1, 1] with a Lambda layer;
  applies it when `inputs` is given, otherwise returns the layer."""
  layer = Lambda(lambda x: 2. * x / 255. - 1., name=name)
  return layer if inputs is None else layer(inputs)
def flatten(
    inputs: Optional[tf.Tensor] = None,
    name: str = 'flatten',
) -> Union[tf.Tensor, Flatten]:
  """`Flatten` layer factory; applies the layer when `inputs` is given,
  otherwise returns the layer itself."""
  layer = Flatten(name=name)
  return layer if inputs is None else layer(inputs)
def remove_mcmc_dim(
    inputs: Optional[tf.Tensor] = None,
    name: str = 'remove_mcmc_dim',
) -> Union[tf.Tensor, RemoveMCMCdim]:
  """Factory for `RemoveMCMCdim` (merge the two leading dims); applies the
  layer when `inputs` is given, otherwise returns the layer."""
  layer = RemoveMCMCdim(name=name)
  return layer if inputs is None else layer(inputs)
def restore_mcmc_dim(
    inputs: Optional[tf.Tensor] = None,
    n_mcmc: int = 1,
    name: str = 'restore_mcmc_dim',
) -> Union[tf.Tensor, RestoreMCMCdim]:
  """Factory for `RestoreMCMCdim` (split the batch dim back into
  ``(n_mcmc, batch)``); applies the layer when `inputs` is given."""
  layer = RestoreMCMCdim(n_mcmc=n_mcmc, name=name)
  return layer if inputs is None else layer(inputs)
def downsampling2D(
    inputs: Optional[tf.Tensor] = None,
    size: Tuple[int, int] = (2, 2),
    mode: Literal['max', 'avg', 'global'] = 'avg',
    name: Optional[str] = None,
) -> Union[tf.Tensor, Resampling2D]:
  """Spatial pooling via `Resampling2D`; applies the layer when `inputs`
  is given, otherwise returns the layer itself."""
  layer = Resampling2D(size, mode, name=name)
  return layer if inputs is None else layer(inputs)
def upsampling2D(
    inputs: Optional[tf.Tensor] = None,
    size: Tuple[int, int] = (2, 2),
    mode: Literal['pad', 'nearest', 'bilinear'] = 'nearest',
    name: Optional[str] = None,
) -> Union[tf.Tensor, Resampling2D]:
  """Spatial upsampling via `Resampling2D` (zero padding or interpolation);
  applies the layer when `inputs` is given, otherwise returns the layer."""
  layer = Resampling2D(size, mode, name=name)
  return layer if inputs is None else layer(inputs)
def project_1x1(
    inputs: Optional[tf.Tensor] = None,
    filters: int = 32,
    activation: Optional[Callable[[tf.Tensor], tf.Tensor]] = None,
    use_bias: bool = True,
    name: str = 'project_1x1',
) -> Union[tf.Tensor, Layer]:
  """Channel projection using a (1, 1) convolution; applies the layer when
  `inputs` is given, otherwise returns the layer itself."""
  layer = Conv2D(filters=int(filters), kernel_size=(1, 1),
                 activation=activation, use_bias=use_bias, name=name)
  return layer if inputs is None else layer(inputs)
def strides2D(
    inputs: Optional[tf.Tensor] = None,
    kernel_size: Tuple[int, int] = (3, 3),
    strides: Tuple[int, int] = (2, 2),
    filters: int = 32,
    activation: Optional[Callable[[tf.Tensor], tf.Tensor]] = None,
    use_bias: bool = True,
    name: str = 'strides2D',
    **kwargs,
) -> Union[tf.Tensor, Layer]:
  """Spatial downsampling with a strided convolution; applies the layer
  when `inputs` is given, otherwise returns the layer itself."""
  layer = Conv2D(filters=int(filters), kernel_size=kernel_size,
                 strides=strides, activation=activation,
                 use_bias=use_bias, name=name, **kwargs)
  return layer if inputs is None else layer(inputs)
def dropout2D(
    inputs: Optional[tf.Tensor] = None,
    rate: float = 0.0,
    name: Optional[str] = None,
) -> Union[tf.Tensor, Layer]:
  """Spatial dropout with a single noise value per feature map
  (noise_shape drops whole channels).

  With ``rate <= 0`` this is a no-op: the inputs pass through unchanged,
  or an identity ('linear') Activation layer is returned when no inputs
  were provided.
  """
  if rate <= 0:
    return Activation('linear', name=name) if inputs is None else inputs
  layer = Dropout(rate, noise_shape=(None, 1, 1, 1), name=name)
  return layer if inputs is None else layer(inputs)
# ===========================================================================
# Main bottleneck
# ===========================================================================
def residual(
    inputs: Optional[tf.Tensor] = None,
    filters_in: Optional[int] = None,
    filters_out: Optional[int] = None,
    ratio: float = 2.0,
    se_ratio: float = 0.25,
    sigmoid_gating: bool = False,
    batchnorm: bool = True,
    batchnorm_kw: Optional[Dict[str, Any]] = None,
    dropout: float = 0.0,
    kernel_size: Tuple[int, int] = (3, 3),
    order: Literal['baw', 'wba'] = 'wba',
    design: Literal['bottleneck', 'inverted'] = 'inverted',
    strides: Tuple[int, int] = (1, 1),
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.swish,
    skip_mode: Literal['add', 'concat'] = 'add',
    skip_ratio: float = 1.0,
    name: Optional[str] = None,
) -> Union[tf.Tensor, ResidualSequential]:
  """A residual block, two designs are implemented:

  - 'wba', i.e. weight-batchnorm-activation (Tan et al. 2019):
    `X -> Conv -> BN -> ReLU -> DepthWise -> BN -> ReLU -> SE ->
    Conv -> BN -> Dropout -> Add(X)`
  - 'baw', i.e. batchnorm-activation-weight (He et al. 2016):
    `X -> BN -> ReLU -> Conv -> BN -> ReLU -> Conv -> Add(X)`

  All Convolutions are without biases if using BN

  Parameters
  ----------
  inputs : Tensor (optional)
      inputs tensor, if not provided (None), return the `ResidualSequential`
      layer
  ratio : float
      shrink ratio for bottleneck residual, and expand ratio for inverted
      residual.
  filters_in : int
      number of input filter, must be provided if inputs is None
  filters_out : int
      number of output filter for the output convolution
  se_ratio : float
      squeeze-and-excitation shrink ratio
  sigmoid_gating : boolean
      sigmoid gating the output convolution
  batchnorm : boolean
      enable batch normalization
  batchnorm_kw : Dict[str, Any] (optional)
      keyword arguments for batch normalization
  dropout : float
      dropout value on outputs before skip connection
  kernel_size : Tuple[int, int]
      filters dimensions
  order : {'baw', 'wba'}
      specific order of the residual block, 'baw' is
      batchnorm-activation-weight, and 'wba' is weight-batchnorm-activation,
      default 'wba'
  design : {'bottleneck', 'inverted'}
      residual block design, bottleneck residual or inverted residual with
      depthwise separated convolution.
  strides : Tuple[int, int]
      convolution strides
  activation : Callable[[tf.Tensor], tf.Tensor]
      activation function
  skip_mode : {'add', 'concat'}
      how to combine the outputs and the inputs in the final skip connection.
  skip_ratio : float
      scalar for scaling the inputs before adding to the skip connection
  name : str
      name for the layer

  Returns
  -------
  `tf.Tensor` or `ResidualSequential`

  References
  ----------
  He, K., et al. Identity Mappings in Deep Residual Networks. 2016
  Tan, M., et al. EfficientNet: Rethinking Model Scaling for Convolutional
  Neural Networks. 2019
  """
  if filters_in is None and inputs is None:
    raise ValueError('Unknown number of inputs filters, '
                     'either filters_in or inputs must be provided')
  if name is None:
    name = (
        'residual_bottleneck' if design == 'bottleneck' else 'residual_inverted')
  # Snapshot every argument by name and re-dispatch to the design-specific
  # implementation.  NOTE: `locals()` must run before any additional local
  # is bound, so only the function parameters end up in `kw`.
  kw = locals()
  kw.pop('design')
  # `ratio` is renamed to the keyword each implementation expects.
  kw.pop('ratio')
  if design == 'bottleneck':
    kw['shrink_ratio'] = ratio
    return residual_bottleneck(**kw)
  elif design == 'inverted':
    kw['expand_ratio'] = ratio
    return residual_inverted(**kw)
  raise NotImplementedError(f'No support for residual design: "{design}"')
def residual_bottleneck(
    inputs: Optional[tf.Tensor] = None,
    filters_in: Optional[int] = None,
    filters_out: Optional[int] = None,
    shrink_ratio: float = 0.5,
    se_ratio: float = 0.25,
    sigmoid_gating: bool = False,
    batchnorm: bool = True,
    batchnorm_kw: Optional[Dict[str, Any]] = None,
    dropout: float = 0.0,
    kernel_size: Tuple[int, int] = (3, 3),
    order: Literal['baw', 'wba'] = 'wba',
    strides: Tuple[int, int] = (1, 1),
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.swish,
    skip_mode: Literal['add', 'concat'] = 'add',
    skip_ratio: float = 1.0,
    name: str = 'residual_bottleneck',
    **conv_kw,
) -> Union[tf.Tensor, ResidualSequential]:
  """Bottleneck residual block (He et al. 2016).

  Squeezes the channels to ``filters_in * shrink_ratio`` with two
  convolutions (ordered per `order`), applies optional
  squeeze-and-excitation and sigmoid gating, projects back to
  `filters_out`, and adds the skip connection when shapes allow it.
  See `residual` for the full parameter description.
  """
  if batchnorm_kw is None:
    batchnorm_kw = {}
  assert 0.0 < shrink_ratio <= 1.0, (
      f'Bottleneck residual require 0 <= shrink_ratio <= 1, given {shrink_ratio}')
  ## prepare
  layers = []
  batchnorm_kw = dict(axis=3, **batchnorm_kw)
  if filters_in is None:
    filters_in = inputs.shape[-1]  # assume NHWC
  filters = max(1, int(filters_in * shrink_ratio))
  if filters_out is None:
    filters_out = filters_in
  use_bias = not batchnorm  # BN makes the conv bias redundant
  if np.any(np.asarray(strides) >= 2):
    # BUGFIX: the pad layer was named `name + f'{name}_pad'`, duplicating
    # the prefix (e.g. "resres_pad") and breaking the module's `{name}_*`
    # naming scheme.
    inputs = ZeroPadding2D(padding=correct_pad(inputs, kernel_size),
                           name=f'{name}_pad')(inputs)
    pad_mode = 'valid'
  else:
    pad_mode = 'same'
  ## squeeze: first convolution, reducing channels to `filters`
  if order == 'baw':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn1'))
    layers.append(Activation(activation, name=f'{name}_act1'))
  layers.append(
      Conv2D(filters=int(filters),
             kernel_size=kernel_size,
             use_bias=use_bias,
             strides=strides,
             padding=pad_mode,
             name=f'{name}_conv1',
             **conv_kw))
  if order == 'wba':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn1'))
    layers.append(Activation(activation, name=f'{name}_act1'))
  ## second bottleneck convolution (comment previously mislabeled "squeeze")
  if order == 'baw':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn2'))
    layers.append(Activation(activation, name=f'{name}_act2'))
  # NOTE(review): unlike conv1, this conv keeps the default use_bias=True
  # even with batchnorm — possibly an oversight, preserved for
  # weight-compatibility with existing checkpoints.
  layers.append(
      Conv2D(filters=int(filters),
             kernel_size=kernel_size,
             name=f'{name}_conv2',
             **conv_kw))
  if order == 'wba':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn2'))
    layers.append(Activation(activation, name=f'{name}_act2'))
  ## expand back to filters_out (doubled when sigmoid gating halves it)
  if se_ratio:
    layers.append(
        SqueezeExcitation(se_ratio=se_ratio,
                          activation=activation,
                          name=f'{name}_se'))
  layers.append(
      Conv2D(filters=filters_out * (2 if sigmoid_gating else 1),
             kernel_size=(1, 1),
             use_bias=use_bias,
             name=f'{name}_proj1',
             **conv_kw))
  if batchnorm:
    layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn3'))
  if sigmoid_gating:
    layers.append(SigmoidGating(name=f'{name}_gating'))
  # no residual connection if strides > 1 or the channel count changes
  if filters_out == filters_in and np.all(np.asarray(strides) == 1):
    if dropout > 0:
      layers.append(dropout2D(rate=dropout, name=f'{name}_drop'))
  else:
    skip_mode = 'none'
  ## final layer
  res = ResidualSequential(skip_mode=skip_mode, skip_ratio=skip_ratio,
                           layers=layers, name=name)
  if inputs is None:
    return res
  return res(inputs)
def residual_inverted(
    inputs: Optional[tf.Tensor] = None,
    filters_in: Optional[int] = None,
    filters_out: Optional[int] = None,
    expand_ratio: float = 2.,
    se_ratio: float = 0.25,
    sigmoid_gating: bool = False,
    batchnorm: bool = True,
    batchnorm_kw: Optional[Dict[str, Any]] = None,
    dropout: float = 0.0,
    kernel_size: Tuple[int, int] = (3, 3),
    order: Literal['baw', 'wba'] = 'wba',
    strides: Tuple[int, int] = (1, 1),
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.swish,
    skip_mode: Literal['add', 'concat'] = 'add',
    skip_ratio: float = 1.0,
    name: str = 'residual_inverted',
    **conv_kw,
) -> Union[tf.Tensor, ResidualSequential]:
  """Inverted residual block with depthwise-separable convolution
  (MBConv-style, cf. Tan et al. 2019).

  Expands channels by `expand_ratio`, applies a depthwise convolution,
  optional squeeze-and-excitation and sigmoid gating, then projects down
  to `filters_out` with a 1x1 convolution; a skip connection is added
  when shapes allow it.  See `residual` for the parameter description.
  """
  if batchnorm_kw is None:
    batchnorm_kw = {}
  assert expand_ratio >= 1, (
      f'Inverted residual only support expand_ratio >= 1, given {expand_ratio}')
  ## prepare
  layers = []
  batchnorm_kw = dict(axis=3, **batchnorm_kw)
  if filters_in is None:
    filters_in = inputs.shape[-1]  # assume NHWC
  filters = max(1, int(expand_ratio * filters_in))
  if filters_out is None:
    filters_out = filters_in
  # Convolution biases are redundant when followed by batch normalization.
  use_bias = not batchnorm
  if np.any(np.asarray(strides) >= 2):
    # NOTE(review): correct_pad inspects the concrete input tensor, so
    # strides >= 2 with inputs=None will fail here — confirm callers
    # always pass inputs in that case.
    layers.append(
        ZeroPadding2D(padding=correct_pad(inputs, kernel_size),
                      name=f'{name}_pad'))
    pad_mode = 'valid'
  else:
    pad_mode = 'same'
  ## expand channels to `filters`
  if order == 'baw':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn1'))
    layers.append(Activation(activation, name=f'{name}_act1'))
  layers.append(
      Conv2D(filters=filters,
             kernel_size=kernel_size,
             padding=pad_mode,
             strides=strides,
             use_bias=use_bias,
             name=f'{name}_conv1',
             **conv_kw))
  if order == 'wba':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn1'))
    layers.append(Activation(activation, name=f'{name}_act1'))
  ## depthwise (per-channel) convolution
  if order == 'baw':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn2'))
    layers.append(Activation(activation, name=f'{name}_act2'))
  layers.append(DepthwiseConv2D(kernel_size=kernel_size,
                                name=f'{name}_dwconv1',
                                **conv_kw))
  if order == 'wba':
    if batchnorm:
      layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn2'))
    layers.append(Activation(activation, name=f'{name}_act2'))
  ## final: optional SE, then project down to filters_out
  if se_ratio:
    layers.append(
        SqueezeExcitation(se_ratio=se_ratio,
                          activation=activation,
                          name=f'{name}_se'))
  layers.append(
      Conv2D(filters=filters_out * (2 if sigmoid_gating else 1),
             kernel_size=(1, 1),
             use_bias=use_bias,
             name=f'{name}_proj1',
             **conv_kw))
  if batchnorm:
    layers.append(BatchNormalization(**batchnorm_kw, name=f'{name}_bn3'))
  if sigmoid_gating:
    layers.append(SigmoidGating(name=f'{name}_gating'))
  # no residual connection if strides > 1
  if filters_out == filters_in and np.all(np.asarray(strides) == 1):
    if dropout > 0:
      layers.append(dropout2D(rate=dropout, name=f'{name}_drop'))
  else:
    skip_mode = 'none'
  ## final layer
  res = ResidualSequential(skip_mode=skip_mode, skip_ratio=skip_ratio,
                           layers=layers, name=name)
  if inputs is None:
    return res
  return res(inputs)
| |
#!/usr/bin/env python
import argparse
import errno
import hashlib
import os
import subprocess
import sys
import tempfile
from io import StringIO
from lib.config import PLATFORM, get_target_arch, get_chromedriver_version, \
get_env_var, s3_config, get_zip_name
from lib.util import electron_gyp, execute, get_electron_version, \
parse_version, scoped_cwd, s3put
from lib.github import GitHub
# Identity of the release being published.
ELECTRON_REPO = 'electron/electron'
ELECTRON_VERSION = get_electron_version()
# gyp variable names include the trailing '%' (gyp default-value syntax).
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
# Directory layout: repository root, release build output, dist artifacts.
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
# Zip artifact names: the main dist plus platform debug-symbol variants.
DIST_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION)
SYMBOLS_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'symbols')
DSYM_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'dsym')
PDB_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'pdb')
def main():
  """Build (when needed), create or reuse a release draft, upload artifacts.

  Returns 1 when the tag does not match the version reported by the built
  binary; otherwise returns None (implicit success).
  """
  args = parse_args()

  if not args.publish_release:
    # Rebuild the distribution only when the tree changed after the dist
    # zip was produced.
    if not dist_newer_than_head():
      run_python_script('create-dist.py')

    build_version = get_electron_build_version()
    if not ELECTRON_VERSION.startswith(build_version):
      error = 'Tag name ({0}) should match build version ({1})\n'.format(
          ELECTRON_VERSION, build_version)
      sys.stderr.write(error)
      sys.stderr.flush()
      return 1

  github = GitHub(auth_token())
  releases = github.repos(ELECTRON_REPO).releases.get()
  # True when a published (non-draft) release already exists for this tag.
  tag_exists = False
  for release in releases:
    if not release['draft'] and release['tag_name'] == args.version:
      tag_exists = True
      break

  release = create_or_get_release_draft(github, releases, args.version,
                                        tag_exists)

  if args.publish_release:
    # Upload the Node SHASUMS*.txt.
    run_python_script('upload-node-checksums.py', '-v', ELECTRON_VERSION)
    # Upload the index.json.
    run_python_script('upload-index-json.py')
    # Create and upload the Electron SHASUMS*.txt
    release_electron_checksums(github, release)
    # Press the publish button.
    publish_release(github, release['id'])
    # Do not upload other files when passed "-p".
    return

  # Upload Electron with GitHub Releases API.
  upload_electron(github, release, os.path.join(DIST_DIR, DIST_NAME))
  upload_electron(github, release, os.path.join(DIST_DIR, SYMBOLS_NAME))
  if PLATFORM == 'darwin':
    upload_electron(github, release, os.path.join(DIST_DIR, DSYM_NAME))
  elif PLATFORM == 'win32':
    upload_electron(github, release, os.path.join(DIST_DIR, PDB_NAME))

  # Upload free version of ffmpeg.
  ffmpeg = get_zip_name('ffmpeg', ELECTRON_VERSION)
  upload_electron(github, release, os.path.join(DIST_DIR, ffmpeg))

  # Upload chromedriver and mksnapshot for minor version update.
  if parse_version(args.version)[2] == '0':
    chromedriver = get_zip_name('chromedriver', get_chromedriver_version())
    upload_electron(github, release, os.path.join(DIST_DIR, chromedriver))
    mksnapshot = get_zip_name('mksnapshot', ELECTRON_VERSION)
    upload_electron(github, release, os.path.join(DIST_DIR, mksnapshot))

  if PLATFORM == 'win32' and not tag_exists:
    # Upload PDBs to Windows symbol server.
    run_python_script('upload-windows-pdb.py')
    # Upload node headers.
    run_python_script('upload-node-headers.py', '-v', args.version)
def parse_args():
  """Parse the command-line options of the upload script."""
  parser = argparse.ArgumentParser(description='upload distribution file')
  parser.add_argument('-v', '--version',
                      default=ELECTRON_VERSION,
                      help='Specify the version')
  parser.add_argument('-p', '--publish-release',
                      action='store_true',
                      help='Publish the release')
  return parser.parse_args()
def run_python_script(script, *args):
  """Run a helper from the script/ directory and return its output."""
  script_path = os.path.join(SOURCE_ROOT, 'script', script)
  command = [sys.executable, script_path]
  command.extend(args)
  return execute(command)
def get_electron_build_version():
  """Return the version string reported by the built Electron binary.

  On ARM targets and on CI there is no binary runnable on the host, so the
  tag-derived ELECTRON_VERSION is trusted as-is.
  """
  # 'CI' in os.environ works on Python 2 and 3; dict.has_key() is Python 2
  # only and was removed in Python 3.
  if get_target_arch() == 'arm' or 'CI' in os.environ:
    # In CI we just build as told.
    return ELECTRON_VERSION
  if PLATFORM == 'darwin':
    electron = os.path.join(SOURCE_ROOT, 'out', 'R',
                            '{0}.app'.format(PRODUCT_NAME), 'Contents',
                            'MacOS', PRODUCT_NAME)
  elif PLATFORM == 'win32':
    electron = os.path.join(SOURCE_ROOT, 'out', 'R',
                            '{0}.exe'.format(PROJECT_NAME))
  else:
    electron = os.path.join(SOURCE_ROOT, 'out', 'R', PROJECT_NAME)
  return subprocess.check_output([electron, '--version']).strip()
def dist_newer_than_head():
  """Return True if the dist zip is newer than the latest git commit.

  Used to skip re-running create-dist.py when nothing changed. Returns
  False when the dist zip does not exist yet (ENOENT); any other OSError
  propagates.
  """
  with scoped_cwd(SOURCE_ROOT):
    try:
      # Commit timestamp (epoch seconds, as a string) of HEAD.
      head_time = subprocess.check_output(['git', 'log', '--pretty=format:%at',
                                           '-n', '1']).strip()
      dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
      return False

  return dist_time > int(head_time)
def get_text_with_editor(name):
  """Open $EDITOR on a temp file and return the non-comment text entered.

  Lines starting with '#' are stripped from the result (git commit-message
  convention). Falls back to nano when $EDITOR is unset.
  """
  editor = os.environ.get('EDITOR', 'nano')
  initial_message = '\n# Please enter the body of your release note for %s.' \
      % name

  # Open in text mode explicitly: NamedTemporaryFile defaults to 'w+b',
  # which rejects str writes on Python 3.
  t = tempfile.NamedTemporaryFile(mode='w', suffix='.tmp', delete=False)
  t.write(initial_message)
  t.close()
  subprocess.call([editor, t.name])

  text = ''
  for line in open(t.name, 'r'):
    # startswith() also handles an empty line safely, replacing the old
    # "len(line) == 0 or line[0] != '#'" check.
    if not line.startswith('#'):
      text += line

  os.unlink(t.name)
  return text
def create_or_get_release_draft(github, releases, tag, tag_exists):
  """Return the first existing draft release, or create a new draft."""
  # Reuse an already-open draft if there is one.
  for candidate in releases:
    if candidate['draft']:
      return candidate

  # The tag is already published: park the new draft under a dummy tag so
  # an existing tag is never touched.
  if tag_exists:
    tag = 'do-not-publish-me'
  return create_release_draft(github, tag)
def create_release_draft(github, tag):
  """Create a draft release on GitHub and return the API response.

  On CI a placeholder body is used; interactively the release note is
  collected via $EDITOR, and an empty note aborts the script.
  """
  name = '{0} {1}'.format(PROJECT_NAME, tag)
  # 'CI' in os.environ works on Python 2 and 3; dict.has_key() is Python 2
  # only and was removed in Python 3.
  if 'CI' in os.environ:
    body = '(placeholder)'
  else:
    body = get_text_with_editor(name)
    if body == '':
      sys.stderr.write('Quit due to empty release note.\n')
      sys.exit(0)
  data = dict(tag_name=tag, name=name, body=body, draft=True)
  r = github.repos(ELECTRON_REPO).releases.post(data=data)
  return r
def release_electron_checksums(github, release):
  """Generate the merged SHASUMS256.txt and attach it to the release."""
  checksums = run_python_script('merge-electron-checksums.py',
                                '-v', ELECTRON_VERSION)
  content = StringIO(checksums.decode('utf-8'))
  upload_io_to_github(github, release, 'SHASUMS256.txt', content, 'text/plain')
def upload_electron(github, release, file_path):
  """Upload one distribution file plus its sha256 checksum to the release."""
  filename = os.path.basename(file_path)

  # Delete a previously-uploaded asset of the same name before re-uploading
  # in CI ('CI' in os.environ replaces the Python-2-only dict.has_key()).
  # Best-effort: a failure here must not abort the upload.
  if 'CI' in os.environ:
    try:
      for asset in release['assets']:
        if asset['name'] == filename:
          github.repos(ELECTRON_REPO).releases.assets(asset['id']).delete()
    except Exception:
      pass

  # Upload the file.
  with open(file_path, 'rb') as f:
    upload_io_to_github(github, release, filename, f, 'application/zip')

  # Upload the checksum file.
  upload_sha256_checksum(release['tag_name'], file_path)
def upload_io_to_github(github, release, name, io, content_type):
  """Stream *io* to the release as an asset named *name*.

  NOTE(review): verify=False disables TLS certificate verification on the
  upload request -- confirm this is still required.
  """
  endpoint = github.repos(ELECTRON_REPO).releases(release['id']).assets
  endpoint.post(params={'name': name},
                headers={'Content-Type': content_type},
                data=io,
                verify=False)
def upload_sha256_checksum(version, file_path):
  """Write a .sha256sum file next to *file_path* and push it to S3."""
  bucket, access_key, secret_key = s3_config()
  checksum_path = '{}.sha256sum'.format(file_path)
  filename = os.path.basename(file_path)

  digest = hashlib.sha256()
  with open(file_path, 'rb') as f:
    digest.update(f.read())

  # "<hex digest> *<filename>" is the sha256sum binary-mode format.
  with open(checksum_path, 'w') as checksum:
    checksum.write('{} *{}'.format(digest.hexdigest(), filename))
  s3put(bucket, access_key, secret_key, os.path.dirname(checksum_path),
        'atom-shell/tmp/{0}'.format(version), [checksum_path])
def publish_release(github, release_id):
  """Turn the draft flag off, making the release public."""
  payload = dict(draft=False)
  github.repos(ELECTRON_REPO).releases(release_id).patch(data=payload)
def auth_token():
  """Return the GitHub API token from the environment, aborting if unset."""
  # NOTE(review): the message names $ELECTRON_GITHUB_TOKEN while the lookup
  # key is 'GITHUB_TOKEN' -- presumably get_env_var() prefixes 'ELECTRON_';
  # confirm against lib.config before changing either side.
  token = get_env_var('GITHUB_TOKEN')
  message = ('Error: Please set the $ELECTRON_GITHUB_TOKEN '
             'environment variable, which is your personal token')
  assert token, message
  return token
if __name__ == '__main__':
  # sys is already imported at module scope; the duplicate local import
  # was redundant.
  sys.exit(main())
| |
#!/usr/bin/env python
# Written by Bram Cohen
# modified for multitracker by John Hoffman
# see LICENSE.txt for license information
# Optionally enable the psyco JIT when the bundled PSYCO config asks for it;
# any failure (missing module, too-old version) silently falls back to
# plain Python.
from BitTornado import PSYCO
if PSYCO.psyco:
    try:
        import psyco
        assert psyco.__version__ >= 0x010100f0
        psyco.full()
    except:
        pass
from sys import argv, version
from BitTornado.BT1.makemetafile import make_meta_file, completedir
from threading import Event, Thread
from BitTornado.bencode import bdecode
import sys
from os import getcwd
from os.path import join, isdir
# Pin wxPython 2.6; exit with a message on stderr when it is unavailable.
# (Python 2 syntax throughout this file.)
try:
    import wxversion
    wxversion.select("2.6")
except Exception, e:
    print >> sys.stderr, "%s: wxPython 2.6 not installed." %e
    sys.exit(1)
from wxPython.wx import *
# Pre-Python-2.3 compatibility: define True/False when the builtins are
# absent.
try:
    True
except:
    True = 1
    False = 0
# Custom event type used to marshal calls from the hashing worker thread
# onto the GUI thread (see InvokeEvent below).
wxEVT_INVOKE = wxNewEventType()
def EVT_INVOKE(win, func):
    # Bind handler func to the custom invoke event on window win.
    win.Connect(-1, -1, wxEVT_INVOKE, func)
class InvokeEvent(wxPyEvent):
    """Event carrying a deferred callable across threads.

    Posted from the worker thread with wxPostEvent and executed on the GUI
    thread by the EVT_INVOKE handler.
    """
    def __init__(self, func, args, kwargs):
        wxPyEvent.__init__(self)
        self.SetEventType(wxEVT_INVOKE)
        # Stash the call for the GUI-thread handler to apply later.
        self.func, self.args, self.kwargs = func, args, kwargs
class DownloadInfo:
    """Main window: collects torrent-creation parameters from the user."""
    def __init__(self):
        # Build the whole form: target path, announce URL, announce list,
        # piece size choice, comment field and the "make" button.
        frame = wxFrame(None, -1, 'BitTorrent Torrent File Maker', size = wxSize(550, 410))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 2, rows = 2, vgap = 0, hgap = 8)
        gridSizer.Add(wxStaticText(panel, -1, 'make torrent of:'))
        b = wxBoxSizer(wxHORIZONTAL)
        self.dirCtl = wxTextCtrl(panel, -1, '')
        b.Add(self.dirCtl, 1, wxEXPAND)
        # b.Add(10, 10, 0, wxEXPAND)
        button = wxButton(panel, -1, 'dir', size = (30,20))
        EVT_BUTTON(frame, button.GetId(), self.selectdir)
        b.Add(button, 0)
        button2 = wxButton(panel, -1, 'file', size = (30,20))
        EVT_BUTTON(frame, button2.GetId(), self.selectfile)
        b.Add(button2, 0)
        gridSizer.Add(b, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, 'announce url:'))
        self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
        gridSizer.Add(self.annCtl, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        a = wxFlexGridSizer(cols = 1)
        a.Add(wxStaticText(panel, -1, 'announce list:'))
        a.Add(wxStaticText(panel, -1, ''))
        abutton = wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent', size = (50,70))
        EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
        a.Add(abutton, 0, wxEXPAND)
        gridSizer.Add(a, 0, wxEXPAND)
        self.annListCtl = wxTextCtrl(panel, -1, '\n\n\n\n\n', wxPoint(-1,-1), (400,120),
                                     wxTE_MULTILINE|wxHSCROLL|wxTE_DONTWRAP)
        gridSizer.Add(self.annListCtl, -1, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        exptext = wxStaticText(panel, -1,
                "a list of announces separated by commas " +
                "or whitespace and on several lines -\n" +
                "trackers on the same line will be tried randomly," +
                "and all the trackers on one line\n" +
                "will be tried before the trackers on the next line.")
        exptext.SetFont(wxFont(6, wxDEFAULT, wxNORMAL, wxNORMAL, False))
        gridSizer.Add(exptext)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'))
        # piece_length_list maps the choice index to piece_size_pow2
        # (0 = automatic, otherwise the power of two, e.g. 21 -> 2 MiB).
        self.piece_length = wxChoice(panel, -1,
                choices = ['automatic', '2MiB', '1MiB', '512KiB', '256KiB', '128KiB', '64KiB', '32KiB'])
        self.piece_length_list = [0, 21, 20, 19, 18, 17, 16, 15]
        self.piece_length.SetSelection(0)
        gridSizer.Add(self.piece_length)
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, ''))
        gridSizer.Add(wxStaticText(panel, -1, 'comment:'))
        self.commentCtl = wxTextCtrl(panel, -1, '')
        gridSizer.Add(self.commentCtl, 0, wxEXPAND)
        gridSizer.AddGrowableCol(1)
        border = wxBoxSizer(wxVERTICAL)
        border.Add(gridSizer, 0, wxEXPAND | wxNORTH | wxEAST | wxWEST, 25)
        b2 = wxButton(panel, -1, 'make')
        # border.Add(10, 10, 1, wxEXPAND)
        border.Add(b2, 0, wxALIGN_CENTER | wxSOUTH, 20)
        EVT_BUTTON(frame, b2.GetId(), self.complete)
        panel.SetSizer(border)
        panel.SetAutoLayout(True)
        # panel.DragAcceptFiles(True)
        # EVT_DROP_FILES(panel, self.selectdrop)
    def selectdir(self, x):
        # "dir" button: pick a directory as the torrent source.
        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
        if dl.ShowModal() == wxID_OK:
            self.dirCtl.SetValue(dl.GetPath())
    def selectfile(self, x):
        # "file" button: pick a single file as the torrent source.
        dl = wxFileDialog (self.frame, 'Choose file or directory to use', '', '', '', wxOPEN)
        if dl.ShowModal() == wxID_OK:
            self.dirCtl.SetValue(dl.GetPath())
    def selectdrop(self, x):
        # Drag-and-drop handler (currently disabled above).
        # NOTE(review): 'list' is assigned but x[0] is used -- looks like it
        # should be list[0]; confirm before enabling the drop handler.
        print x
        list = x.m_files
        self.dirCtl.SetValue(x[0])
    def announcecopy(self, x):
        # Copy announce / announce-list values out of an existing .torrent.
        dl = wxFileDialog (self.frame, 'Choose .torrent file to use', '', '', '*.torrent', wxOPEN)
        if dl.ShowModal() == wxID_OK:
            try:
                h = open(dl.GetPath(), 'rb')
                metainfo = bdecode(h.read())
                h.close()
                self.annCtl.SetValue(metainfo['announce'])
                if metainfo.has_key('announce-list'):
                    # Rebuild the text form: trackers of one tier joined by
                    # ', ', one tier per line.
                    list = []
                    for tier in metainfo['announce-list']:
                        for tracker in tier:
                            list += [tracker, ', ']
                        del list[-1]
                        list += ['\n']
                    liststring = ''
                    for i in list:
                        liststring += i
                    self.annListCtl.SetValue(liststring+'\n\n')
                else:
                    self.annListCtl.SetValue('')
            except:
                # Best-effort: unreadable/invalid torrents are ignored.
                return
    def getannouncelist(self):
        # Parse the multi-line text control back into a list of tiers
        # (lines), each a list of tracker URLs.
        list = []
        for t in self.annListCtl.GetValue().split('\n'):
            tier = []
            t = t.replace(',',' ')
            for tr in t.split(' '):
                if tr != '':
                    tier += [tr]
            if len(tier)>0:
                list.append(tier)
        return list
    def complete(self, x):
        # "make" button: validate input, collect params and hand off to
        # CompleteDir, which shows the progress window.
        if self.dirCtl.GetValue() == '':
            dlg = wxMessageDialog(self.frame, message = 'You must select a\n file or directory',
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
            return
        params = {'piece_size_pow2': self.piece_length_list[self.piece_length.GetSelection()]}
        annlist = self.getannouncelist()
        if len(annlist)>0:
            params['real_announce_list'] = annlist
        comment = self.commentCtl.GetValue()
        if comment != '':
            params['comment'] = comment
        try:
            CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), params)
        except:
            print_exc()
from traceback import print_exc
class CompleteDir:
    """Progress window that builds the .torrent file(s) on a worker thread.

    For a directory source it first asks whether to build one torrent per
    item; `flag` is set to cancel the background work.
    """
    def __init__(self, d, a, params):
        # d: source path, a: announce URL, params: makemetafile options.
        self.d = d
        self.a = a
        self.params = params
        self.flag = Event()
        self.separatetorrents = False
        if isdir(d):
            # Ask one-torrent-vs-many before starting the work.
            self.choicemade = Event()
            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = (1,1))
            self.frame = frame
            panel = wxPanel(frame, -1)
            gridSizer = wxFlexGridSizer(cols = 1, vgap = 8, hgap = 8)
            gridSizer.AddGrowableRow(1)
            gridSizer.Add(wxStaticText(panel, -1,
                    'Do you want to make a separate .torrent'),0,wxALIGN_CENTER)
            gridSizer.Add(wxStaticText(panel, -1,
                    'for every item in this directory?'),0,wxALIGN_CENTER)
            gridSizer.Add(wxStaticText(panel, -1, ''))
            b = wxFlexGridSizer(cols = 3, hgap = 10)
            yesbut = wxButton(panel, -1, 'Yes')
            def saidyes(e, self = self):
                # One torrent per directory entry.
                self.frame.Destroy()
                self.separatetorrents = True
                self.begin()
            EVT_BUTTON(frame, yesbut.GetId(), saidyes)
            b.Add(yesbut, 0)
            nobut = wxButton(panel, -1, 'No')
            def saidno(e, self = self):
                # Single torrent covering the whole directory.
                self.frame.Destroy()
                self.begin()
            EVT_BUTTON(frame, nobut.GetId(), saidno)
            b.Add(nobut, 0)
            cancelbut = wxButton(panel, -1, 'Cancel')
            def canceled(e, self = self):
                self.frame.Destroy()
            EVT_BUTTON(frame, cancelbut.GetId(), canceled)
            b.Add(cancelbut, 0)
            gridSizer.Add(b, 0, wxALIGN_CENTER)
            border = wxBoxSizer(wxHORIZONTAL)
            border.Add(gridSizer, 1, wxEXPAND | wxALL, 4)
            panel.SetSizer(border)
            panel.SetAutoLayout(True)
            frame.Show()
            border.Fit(panel)
            frame.Fit()
        else:
            self.begin()
    def begin(self):
        # Build the progress window and kick off the worker thread.
        if self.separatetorrents:
            frame = wxFrame(None, -1, 'BitTorrent make directory', size = wxSize(550, 250))
        else:
            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = wxSize(550, 250))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        if self.separatetorrents:
            self.currentLabel = wxStaticText(panel, -1, 'checking file sizes')
        else:
            self.currentLabel = wxStaticText(panel, -1, 'building ' + self.d + '.torrent')
        gridSizer.Add(self.currentLabel, 0, wxEXPAND)
        self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
        gridSizer.Add(self.gauge, 0, wxEXPAND)
        gridSizer.Add((10, 10), 1, wxEXPAND)
        self.button = wxButton(panel, -1, 'cancel')
        gridSizer.Add(self.button, 0, wxALIGN_CENTER)
        gridSizer.AddGrowableRow(2)
        gridSizer.AddGrowableCol(0)
        g2 = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        g2.Add(gridSizer, 1, wxEXPAND | wxALL, 25)
        g2.AddGrowableRow(0)
        g2.AddGrowableCol(0)
        panel.SetSizer(g2)
        panel.SetAutoLayout(True)
        EVT_BUTTON(frame, self.button.GetId(), self.done)
        EVT_CLOSE(frame, self.done)
        EVT_INVOKE(frame, self.onInvoke)
        frame.Show(True)
        # Hashing runs off the GUI thread; progress comes back through
        # invokeLater/EVT_INVOKE.
        Thread(target = self.complete).start()
    def complete(self):
        # Worker-thread entry point: build the torrent(s), then update the
        # UI on success or failure.
        try:
            if self.separatetorrents:
                completedir(self.d, self.a, self.params, self.flag,
                            self.valcallback, self.filecallback)
            else:
                make_meta_file(self.d, self.a, self.params, self.flag,
                               self.valcallback, progress_percent = 1)
            if not self.flag.isSet():
                self.currentLabel.SetLabel('Done!')
                self.gauge.SetValue(1000)
                self.button.SetLabel('Close')
                self.frame.Refresh()
        except (OSError, IOError), e:
            self.currentLabel.SetLabel('Error!')
            self.button.SetLabel('Close')
            dlg = wxMessageDialog(self.frame, message = 'Error - ' + str(e),
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
    def valcallback(self, amount):
        # Progress callback (worker thread); amount is a 0..1 fraction.
        self.invokeLater(self.onval, [amount])
    def onval(self, amount):
        self.gauge.SetValue(int(amount * 1000))
    def filecallback(self, f):
        # Per-file callback (worker thread) in separate-torrents mode.
        self.invokeLater(self.onfile, [f])
    def onfile(self, f):
        self.currentLabel.SetLabel('building ' + join(self.d, f) + '.torrent')
    def onInvoke(self, event):
        # GUI-thread handler for InvokeEvent: run the deferred call unless
        # the job was cancelled.
        if not self.flag.isSet():
            apply(event.func, event.args, event.kwargs)
    def invokeLater(self, func, args = [], kwargs = {}):
        # Marshal a call onto the GUI thread. NOTE(review): mutable default
        # arguments; harmless here because they are never mutated.
        if not self.flag.isSet():
            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
    def done(self, event):
        # Cancel/close: signal the worker to stop and tear down the window.
        self.flag.set()
        self.frame.Destroy()
class btWxApp(wxApp):
    """Application shell: shows the maker window as the top-level window."""
    def OnInit(self):
        d = DownloadInfo()
        d.frame.Show(True)
        self.SetTopWindow(d.frame)
        return True
if __name__ == '__main__':
    # Start the wx event loop; blocks until all windows are closed.
    btWxApp().MainLoop()
| |
"""Functionality to parse mjlog (XML) data"""
from __future__ import absolute_import
from __future__ import division
import logging
from tenhou_log_utils.io import ensure_unicode, unquote
_LG = logging.getLogger(__name__)
# TODO: Expose all parse_XX functions.
def _parse_str_list(val, type_):
return [type_(val) for val in val.split(',')] if val else []
###############################################################################
def _parse_shuffle(attrib):
return {
'seed': attrib['seed'],
'ref': attrib['ref'],
}
###############################################################################
def _parse_game_config(game_config):
    """Decode the GO ``type`` bitfield into a table name and rule flags."""
    _LG.debug(' Game Config: %s', bin(game_config))
    is_test = not game_config & 0x01
    is_tokujou = bool(game_config & 0x20)
    is_joukyu = bool(game_config & 0x80)
    # Order matters: both high-table bits set means 'tenhou', and the test
    # flag takes precedence over either single table bit.
    if is_tokujou and is_joukyu:
        table = 'tenhou'
    elif is_test:
        table = 'test'
    elif is_tokujou:
        table = 'tokujou'
    elif is_joukyu:
        table = 'joukyu'
    else:
        table = 'dan-i'
    config = {
        'red': not game_config & 0x02,
        'kui': not game_config & 0x04,
        'ton-nan': bool(game_config & 0x08),
        'sanma': bool(game_config & 0x10),
        'soku': bool(game_config & 0x40),
    }
    return table, config
def _parse_go(attrib):
    """Parse a GO node: table type, rule config and optional lobby number."""
    table, config = _parse_game_config(int(attrib['type']))
    lobby = int(attrib['lobby']) if 'lobby' in attrib else None
    return {'table': table, 'config': config, 'lobby': lobby}
###############################################################################
def _parse_resume(attrib):
    """Parse a reconnect node (UN with a single ``n<index>`` attribute)."""
    key, value = next(iter(attrib.items()))
    return {'index': int(key[1]), 'name': unquote(value)}
def _parse_un(attrib):
    """Parse a UN node into per-player name/dan/rate/sex dictionaries."""
    names = [
        unquote(attrib[key])
        for key in ('n0', 'n1', 'n2', 'n3') if key in attrib
    ]
    # Old logs may omit 'dan'; -1 marks an unknown rank.
    dans = _parse_str_list(attrib.get('dan', '-1,-1,-1,-1'), type_=int)
    rates = _parse_str_list(attrib['rate'], type_=float)
    sexes = _parse_str_list(attrib['sx'], type_=ensure_unicode)
    players = []
    for name, dan, rate, sex in zip(names, dans, rates, sexes):
        players.append({'name': name, 'dan': dan, 'rate': rate, 'sex': sex})
    return players
################################################################################
def _parse_taikyoku(attrib):
return {'oya': attrib['oya']}
###############################################################################
def _parse_score(data):
    """Convert a comma-separated score string into point values (x100)."""
    return [100 * value for value in _parse_str_list(data, type_=int)]
def _parse_init(attrib):
    """Parse an INIT node: seed values, starting scores and dealt hands."""
    seed = _parse_str_list(attrib['seed'], type_=int)
    hands = []
    for key in ('hai0', 'hai1', 'hai2', 'hai3'):
        if key in attrib:
            hands.append(_parse_str_list(attrib[key], type_=int))
    return {
        'oya': attrib['oya'],
        'scores': _parse_score(attrib['ten']),
        'hands': hands,
        'round': seed[0],
        'combo': seed[1],
        'reach': seed[2],
        'dices': seed[3:5],
        'dora': seed[5],
    }
###############################################################################
def _parse_draw(tag):
player = ord(tag[0]) - ord('T')
tile = int(tag[1:])
return {'player': player, 'tile': tile}
###############################################################################
def _parse_discard(tag):
player = ord(tag[0]) - ord('D')
tile = int(tag[1:])
return {'player': player, 'tile': tile}
###############################################################################
def _parse_shuntsu(meld):
# Adopted from http://tenhou.net/img/tehai.js
t = (meld & 0xfc00) >> 10
r = t % 3
t = t // 3
t = 9 * (t // 7) + (t % 7)
t *= 4
h = [
t + 4*0 + ((meld & 0x0018)>>3),
t + 4*1 + ((meld & 0x0060)>>5),
t + 4*2 + ((meld & 0x0180)>>7),
]
if r == 1:
h = [h[1], h[0], h[2]]
elif r == 2:
h = [h[2], h[0], h[1]]
return h
def _parse_koutsu(meld):
# Adopted from http://tenhou.net/img/tehai.js
unused = (meld &0x0060) >> 5
t = (meld & 0xfe00) >> 9
r = t % 3
t = t // 3
t *= 4
h = [t, t, t]
if unused == 0:
h[0] += 1
h[1] += 2
h[2] += 3
elif unused == 1:
h[0] += 0
h[1] += 2
h[2] += 3
elif unused == 2:
h[0] += 0
h[1] += 1
h[2] += 3
elif unused == 3:
h[0] += 0
h[1] += 1
h[2] += 2
if r == 1:
h = [h[1], h[0], h[2]]
elif r == 2:
h = [h[2], h[0], h[1]]
kui = meld & 0x3
if kui < 3:
h = [h[2], h[0], h[1]]
if kui < 2:
h = [h[2], h[0], h[1]]
return h
def _parse_kakan(meld):
# Adopted from http://tenhou.net/img/tehai.js
added = (meld & 0x0060) >> 5
t = (meld & 0xFE00) >> 9
r = t % 3
t = t // 3
t *= 4
h = [t, t, t]
if added == 0:
h[0] += 1
h[1] += 2
h[2] += 3
elif added == 1:
h[0] += 0
h[1] += 2
h[2] += 3
elif added == 2:
h[0] += 0
h[1] += 1
h[2] += 3
elif added == 3:
h[0] += 0
h[1] += 1
h[2] += 2
if r == 1:
h = [h[1], h[0], h[2]]
elif r == 2:
h = [h[2], h[0], h[1]]
kui = meld & 0x3
if kui == 3:
h = [t + added, h[0], h[1], h[2]]
elif kui == 2:
h = [h[1], t + added, h[0], h[2]]
elif kui == 1:
h = [h[2], h[1], t + added, h[0]]
return h
def _parse_kan(meld):
# Adopted from http://tenhou.net/img/tehai.js
hai0 = (meld & 0xff00) >> 8
kui = meld & 0x3
if not kui: # Ankan
hai0 = (hai0 & ~3) +3
t = (hai0 // 4) * 4
h = [t, t, t]
rem = hai0 % 4
if rem == 0:
h[0] += 1
h[1] += 2
h[2] += 3
elif rem == 1:
h[0] += 0
h[1] += 2
h[2] += 3
elif rem == 2:
h[0] += 0
h[1] += 1
h[2] += 3
else:
h[0] += 0
h[1] += 1
h[2] += 2
if kui == 1:
hai0, h[2] = h[2], hai0
if kui == 2:
hai0, h[0] = h[0], hai0
return ([hai0] + h) if kui else h[:2]
def _parse_call(attrib):
    """Parse a call (N) node into caller/callee, call type and tiles."""
    caller = int(attrib['who'])
    meld = int(attrib['m'])
    callee_rel = meld & 0x3  # seat offset of the discarder, relative to caller
    _LG.debug(' Meld: %s', bin(meld))
    # Flag bits select the meld kind; check them in the original order.
    if meld & (1 << 2):
        type_, mentsu = 'Chi', _parse_shuntsu(meld)
    elif meld & (1 << 3):
        type_, mentsu = 'Pon', _parse_koutsu(meld)
    elif meld & (1 << 4):
        type_, mentsu = 'KaKan', _parse_kakan(meld)
    elif meld & (1 << 5):
        type_, mentsu = 'Nuki', [meld >> 8]
    else:
        type_ = 'MinKan' if callee_rel else 'AnKan'
        mentsu = _parse_kan(meld)
    return {
        'caller': caller,
        'callee': (caller + callee_rel) % 4,
        'call_type': type_,
        'mentsu': mentsu,
    }
###############################################################################
def _parse_reach(attrib):
who, step = int(attrib['who']), int(attrib['step'])
if step > 2:
raise NotImplementedError('Unexpected step value: {}'.format(attrib))
result = {'player': who, 'step': step}
# Old logs do not have ten values.
if 'ten' in attrib:
result['scores'] = _parse_score(attrib['ten'])
return result
################################################################################
def _nest_list(vals):
if len(vals) % 2:
raise RuntimeError('List with odd number of value was given.')
return list(zip(vals[::2], vals[1::2]))
def _parse_ba(val):
    """Parse the ``ba`` attribute: repeat-combo and riichi-stick counters."""
    counters = _parse_str_list(val, type_=int)
    return {'combo': counters[0], 'reach': counters[1]}
def _parse_owari(val):
    """Parse final game results: alternating score and uma values."""
    values = _parse_str_list(val, type_=float)
    return {
        'scores': [int(100 * score) for score in values[::2]],
        'uma': values[1::2],
    }
def _parse_ten(ten):
    """Parse the ``ten`` attribute: fu, winning points and limit flag."""
    parts = _parse_str_list(ten, type_=int)
    return {'fu': parts[0], 'point': parts[1], 'limit': parts[2]}
def _parse_sc(sc_val):
    """Split the ``sc`` attribute into (scores, gains), both x100."""
    values = _parse_score(sc_val)
    return values[::2], values[1::2]
def _parse_agari(attrib):
    """Parse an AGARI (win) node into hand, yaku, score and result data."""
    winner = int(attrib['who'])
    from_who = int(attrib['fromWho'])
    scores, gains = _parse_sc(attrib['sc'])
    result = {
        'winner': winner,
        'hand': _parse_str_list(attrib['hai'], type_=int),
        'machi': _parse_str_list(attrib['machi'], type_=int),
        'dora': _parse_str_list(attrib['doraHai'], type_=int),
        'ura_dora': _parse_str_list(
            attrib.get('doraHaiUra', ''), type_=int),
        'yaku': _nest_list(_parse_str_list(attrib.get('yaku'), type_=int)),
        'yakuman': _parse_str_list(attrib.get('yakuman', ''), type_=int),
        'ten': _parse_ten(attrib['ten']),
        'ba': _parse_ba(attrib['ba']),
        'scores': scores,
        'gains': gains,
    }
    if winner != from_who:
        # Won off another player's tile: record who dealt in.
        result['loser'] = from_who
    if 'owari' in attrib:
        # Final hand of the game also carries the overall results.
        result['result'] = _parse_owari(attrib['owari'])
    return result
################################################################################
def _parse_dora(attrib):
return {'hai': int(attrib['hai'])}
###############################################################################
def _parse_ryuukyoku(attrib):
    """Parse a RYUUKYOKU (drawn round) node."""
    scores, gains = _parse_sc(attrib['sc'])
    # Revealed hands; None for players who did not show one.
    hands = []
    for key in ['hai0', 'hai1', 'hai2', 'hai3']:
        if key in attrib:
            hands.append(_parse_str_list(attrib[key], type_=int))
        else:
            hands.append(None)
    result = {
        'hands': hands,
        'ba': _parse_ba(attrib['ba']),
        'scores': scores,
        'gains': gains,
    }
    if 'type' in attrib:
        result['reason'] = attrib['type']
    if 'owari' in attrib:
        result['result'] = _parse_owari(attrib['owari'])
    return result
###############################################################################
def _parse_bye(attrib):
return {'index': int(attrib['who'])}
###############################################################################
def _ensure_unicode(data):
    """Coerce every key and value of *data* to unicode strings."""
    result = {}
    for key, value in data.items():
        result[ensure_unicode(key)] = ensure_unicode(value)
    return result
def parse_node(tag, attrib):
    """Parse individual XML node of tenhou mjlog.

    Parameters
    ----------
    tag : str
        Tags such as 'GO', 'DORA', 'AGARI' etc...

    attrib: dict or list
        Attribute of the node

    Returns
    -------
    dict
        JSON object with 'tag' (possibly renamed, e.g. 'T68' -> 'DRAW')
        and 'data' (the tag-specific parse result).
    """
    attrib = _ensure_unicode(attrib)
    _LG.debug('Input: %s: %s', tag, attrib)
    # NOTE: the order of these checks matters. Multi-letter tags such as
    # 'GO' and 'DORA' must be matched before the single-letter prefix
    # checks below, because 'G' and 'D' also start discard tags.
    if tag == 'GO':
        data = _parse_go(attrib)
    elif tag == 'UN':
        if len(attrib) == 1:  # Disconnected player has returned
            data = _parse_resume(attrib)
            tag = 'RESUME'
        else:
            data = _parse_un(attrib)
    elif tag == 'TAIKYOKU':
        data = _parse_taikyoku(attrib)
    elif tag == 'SHUFFLE':
        data = _parse_shuffle(attrib)
    elif tag == 'INIT':
        data = _parse_init(attrib)
    elif tag == 'DORA':
        data = _parse_dora(attrib)
    elif tag[0] in {'T', 'U', 'V', 'W'}:
        # Draw tags encode player and tile in the tag name itself.
        data = _parse_draw(tag)
        tag = 'DRAW'
    elif tag[0] in {'D', 'E', 'F', 'G'}:
        data = _parse_discard(tag)
        tag = 'DISCARD'
    elif tag == 'N':
        data = _parse_call(attrib)
        tag = 'CALL'
    elif tag == 'REACH':
        data = _parse_reach(attrib)
    elif tag == 'AGARI':
        data = _parse_agari(attrib)
    elif tag == 'RYUUKYOKU':
        data = _parse_ryuukyoku(attrib)
    elif tag == 'BYE':
        data = _parse_bye(attrib)
    else:
        raise NotImplementedError('{}: {}'.format(tag, attrib))
    _LG.debug('Output: %s: %s', tag, data)
    return {'tag': tag, 'data': data}
###############################################################################
def _validate_structure(parsed, meta, rounds):
# Verfiy all the items are passed
if not len(parsed) == len(meta) + sum(len(r) for r in rounds):
raise AssertionError('Not all the items are structured.')
# Verfiy all the rounds start with INIT tag
for round_ in rounds:
tag = round_[0]['tag']
if not tag == 'INIT':
raise AssertionError('Round must start with INIT tag; %s' % tag)
def _structure_parsed_result(parsed):
    """Add structure to parsed log data

    Parameters
    ----------
    parsed : list of dict
        Each item in list corresponds to an XML node in original mjlog file.

    Returns
    -------
    dict
        On top level, 'meta' and 'rounds' key are defined. 'meta' contains
        'SHUFFLE', 'GO', 'UN' and 'TAIKYOKU' keys and its parsed results as
        values. 'rounds' is a list of which items correspond to one round of
        game play.
    """
    round_ = None
    game = {'meta': {}, 'rounds': []}
    for item in parsed:
        tag, data = item['tag'], item['data']
        if tag in ['SHUFFLE', 'GO', 'UN', 'TAIKYOKU']:
            game['meta'][tag] = data
        elif tag == 'INIT':
            # A new round starts; flush the previous one first.
            if round_ is not None:
                game['rounds'].append(round_)
            round_ = [item]
        else:
            round_.append(item)
    # Flush the final round. Guard against logs with no INIT node at all
    # (previously ``None`` was appended, which crashed validation).
    if round_ is not None:
        game['rounds'].append(round_)
    _validate_structure(parsed, game['meta'], game['rounds'])
    return game
def parse_mjlog(root_node, tags=None):
    """Convert mjlog XML node into JSON

    Parameters
    ----------
    root_node (Element)
        Root node of mjlog XML data.

    tags : list of str
        When present, only the given tags are parsed and no post-processing
        is carried out.

    Returns
    -------
    dict
        Dictionary of of child nodes parsed.
    """
    if tags is None:
        parsed = [parse_node(node.tag, node.attrib) for node in root_node]
        return _structure_parsed_result(parsed)
    return [
        parse_node(node.tag, node.attrib)
        for node in root_node if node.tag in tags
    ]
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
import sys
import mne
import matplotlib.pyplot as plt
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import time
from functools import partial
from mne import set_config
# MNE cache/memmap tuning and CUDA-accelerated processing.
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
# Copy of the environment with SUBJECTS_DIR pointing at the FreeSurfer
# subjects directory, for subprocess calls.
this_env = copy.copy(os.environ)
#fs_dir = '/mnt/diskArray/projects/freesurfer'
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
#this_env['FREESURFER_HOME'] = '/usr/local/freesurfer'
raw_dir = '/mnt/scratch/NLR_MEG4'
os.chdir(raw_dir)
# FreeSurfer subject IDs processed by this script.
subs = ['NLR_102_RS','NLR_105_BB','NLR_110_HH','NLR_127_AM',
        'NLR_132_WP','NLR_145_AC','NLR_150_MG',
        'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF',
        'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
        'NLR_180_ZD','NLR_201_GS','NLR_203_AM',
        'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
        'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420',
        'NLR_HB275','NLR_GB355']
#for n, s in enumerate(subs):
# run_subprocess(['mne', 'watershed_bem', '--subject', subs[n],'--overwrite'], env=this_env)
# mne.bem.make_watershed_bem(subject = subs[n],subjects_dir=fs_dir,overwrite=True,preflood=20, show=True)
"""USE above code
mri_watershed -h 3 -useSRAS -surf /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/NLR_205_AC /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/mri/T1.mgz /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/ws
"""
"""
Run head_surf.m
"""
# Let's take a look...
#for n, s in enumerate(subs):
# mne.viz.plot_bem(subject=subs[n],subjects_dir=fs_dir,brain_surfaces='white', orientation='coronal')
#for n, s in enumerate(subs):
## os.chdir(os.path.join(fs_dir,subs[n],'bem'))
# run_subprocess(['mne', 'make_scalp_surfaces', '--subject', subs[n],
# '--overwrite','--no-decimate']) # Disable medium and sparse decimations (dense only)
# # otherwise, it gives errors
""" Co-register...
mne.gui.coregistration(tabbed=False,subject=subs[0],subjects_dir=fs_dir)
# Recommended way is to use mne coreg from terminal
"""
# Session 2
# Session-2 recording folder names under raw_dir; index-aligned with the
# subject IDs in ``subs`` above (folder = subject number + recording date).
session2 = ['102_rs160815','105_bb161011','110_hh160809','127_am161004',
            '132_wp161122','145_ac160823','150_mg160825',
            '152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
            '164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
            '180_zd160826','201_gs150925','203_am151029',
            '204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
            'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
            'nlr_hb275170828','nlr_gb355170907']
#subs = ['NLR_205_AC','NLR_206_LM',
# 'NLR_207_AH','NLR_210_SB','NLR_211_LB'
# ]
#session1 = ['205_ac151208','205_ac160202',
# '206_lm151119',
# '206_lm160113','207_ah160608','207_ah160809','210_sb160822','211_lb160617','211_lb160823'
# ]
#n_subjects = len(subs)
"""
Forward model...
"""
#sourceFlag = np.ones((n_subjects,1))
#%%
#for n, s in enumerate(session1):
# os.chdir(os.path.join(raw_dir,session1[n]))
#
# if s[0:3] == 'nlr':
# subject = s[0:9].upper()
# else:
# subject = 'NLR_' + s[0:6].upper()
#
# os.chdir('inverse')
# fn = 'All_40-sss_eq_'+session1[n]+'-ave.fif'
# evoked = mne.read_evokeds(fn, condition=0,
# baseline=(None,0), kind='average', proj=True)
#
# info = evoked.info
#
# if os.path.isdir('../forward'):
# os.chdir('../forward')
## else:
## temp_src = '/mnt/scratch/NLR_MEG2/' + session1[n] + '/forward'
## temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward'
## shutil.copytree(temp_src, temp_dest)
# trans = session1[n] + '-trans.fif'
## Take a look at the sensors
# mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
# meg_sensors=True, subjects_dir=fs_dir)
#%%
#n = 0
#os.chdir(os.path.join(raw_dir,session1[n]))
#os.chdir('raw_fif')
#pos = mne.chpi.read_head_pos('102_rs160618_1_raw.pos')
#mne.viz.plot_head_positions(pos, mode='traces')
#%%
# Build a single-layer BEM and a fixed-orientation MEG forward solution for
# every session-2 recording. Side effects per session: writes
# '<session>-bem-sol.fif' and '<session>-sss-fwd.fif' into the session's
# 'forward' directory. Relies on module-level raw_dir, fs_dir, mne, os, shutil.
for n, s in enumerate(session2):
    os.chdir(os.path.join(raw_dir,session2[n]))
    # Derive the FreeSurfer subject ID: 'nlr_*' names already carry the code,
    # everything else gets an 'NLR_' prefix. Both are upper-cased.
    if s[0:3] == 'nlr':
        subject = s[0:9].upper()
    else:
        subject = 'NLR_' + s[0:6].upper()
    os.chdir('inverse')
    fn = 'All_40-sss_eq_'+session2[n]+'-ave.fif'
    # Only the measurement info (sensor geometry, SSS/proj state) is used below.
    evoked = mne.read_evokeds(fn, condition=0,
                              baseline=(None,0), kind='average', proj=True)
    info = evoked.info
    if os.path.isdir('../forward'):
        os.chdir('../forward')
    else:
        # Forward folder missing on this filesystem: copy it over from the
        # NLR_MEG2 scratch area, then work inside the copy.
        temp_src = '/mnt/scratch/NLR_MEG2/' + session2[n] + '/forward'
        temp_dest = '/mnt/scratch/NLR_MEG3/' + session2[n] + '/forward'
        shutil.copytree(temp_src, temp_dest)
    trans = session2[n] + '-trans.fif'
    # Take a look at the sensors
    # mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
    #                    meg_sensors=True, subjects_dir=fs_dir)
    ### Read source space
    # spacing='oct6' #'ico5' # 10242 * 2
    fn2 = subject + '-' + 'ico-5' + '-src.fif' # ico-5
    if s == '205_ac151123' or s == '205_ac160202' or s == 'nlr_jb227170811': # NLR_205 has too small head for ico-5
        fn2 = subject + '-' + 'oct-6' + '-src.fif'
    os.chdir(os.path.join(fs_dir,subject,'bem'))
    src = mne.read_source_spaces(fn2)
    os.chdir(os.path.join(raw_dir,session2[n]))
    os.chdir('forward')
    #import numpy as np  # noqa
    #from mayavi import mlab  # noqa
    #from surfer import Brain  # noqa
    #
    #brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
    #surf = brain._geo
    #
    #vertidx = np.where(src[0]['inuse'])[0]
    #
    #mlab.points3d(surf.x[vertidx], surf.y[vertidx],
    #              surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
    # Create BEM model (single conductivity layer is sufficient for MEG-only).
    conductivity = (0.3,)  # for single layer
    #conductivity = (0.3, 0.006, 0.3)  # for three layers
    model = mne.make_bem_model(subject=subject, ico=5,  # 5=20484, 4=5120
                               conductivity=conductivity,
                               subjects_dir=fs_dir)
    bem = mne.make_bem_solution(model)
    fn = session2[n] + '-bem-sol.fif'
    mne.write_bem_solution(fn,bem)
    # Now create forward model (MEG only; sources <3 mm from the inner skull
    # are excluded via mindist).
    fwd = mne.make_forward_solution(info, trans=trans, src=src, bem=bem,
                                    meg=True, eeg=False, mindist=3.0, n_jobs=18)
    # Convert to fixed (surface-normal) source orientations before saving.
    fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, copy=True)
    fn = session2[n] + '-sss-fwd.fif'
    mne.write_forward_solution(fn,fwd,overwrite=True)
    # Inverse here
    # NOTE(review): the commented inverse block below indexes session1, not
    # session2 -- update before reactivating.
    # os.chdir('../covariance')
    # fn = session1[n] + '-40-sss-cov.fif'
    # cov = mne.read_cov(fn)
    #
    # os.chdir('../inverse')
    # # Free: loose = 1; Loose: loose = 0.2
    # inv = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0., depth=0.8, use_cps=True)
    #
    # fn = session1[n] + '-fixed-depth8-inv.fif'
    # mne.minimum_norm.write_inverse_operator(fn,inv)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Basic neural network layers."""
__all__ = ['Sequential', 'HybridSequential', 'Dense', 'Activation',
'Dropout', 'BatchNorm', 'LeakyReLU', 'Embedding', 'Flatten',
'Lambda', 'HybridLambda']
import warnings
import numpy as np
from ..block import Block, HybridBlock
from ..utils import _indent
from ... import nd, sym
class Sequential(Block):
    """Stacks Blocks sequentially, feeding each child's output into the next.

    Example::

        net = nn.Sequential()
        # use net's name_scope to give child Blocks appropriate names.
        with net.name_scope():
            net.add(nn.Dense(10, activation='relu'))
            net.add(nn.Dense(20))
    """
    def __init__(self, prefix=None, params=None):
        super(Sequential, self).__init__(prefix=prefix, params=params)

    def add(self, *blocks):
        """Adds block on top of the stack."""
        for blk in blocks:
            self.register_child(blk)

    def forward(self, x):
        # Chain the children: output of one is input to the next.
        out = x
        for blk in self._children:
            out = blk(out)
        return out

    def __repr__(self):
        entries = []
        for idx, blk in enumerate(self._children):
            if isinstance(blk, Block):
                entries.append('  ({key}): {block}'.format(
                    key=idx, block=_indent(blk.__repr__(), 2)))
        return '{name}(\n{modstr}\n)'.format(name=self.__class__.__name__,
                                             modstr='\n'.join(entries))

    def __getitem__(self, key):
        return self._children[key]

    def __len__(self):
        return len(self._children)

    def hybridize(self, active=True):
        """Activates or deactivates `HybridBlock`s recursively. Has no effect on
        non-hybrid children.

        Parameters
        ----------
        active : bool, default True
            Whether to turn hybrid on or off.
        """
        children = self._children
        # Warn when every child is hybridizable: HybridSequential would be faster.
        if children and all(isinstance(c, HybridBlock) for c in children):
            warnings.warn('All children of this Sequential layer are HybridBlocks. Consider '
                          'using HybridSequential for the best performance.')
        super(Sequential, self).hybridize(active)
class HybridSequential(HybridBlock):
    """Stacks HybridBlocks sequentially.
    Example::
        net = nn.HybridSequential()
        # use net's name_scope to give child Blocks appropriate names.
        with net.name_scope():
            net.add(nn.Dense(10, activation='relu'))
            net.add(nn.Dense(20))
        net.hybridize()
    """
    def __init__(self, prefix=None, params=None):
        super(HybridSequential, self).__init__(prefix=prefix, params=params)
    def add(self, *blocks):
        """Adds block on top of the stack."""
        for block in blocks:
            self.register_child(block)
    def hybrid_forward(self, F, x):
        # Chain the children; F is `nd` when imperative, `sym` when hybridized.
        for block in self._children:
            x = block(x)
        return x
    def __repr__(self):
        # Render one indented line per registered child block.
        s = '{name}(\n{modstr}\n)'
        modstr = '\n'.join(['  ({key}): {block}'.format(key=key,
                                                        block=_indent(block.__repr__(), 2))
                            for key, block in enumerate(self._children)
                            if isinstance(block, Block)])
        return s.format(name=self.__class__.__name__,
                        modstr=modstr)
    def __getitem__(self, key):
        return self._children[key]
    def __len__(self):
        return len(self._children)
class Dense(HybridBlock):
    r"""Just your regular densely-connected NN layer.
    `Dense` implements the operation:
    `output = activation(dot(input, weight) + bias)`
    where `activation` is the element-wise activation function
    passed as the `activation` argument, `weight` is a weights matrix
    created by the layer, and `bias` is a bias vector created by the layer
    (only applicable if `use_bias` is `True`).
    Note: the input must be a tensor with rank 2. Use `flatten` to convert it
    to rank 2 manually if necessary.
    Parameters
    ----------
    units : int
        Dimensionality of the output space.
    activation : str
        Activation function to use. See help on `Activation` layer.
        If you don't specify anything, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
    use_bias : bool
        Whether the layer uses a bias vector.
    flatten: bool
        Whether the input tensor should be flattened.
        If true, all but the first axis of input data are collapsed together.
        If false, all but the last axis of input data are kept the same, and the transformation
        applies on the last axis.
    weight_initializer : str or `Initializer`
        Initializer for the `kernel` weights matrix.
    bias_initializer: str or `Initializer`
        Initializer for the bias vector.
    in_units : int, optional
        Size of the input data. If not specified, initialization will be
        deferred to the first time `forward` is called and `in_units`
        will be inferred from the shape of input data.
    prefix : str or None
        See document of `Block`.
    params : ParameterDict or None
        See document of `Block`.
    Inputs:
        - **data**: if `flatten` is True, `data` should be a tensor with shape
          `(batch_size, x1, x2, ..., xn)`, where x1 * x2 * ... * xn is equal to
          `in_units`. If `flatten` is False, `data` should have shape
          `(x1, x2, ..., xn, in_units)`.
    Outputs:
        - **out**: if `flatten` is True, `out` will be a tensor with shape
          `(batch_size, units)`. If `flatten` is False, `out` will have shape
          `(x1, x2, ..., xn, units)`.
    """
    def __init__(self, units, activation=None, use_bias=True, flatten=True,
                 weight_initializer=None, bias_initializer='zeros',
                 in_units=0, **kwargs):
        super(Dense, self).__init__(**kwargs)
        self._flatten = flatten
        with self.name_scope():
            self._units = units
            self._in_units = in_units
            # in_units == 0 gives shape (units, 0): the second axis is inferred
            # on the first forward call (allow_deferred_init).
            self.weight = self.params.get('weight', shape=(units, in_units),
                                          init=weight_initializer,
                                          allow_deferred_init=True)
            if use_bias:
                self.bias = self.params.get('bias', shape=(units,),
                                            init=bias_initializer,
                                            allow_deferred_init=True)
            else:
                self.bias = None
            if activation is not None:
                # Wrap the named activation as a child block so it is applied
                # after the linear transform.
                self.act = Activation(activation, prefix=activation+'_')
            else:
                self.act = None
    def hybrid_forward(self, F, x, weight, bias=None):
        # no_bias mirrors whether a bias Parameter was registered above.
        act = F.FullyConnected(x, weight, bias, no_bias=bias is None, num_hidden=self._units,
                               flatten=self._flatten, name='fwd')
        if self.act is not None:
            act = self.act(act)
        return act
    def __repr__(self):
        # Shows "in_units -> units"; in_units prints as None while deferred.
        s = '{name}({layout}, {act})'
        shape = self.weight.shape
        return s.format(name=self.__class__.__name__,
                        act=self.act if self.act else 'linear',
                        layout='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]))
class Activation(HybridBlock):
    r"""Applies a named element-wise activation function to the input.

    Parameters
    ----------
    activation : str
        Name of activation function to use.
        See :func:`~mxnet.ndarray.Activation` for available choices.

    Inputs:
        - **data**: input tensor with arbitrary shape.
    Outputs:
        - **out**: output tensor with the same shape as `data`.
    """
    def __init__(self, activation, **kwargs):
        # Set before super().__init__: _alias() is consulted while the base
        # class builds the block's name prefix.
        self._act_type = activation
        super(Activation, self).__init__(**kwargs)

    def _alias(self):
        return self._act_type

    def hybrid_forward(self, F, x):
        return F.Activation(x, act_type=self._act_type, name='fwd')

    def __repr__(self):
        return '{name}({_act_type})'.format(name=self.__class__.__name__,
                                            _act_type=self._act_type)
class Dropout(HybridBlock):
    """Applies Dropout to the input.

    During training, randomly zeroes a fraction `rate` of the input units at
    each update, which helps prevent overfitting.

    Parameters
    ----------
    rate : float
        Fraction of the input units to drop. Must be a number between 0 and 1.

    Inputs:
        - **data**: input tensor with arbitrary shape.
    Outputs:
        - **out**: output tensor with the same shape as `data`.

    References
    ----------
    `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
    """
    def __init__(self, rate, **kwargs):
        super(Dropout, self).__init__(**kwargs)
        self._rate = rate

    def hybrid_forward(self, F, x):
        return F.Dropout(x, p=self._rate, name='fwd')

    def __repr__(self):
        return '{name}(p = {_rate})'.format(name=self.__class__.__name__,
                                            _rate=self._rate)
class BatchNorm(HybridBlock):
    """Batch normalization layer (Ioffe and Szegedy, 2014).
    Normalizes the input at each batch, i.e. applies a transformation
    that maintains the mean activation close to 0 and the activation
    standard deviation close to 1.
    Parameters
    ----------
    axis : int, default 1
        The axis that should be normalized. This is typically the channels
        (C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
        set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
    momentum: float, default 0.9
        Momentum for the moving average.
    epsilon: float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    center: bool, default True
        If True, add offset of `beta` to normalized tensor.
        If False, `beta` is ignored.
    scale: bool, default True
        If True, multiply by `gamma`. If False, `gamma` is not used.
        When the next layer is linear (also e.g. `nn.relu`),
        this can be disabled since the scaling
        will be done by the next layer.
    beta_initializer: str or `Initializer`, default 'zeros'
        Initializer for the beta weight.
    gamma_initializer: str or `Initializer`, default 'ones'
        Initializer for the gamma weight.
    moving_mean_initializer: str or `Initializer`, default 'zeros'
        Initializer for the moving mean.
    moving_variance_initializer: str or `Initializer`, default 'ones'
        Initializer for the moving variance.
    in_channels : int, default 0
        Number of channels (feature maps) in input data. If not specified,
        initialization will be deferred to the first time `forward` is called
        and `in_channels` will be inferred from the shape of input data.
    Inputs:
        - **data**: input tensor with arbitrary shape.
    Outputs:
        - **out**: output tensor with the same shape as `data`.
    """
    def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
                 beta_initializer='zeros', gamma_initializer='ones',
                 running_mean_initializer='zeros', running_variance_initializer='ones',
                 in_channels=0, **kwargs):
        super(BatchNorm, self).__init__(**kwargs)
        # fix_gamma=True makes the BatchNorm op hold gamma at 1 when scaling
        # is disabled.
        self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
                        'fix_gamma': not scale}
        if in_channels != 0:
            self.in_channels = in_channels
        # gamma/beta get grad_req='null' when disabled so the optimizer skips them.
        self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
                                     shape=(in_channels,), init=gamma_initializer,
                                     allow_deferred_init=True,
                                     differentiable=scale)
        self.beta = self.params.get('beta', grad_req='write' if center else 'null',
                                    shape=(in_channels,), init=beta_initializer,
                                    allow_deferred_init=True,
                                    differentiable=center)
        # Running statistics are updated by the BatchNorm op itself, never by
        # gradient descent (grad_req='null', differentiable=False).
        self.running_mean = self.params.get('running_mean', grad_req='null',
                                            shape=(in_channels,),
                                            init=running_mean_initializer,
                                            allow_deferred_init=True,
                                            differentiable=False)
        self.running_var = self.params.get('running_var', grad_req='null',
                                           shape=(in_channels,),
                                           init=running_variance_initializer,
                                           allow_deferred_init=True,
                                           differentiable=False)
    def cast(self, dtype):
        # Keep BatchNorm parameters/statistics in float32 even when the rest
        # of the network is cast to float16.
        if np.dtype(dtype).name == 'float16':
            dtype = 'float32'
        super(BatchNorm, self).cast(dtype)
    def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
        return F.BatchNorm(x, gamma, beta, running_mean, running_var,
                           name='fwd', **self._kwargs)
    def __repr__(self):
        # in_channels prints as None while shape inference is still deferred.
        s = '{name}({content}'
        in_channels = self.gamma.shape[0]
        s += ', in_channels={0}'.format(in_channels if in_channels else None)
        s += ')'
        return s.format(name=self.__class__.__name__,
                        content=', '.join(['='.join([k, v.__repr__()])
                                           for k, v in self._kwargs.items()]))
class LeakyReLU(HybridBlock):
    r"""Leaky version of a Rectified Linear Unit.

    Lets a small gradient through when the unit is inactive:

    .. math::
        f\left(x\right) = \left\{
            \begin{array}{lr}
               \alpha x & : x \lt 0 \\
                      x & : x \geq 0 \\
            \end{array}
        \right.\\

    Parameters
    ----------
    alpha : float
        slope coefficient for the negative half axis. Must be >= 0.

    Inputs:
        - **data**: input tensor with arbitrary shape.
    Outputs:
        - **out**: output tensor with the same shape as `data`.
    """
    def __init__(self, alpha, **kwargs):
        assert alpha >= 0, "Slope coefficient for LeakyReLU must be no less than 0."
        super(LeakyReLU, self).__init__(**kwargs)
        self._alpha = alpha

    def hybrid_forward(self, F, x):
        return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')

    def __repr__(self):
        return '{name}({alpha})'.format(name=type(self).__name__,
                                        alpha=self._alpha)
class Embedding(HybridBlock):
    r"""Turns non-negative integers (indexes/tokens) into dense vectors
    of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]

    Parameters
    ----------
    input_dim : int
        Size of the vocabulary, i.e. maximum integer index + 1.
    output_dim : int
        Dimension of the dense embedding.
    dtype : str or np.dtype, default 'float32'
        Data type of output embeddings.
    weight_initializer : Initializer
        Initializer for the `embeddings` matrix.

    Inputs:
        - **data**: 2D tensor with shape: `(x1, x2)`.
    Output:
        - **out**: 3D tensor with shape: `(x1, x2, output_dim)`.
    """
    def __init__(self, input_dim, output_dim, dtype='float32',
                 weight_initializer=None, **kwargs):
        super(Embedding, self).__init__(**kwargs)
        # Operator kwargs are kept around for both forward and __repr__.
        self._kwargs = dict(input_dim=input_dim, output_dim=output_dim,
                            dtype=dtype)
        self.weight = self.params.get('weight', shape=(input_dim, output_dim),
                                      init=weight_initializer,
                                      allow_deferred_init=True)

    def hybrid_forward(self, F, x, weight):
        return F.Embedding(x, weight, name='fwd', **self._kwargs)

    def __repr__(self):
        return '{block_name}({input_dim} -> {output_dim}, {dtype})'.format(
            block_name=type(self).__name__, **self._kwargs)
class Flatten(HybridBlock):
    r"""Flattens the input to two dimensional.

    Inputs:
        - **data**: input tensor with arbitrary shape `(N, x1, x2, ..., xn)`
    Output:
        - **out**: 2D tensor with shape: `(N, x1 \cdot x2 \cdot ... \cdot xn)`
    """
    def __init__(self, **kwargs):
        super(Flatten, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # Keep the batch axis (0 = same as input) and collapse the rest (-1).
        return x.reshape((0, -1))

    def __repr__(self):
        return type(self).__name__
class Lambda(Block):
    r"""Wraps an operator or an expression as a Block object.

    Parameters
    ----------
    function : str or function
        Function used in lambda must be one of the following:
        1) the name of an operator that is available in ndarray. For example::
            block = Lambda('tanh')
        2) a function that conforms to "def function(*args)". For example::
            block = Lambda(lambda x: nd.LeakyReLU(x, slope=0.1))

    Inputs:
        - ** *args **: one or more input data. Their shapes depend on the function.
    Output:
        - ** *outputs **: one or more output data. Their shapes depend on the function.
    """
    def __init__(self, function, prefix=None):
        super(Lambda, self).__init__(prefix=prefix)
        if isinstance(function, str):
            # A string names an operator on the ndarray module.
            assert hasattr(nd, function), \
                "Function name %s is not found in ndarray." % function
            self._func_impl = getattr(nd, function)
        elif callable(function):
            self._func_impl = function
        else:
            raise ValueError(
                "Unrecognized function in lambda: {} of type {}"
                .format(function, type(function)))

    def forward(self, *args):
        return self._func_impl(*args)

    def __repr__(self):
        return '{name}({function})'.format(name=type(self).__name__,
                                           function=self._func_impl.__name__)
class HybridLambda(HybridBlock):
    r"""Wraps an operator or an expression as a HybridBlock object.
    Parameters
    ----------
    function : str or function
        Function used in lambda must be one of the following:
        1) the name of an operator that is available in both symbol and ndarray. For example::
            block = HybridLambda('tanh')
        2) a function that conforms to "def function(F, data, *args)". For example::
            block = HybridLambda(lambda F, x: F.LeakyReLU(x, slope=0.1))
    Inputs:
        - ** *args **: one or more input data. First argument must be symbol or ndarray.
          Their shapes depend on the function.
    Output:
        - ** *outputs **: one or more output data. Their shapes depend on the function.
    """
    def __init__(self, function, prefix=None):
        super(HybridLambda, self).__init__(prefix=prefix)
        if isinstance(function, str):
            # The named operator must exist for both execution modes.
            assert hasattr(nd, function) and hasattr(sym, function), \
                "Function name %s is not found in symbol/ndarray." % function
            # Dispatch on F at call time: F is `sym` during symbolic
            # (hybridized) execution and `nd` during imperative execution.
            func_dict = {sym: getattr(sym, function), nd: getattr(nd, function)}
            self._func = lambda F, *args: func_dict[F](*args)
            self._func_name = function
        elif callable(function):
            self._func = function
            self._func_name = function.__name__
        else:
            raise ValueError(
                "Unrecognized function in lambda: {} of type {}"
                .format(function, type(function)))
    def hybrid_forward(self, F, x, *args):
        return self._func(F, x, *args)
    def __repr__(self):
        return '{name}({function})'.format(name=self.__class__.__name__,
                                           function=self._func_name)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from numpy import linalg as LA
from theano import tensor as T
import theano
from theano.tensor.shared_randomstreams import RandomStreams
from deepy import NeuralClassifier
from deepy.utils import get_activation
from deepy.core.disconnected_grad import disconnected_grad
from deepy.utils.functions import FLOATX
from deepy.layers import NeuralLayer
from examples.attention_models.gaussian_sampler import SampleMultivariateGaussian
import theano.tensor.signal.downsample
class FirstGlimpseLayer(NeuralLayer):
    """Recurrent visual-attention ("glimpse") layer for 28x28 MNIST images.

    Picks an initial focus location from a 4x4 max-pooled view of the image,
    then runs a 5-step glimpse RNN; the final hidden state is classified into
    10 classes. When REINFORCE is enabled, glimpse locations are sampled from
    a Gaussian centered on the predicted location and the score-function
    gradients for W_l / W_f are exposed as self.wl_grad / self.wf_grad.

    NOTE(review): `_setup_functions` binds `self._relu` to the "tanh"
    activation, so every "_relu" call below is actually tanh -- confirm this
    is intentional before renaming.
    """
    def __init__(self, activation='tanh', std=0.1, disable_reinforce=False, random_glimpse=False):
        # std: diagonal of the covariance used when sampling glimpse locations.
        self.disable_reinforce = disable_reinforce
        self.random_glimpse = random_glimpse
        self.gaussian_std = std
        # 10 output units, one per class.
        super(FirstGlimpseLayer, self).__init__(10, activation)
    def init(self, config, vars, x, input_n, id="UNKNOWN"):
        # Framework hook: store the graph context, then build parameters and
        # the symbolic output expression.
        self._config = config
        self._vars = vars
        self.input_n = input_n
        self.id = id
        self.x = x
        self._setup_params()
        self._setup_functions()
        self.connected = True
    def _first_glimpse_sensor(self, x_t):
        """
        Compute first glimpse position using down-sampled image.
        """
        # Linear readout of the 7x7 (max-pooled) image gives the mean location.
        downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4))
        downsampled_img = downsampled_img.flatten()
        first_l = T.dot(downsampled_img, self.W_f)
        if self.disable_reinforce:
            # No REINFORCE: pass W_f through as a placeholder "gradient".
            wf_grad = self.W_f
            if self.random_glimpse:
                first_l = self.srng.uniform((2,), low=-1.7, high=1.7)
        else:
            # Sample a location, then take the score-function gradient of the
            # log-density w.r.t. W_f (the sample itself is detached).
            sampled_l_t = self._sample_gaussian(first_l, self.cov)
            sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), first_l)
            wf_grad = T.grad(T.log(sampled_pdf), self.W_f)
            first_l = sampled_l_t
        return first_l, wf_grad
    def _refined_glimpse_sensor(self, x_t, l_p):
        """
        Parameters:
            x_t - 28x28 image
            l_p - 2x1 focus vector
        Returns:
            7*14 matrix
        """
        # Turn l_p to the left-top point of rectangle
        # (scale from roughly [-1.7, 1.7] into pixel coordinates, then clamp
        # so the 7x7 crop stays inside the image).
        l_p = l_p * 6.67 + 14 - 4
        l_p = T.cast(T.round(l_p), "int32")
        l_p = l_p * (l_p >= 0)
        l_p = l_p * (l_p < 21) + (l_p >= 21) * 20
        glimpse_1 = x_t[l_p[0]: l_p[0] + 7][:, l_p[1]: l_p[1] + 7]
        return glimpse_1
    def _multi_gaussian_pdf(self, vec, mean):
        # 2-D Gaussian density with the (fixed) covariance from _setup_params.
        norm2d_var = ((1.0 / T.sqrt((2*np.pi)**2 * self.cov_det_var)) *
                      T.exp(-0.5 * ((vec-mean).T.dot(self.cov_inv_var).dot(vec-mean))))
        return norm2d_var
    def _glimpse_network(self, x_t, l_p):
        """
        Encode the glimpse patch and its location into a single feature vector.
        """
        sensor_output = self._refined_glimpse_sensor(x_t, l_p)
        sensor_output = T.flatten(sensor_output)
        # Two separate encoders (patch and location), merged by a third layer.
        h_g = self._relu(T.dot(sensor_output, self.W_g0))
        h_l = self._relu(T.dot(l_p, self.W_g1))
        g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl))
        return g
    def _location_network(self, h_t):
        """
        Parameters:
            h_t - 256x1 vector
        Returns:
            2x1 focus vector
        """
        return T.dot(h_t, self.W_l)
    def _action_network(self, h_t):
        """
        Parameters:
            h_t - 256x1 vector
        Returns:
            10x1 vector
        """
        z = self._relu(T.dot(h_t, self.W_a) + self.B_a)
        return self._softmax(z)
    def _core_network(self, l_p, h_p, x_t):
        """
        Parameters:
            x_t - 28x28 image
            l_p - 2x1 focus vector
            h_p - 256x1 vector
        Returns:
            h_t, 256x1 vector
        """
        # One RNN step: encode glimpse, update hidden state, emit the next
        # location and a class distribution.
        g_t = self._glimpse_network(x_t, l_p)
        h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h)
        l_t = self._location_network(h_t)
        if not self.disable_reinforce:
            # Score-function gradient for W_l, mirroring _first_glimpse_sensor.
            sampled_l_t = self._sample_gaussian(l_t, self.cov)
            sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), l_t)
            wl_grad = T.grad(T.log(sampled_pdf), self.W_l)
        else:
            sampled_l_t = l_t
            wl_grad = self.W_l
        if self.random_glimpse and self.disable_reinforce:
            sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7)
        a_t = self._action_network(h_t)
        return sampled_l_t, h_t, a_t, wl_grad
    def _output_func(self):
        # Unroll the glimpse RNN for 5 steps starting from the first glimpse;
        # the decision is the argmax of the final step's class distribution.
        self.x = self.x.reshape((28, 28))
        first_l, wf_grad = self._first_glimpse_sensor(self.x)
        [l_ts, h_ts, a_ts, wl_grads], _ = theano.scan(fn=self._core_network,
                                                      outputs_info=[first_l, self.h0, None, None],
                                                      non_sequences=[self.x],
                                                      n_steps=5)
        self.positions = l_ts
        self.last_decision = T.argmax(a_ts[-1])
        # Average the per-step W_l gradients over the unrolled sequence.
        wl_grad = T.sum(wl_grads, axis=0) / wl_grads.shape[0]
        self.wl_grad = wl_grad
        self.wf_grad = wf_grad
        return a_ts[-1].reshape((1,10))
    def _setup_functions(self):
        # NOTE(review): "_relu" is bound to tanh (see class docstring).
        self._relu = get_activation("tanh")
        self._tanh = get_activation("tanh")
        self._softmax = get_activation("softmax")
        self.output_func = self._output_func()
    def _setup_params(self):
        self.srng = RandomStreams(seed=234)
        # Fixed sampling covariance (diagonal); inverse/determinant cached as
        # shared variables for the density in _multi_gaussian_pdf.
        self.large_cov = np.array([[0.06,0],[0,0.06]], dtype=FLOATX)
        self.small_cov = np.array([[self.gaussian_std,0],[0,self.gaussian_std]], dtype=FLOATX)
        self.cov = theano.shared(np.array(self.small_cov, dtype=FLOATX))
        self.cov_inv_var = theano.shared(np.array(LA.inv(self.small_cov), dtype=FLOATX))
        self.cov_det_var = theano.shared(np.array(LA.det(self.small_cov), dtype=FLOATX))
        self._sample_gaussian = SampleMultivariateGaussian()
        self.W_g0 = self.create_weight(7 * 7, 128, label="g0")
        self.W_g1 = self.create_weight(2, 128, label="g1")
        self.W_g2_hg = self.create_weight(128, 256, label="g2_hg")
        self.W_g2_hl = self.create_weight(128, 256, label="g2_hl")
        self.W_h_g = self.create_weight(256, 256, label="h_g")
        self.W_h = self.create_weight(256, 256, label="h")
        self.B_h = self.create_bias(256, label="h")
        self.h0 = self.create_vector(256, "h0")
        self.l0 = self.create_vector(2, "l0")
        self.l0.set_value(np.array([-1, -1], dtype=FLOATX))
        self.W_l = self.create_weight(256, 2, label="l")
        # Shrink W_l so initial location predictions stay near the center.
        self.W_l.set_value(self.W_l.get_value() / 10)
        self.B_l = self.create_bias(2, label="l")
        self.W_a = self.create_weight(256, 10, label="a")
        self.B_a = self.create_bias(10, label="a")
        self.W_f = self.create_weight(7 * 7, 2, label="f")
        # W_l and W_f are trained via the manually-exposed REINFORCE gradients,
        # so they live in self.parameters rather than self.W.
        self.W = [self.W_g0, self.W_g1, self.W_g2_hg, self.W_g2_hl, self.W_h_g, self.W_h, self.W_a]
        self.B = [self.B_h, self.B_a]
        self.parameters = [self.W_l, self.W_f]
def get_network(model=None, std=0.005, disable_reinforce=False, random_glimpse=False):
    """Build the baseline glimpse classifier.

    Parameters:
        model - optional path to saved parameters; loaded only if the file exists
    Returns:
        network - a NeuralClassifier with one FirstGlimpseLayer stacked on it
    """
    glimpse_layer = FirstGlimpseLayer(std=std,
                                      disable_reinforce=disable_reinforce,
                                      random_glimpse=random_glimpse)
    network = NeuralClassifier(input_dim=28 * 28)
    network.stack_layer(glimpse_layer)
    if model and os.path.exists(model):
        network.load_params(model)
    return network
| |
#!/usr/bin/env python
""" This runs a sequence of commands on a remote host using SSH. It runs a
simple system checks such as uptime and free to monitor the state of the remote
host.
./monitor.py [-s server_hostname] [-u username] [-p password]
-s : hostname of the remote server to login to.
    -u : username to use for login.
    -p : Password to use for login.
Example:
This will print information about the given host:
./monitor.py -s www.example.com -u mylogin -p mypassword
It works like this:
Login via SSH (This is the hardest part).
Run and parse 'uptime'.
Run 'iostat'.
Run 'vmstat'.
Run 'netstat'
Run 'free'.
Exit the remote host.
"""
import os, sys, time, re, getopt, getpass
import traceback
import pexpect
#
# Some constants.
#
# Regexes handed to pexpect's expect(); matched against the remote output.
COMMAND_PROMPT = '[>#\$] ' ### This is way too simple for industrial use -- we will change it ASAP.
TERMINAL_PROMPT = '(?i)terminal type\?'
TERMINAL_TYPE = 'vt100'
# This is the prompt we get if SSH does not have the remote host's public key stored in the cache.
SSH_NEWKEY = '(?i)are you sure you want to continue connecting'
def exit_with_usage():
    """Print the module docstring (the usage text) and hard-exit.

    Uses os._exit so no cleanup handlers run after a usage error.
    """
    usage_text = globals()['__doc__']
    print(usage_text)
    os._exit(1)
def main():
    """Log in to a remote host over SSH and print a short health report.

    Runs uname, uptime, iostat, vmstat, free (Linux only), df and lsof on the
    remote host, printing each command's output, then logs out.

    Host, user and password come from the command line (see the module
    docstring) or are prompted for interactively. Side effects: the whole
    session is logged to 'monitor.log'; exits the process on bad usage or
    login failure.
    """
    global COMMAND_PROMPT, TERMINAL_PROMPT, TERMINAL_TYPE, SSH_NEWKEY
    ######################################################################
    ## Parse the options, arguments, get ready, etc.
    ######################################################################
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'h?s:u:p:', ['help','h','?'])
    except Exception as e:
        print(str(e))
        exit_with_usage()
    options = dict(optlist)
    if len(args) > 1:
        exit_with_usage()
    if [elem for elem in options if elem in ['-h','--h','-?','--?','--help']]:
        print("Help:")
        exit_with_usage()
    if '-s' in options:
        host = options['-s']
    else:
        host = input('hostname: ')
    if '-u' in options:
        user = options['-u']
    else:
        user = input('username: ')
    if '-p' in options:
        password = options['-p']
    else:
        password = getpass.getpass('password: ')
    #
    # Login via SSH
    #
    LOGFILE = open('monitor.log','wb')
    child = pexpect.spawn('ssh -l %s %s'%(user, host),logfile=LOGFILE)
    i = child.expect([pexpect.TIMEOUT, SSH_NEWKEY, COMMAND_PROMPT, '(?i)password: '])
    if i == 0: # Timeout
        print('ERROR! could not login with SSH. Here is what SSH said:')
        print(child.before, child.after)
        print(str(child))
        sys.exit (1)
    elif i == 1: # In this case SSH does not have the public key cached.
        child.sendline ('yes')
        child.expect ('(?i)password: ')
    elif i == 2:
        # This may happen if a public key was setup to automatically login.
        # But beware, the COMMAND_PROMPT at this point is very trivial and
        # could be fooled by some output in the MOTD or login message.
        pass
    # Reached the password prompt.
    child.sendline(password)
    # Now we are either at the command prompt or
    # the login process is asking for our terminal type.
    i = child.expect ([COMMAND_PROMPT, TERMINAL_PROMPT, '(?i)password: '])
    if i == 1:
        child.sendline (TERMINAL_TYPE)
        child.expect (COMMAND_PROMPT)
    elif i == 2:
        # A second password prompt means the password was rejected.
        print('ERROR! could not login with SSH. Here is what SSH said:')
        print(child.buffer)
        print(str(child))
        sys.exit (1)
    # Reached the shell prompt.
    #
    # Set command prompt to something more unique so later expects are reliable.
    #
    COMMAND_PROMPT = "\[PEXPECT\]\$ "
    child.sendline ("PS1='[PEXPECT]\$ '") # In case of sh-style
    i = child.expect ([pexpect.TIMEOUT, COMMAND_PROMPT], timeout=10)
    if i == 0:
        print("# Couldn't set sh-style prompt -- trying csh-style.")
        child.sendline ("set prompt='[PEXPECT]\$ '")
        i = child.expect ([pexpect.TIMEOUT, COMMAND_PROMPT], timeout=10)
        if i == 0:
            print("Failed to set command prompt using sh or csh style.")
            print("Response was:")
            print(child.before)
            sys.exit (1)
    # Now we should be at the command prompt and ready to run some commands.
    print('---------------------------------------')
    print('Report of commands run on remote host.')
    print('---------------------------------------')
    # Run uname; remember whether the remote end is Linux ('free' is
    # Linux-only).
    child.sendline ('uname -a')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    if 'linux' in child.before.lower():
        LINUX_MODE = 1
    else:
        LINUX_MODE = 0
    # Run and parse 'uptime'.
    child.sendline ('uptime')
    child.expect('up\s+(.*?),\s+([0-9]+) users?,\s+load averages?: ([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9])')
    duration, users, av1, av5, av15 = child.match.groups()
    # Pull days/hours/minutes out of uptime's free-form duration string.
    days = '0'
    hours = '0'
    mins = '0'
    if 'day' in duration:
        child.match = re.search('([0-9]+)\s+day',duration)
        days = str(int(child.match.group(1)))
    if ':' in duration:
        child.match = re.search('([0-9]+):([0-9]+)',duration)
        hours = str(int(child.match.group(1)))
        mins = str(int(child.match.group(2)))
    if 'min' in duration:
        child.match = re.search('([0-9]+)\s+min',duration)
        mins = str(int(child.match.group(1)))
    print()
    print('Uptime: %s days, %s users, %s (1 min), %s (5 min), %s (15 min)' % (
        duration, users, av1, av5, av15))
    child.expect (COMMAND_PROMPT)
    # Run iostat.
    child.sendline ('iostat')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    # Run vmstat.
    child.sendline ('vmstat')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    # Run free.
    if LINUX_MODE:
        child.sendline ('free') # Linux systems only.
        child.expect (COMMAND_PROMPT)
        print(child.before)
    # Run df.
    child.sendline ('df')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    # Run lsof.
    child.sendline ('lsof')
    child.expect (COMMAND_PROMPT)
    print(child.before)
    # Now exit the remote host.
    child.sendline ('exit')
    index = child.expect([pexpect.EOF, "(?i)there are stopped jobs"])
    if index==1:
        child.sendline("exit")
        # BUG FIX: this was `child.expect(EOF)`, a NameError -- EOF lives on
        # the pexpect module, not in this namespace.
        child.expect(pexpect.EOF)
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Print the error plus the full traceback, then hard-exit so a hung
        # pexpect child cannot keep the process alive.
        print(str(e))
        traceback.print_exc()
        os._exit(1)
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 5 17:37:03 2014
@author: ibackus
"""
__version__ = "$Revision: 1 $"
# $Source$
# External modules
import subprocess
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import os
import re
# ICgen modules
from ICglobal_settings import global_settings
import isaac
def Qeff(ICobj, bins=None):
    """Radially binned effective Toomre Q for the gas of a generated snapshot.

    Computes Q = cs*omega / (pi*G*sigma) per gas particle, scales it by
    (h/r)**0.192 -- presumably a disk-thickness correction; TODO confirm the
    exponent against its source -- and bin-averages the result.

    Parameters: ICobj (must already have a .snapshot attribute), bins (radial
    bin edges; defaults to ICobj.sigma.r_bins). Returns (r_edges, Q1_binned).
    Raises ValueError when ICs have not been generated yet.
    """
    if bins is None:
        bins = ICobj.sigma.r_bins
    # Constants: unit-carrying 1.0s so the final expression is dimensionless.
    G = SimArray([1.0],'G')
    kB = SimArray([1.0], 'k')
    if not hasattr(ICobj, 'snapshot'):
        raise ValueError('Could not find snapshot. Must generate ICs first')
    snap = ICobj.snapshot
    T = snap.g['temp']
    r = snap.g['rxy']
    M = snap.s['mass']  # NOTE(review): unused in this function
    m = ICobj.settings.physical.m
    # Isothermal sound speed and angular speed (from tangential velocity).
    cs = np.sqrt(kB*T/m)
    omega = snap.g['vt']/r
    sigma = ICobj.sigma(r)
    # Disk scale height: binned estimate, extrapolated back to each particle r.
    r_edges, h_binned = isaac.height(snap, bins=bins)
    r_cent = (r_edges[1:] + r_edges[0:-1])/2
    h_spl = isaac.extrap1d(r_cent, h_binned)
    h = SimArray(h_spl(r), h_binned.units)
    Q = (cs*omega/(np.pi*G*sigma)).in_units('1')
    Q1 = Q * ((h/r).in_units('1'))**0.192
    dummy, Q1_binned, dummy2 = isaac.binned_mean(r, Q1, bins=r_edges)
    return r_edges, Q1_binned
class larray(list):
    """A list subclass that remembers the shape of the array it was built from.

    Produced by ICgen_utils.listify: chunks of the flattened array are
    appended like ordinary list items, and delistify() reassembles the
    original array.

    USAGE:

    Creating an larray object:
        a = larray()       # blank larray
        a = larray(shape)  # sets a.shape = shape

    Then one can append to an larray just as with a list:
        a.append(stuff)

    To return a normal array:
        array = a.delistify()
    """
    def __init__(self, shape=None):
        super(larray, self).__init__()
        self.shape = shape

    def delistify(self):
        """Return an array made from self. See ICgen_utils.delistify"""
        return delistify(self)
def listify(array, max_element=10**7):
    """
    Breaks up an array or SimArray into chunks and saves as an larray object
    (essentially, a list). Useful for pickling very large arrays which otherwise
    may throw an error with pickle.

    Whenever possible the array is NOT copied, rather a view is returned. This
    depends on the functionality of array.ravel() (see numpy.ravel)

    **ARGUMENTS**

    array : array_like or SimArray
        Input array to listify
    max_element : int
        Maximum number of elements per chunk (i.e., per list item)

    **RETURNS**

    list_array : larray or array_like
        A listified array, or the input array unchanged when its total
        element count is <= max_element (no chunking needed).

    See Also : ICgen_utils.delistify
    """
    shape = array.shape
    n_elements = np.prod(shape)
    # Small arrays pass straight through: nothing to chunk.
    if n_elements <= max_element:
        return array

    out_list = larray(shape)
    flat = array.ravel()
    chunk_size = int(max_element)
    # Slice the flattened array into consecutive chunks; slices of the
    # raveled array are views when ravel() could avoid a copy.
    # (The original kept an unused chunk counter `i`; removed.)
    for start in range(0, int(n_elements), chunk_size):
        out_list.append(flat[start:start + chunk_size])
    return out_list
def delistify(in_list):
    """
    Reverses listify.

    Takes an larray object (a listified array) and returns the original array

    **ARGUMENTS**

    in_list : larray
        Listified array

    **RETURNS**

    array : array_like or SimArray
        array from the original data

    See Also : ICgen_utils.listify
    """
    # Anything that is not an larray was never chunked: pass it through.
    if not isinstance(in_list, larray):
        return in_list

    # Concatenate the chunks, preserving SimArray units when present,
    # and restore the recorded original shape.
    if isinstance(in_list[0], SimArray):
        joined = SimArray(np.concatenate(in_list), in_list[0].units)
    else:
        joined = np.concatenate(in_list)
    return joined.reshape(in_list.shape)
def est_eps(smoothlength_file, nstar=1):
    """
    Estimate gravitational softening length (eps) from a ChaNGa output
    .smoothlength file. eps is estimated as 1/2 the mean smoothing length,
    excluding the star particles.

    **ARGUMENTS**

    smoothlength_file : str
        Filename of the .smoothlength file.  First line is the total particle
        count; each subsequent line is one particle's smoothing length.
    nstar : int
        Number of star particles present (assumed to be the LAST nstar
        entries in the file)

    **RETURNS**

    eps : float
        Estimate of the gravitational softening length in simulation units
    """
    # Open ChaNGa output file containing smoothing lengths for all particles
    f = open(smoothlength_file, 'r')
    try:
        # Total number of particles (incl. star)
        nParticles = int(f.readline().strip())
        # Allocate and fill the smoothing length array
        smoothlength = np.zeros(nParticles, dtype=np.float32)
        for i, line in enumerate(f):
            smoothlength[i] = float(line.strip())
    finally:
        f.close()
    # Mean smoothing length over the non-star particles.
    # Bug fix: the original subtracted only smoothlength[-nstar] (a single
    # entry) even when nstar > 1; subtract the sum of ALL nstar star entries.
    mean_smooth = (smoothlength.sum() - smoothlength[-nstar:].sum()) \
        / (nParticles - nstar)
    eps = mean_smooth / 2
    return eps
def est_time_step(param_name, preset='default', dDelta0=100, changa_args='', runner_args=''):
    """
    A routine to automatically estimate a reasonable time-step size for ChaNGa.

    The idea is to have about half the particles fall into the lowest rung (ie
    the big time step). This is done by calculating the rung distribution for
    a large time step by running ChaNGa and killing ChaNGa once it has output
    rung distribution.

    NOTE: this is still fairly alpha. A better version would probably not
    just run ChaNGa and then try to kill it. To be safe, a local ChaNGa preset
    should probably be used.

    **ARGUMENTS**

    param_name : str
        Filename for a ChaNGa .param file which defines parameters for the
        snapshot. The snapshot must already be saved to disk
    preset : str
        changa_runner preset to use. See ICglobal_settings.global_settings
    dDelta0 : int or float
        Some large time step that should place all the particles at higher
        rungs.
    changa_args : str
        Additional command line arguments to pass to changa. CANNOT include
        -n (number of time steps) or -dt (timestep size)
    runner_args : str
        Additional command line arguments to pass to the runner, ie to
        charmrun or mpirun

    **RETURNS**

    dDelta : float
        Estimated reasonable time step that places half the particles on the
        lowest rung (ie the big time step)
    """
    # Preset tuple layout (see global_settings): [0]=runner binary,
    # [2]=ChaNGa binary name -- used below to pkill the right processes.
    settings = global_settings['changa_presets'][preset]
    changa_name = settings[2]
    runner_name = settings[0]
    # Force a single step at the (large) trial time step dDelta0.
    changa_args += ' -n 1 -dt {0}'.format(dDelta0)
    command = changa_command(param_name, preset, changa_args=changa_args, runner_args=runner_args)
    rung_line = ''
    # Run ChaNGa quietly and scan its stdout for the rung distribution line.
    p = changa_run(command, verbose=False)
    for line in iter(p.stdout.readline, ''):
        if 'rung distribution' in line.lower():
            # Kill the runner
            # NOTE(review): pkill -9 by name will also kill any unrelated
            # process with the same name on this machine.
            kill_command = 'pkill -9 ' + runner_name
            pkill = subprocess.Popen(kill_command.split(), \
            stdout=subprocess.PIPE)
            pkill.wait()
            # Kill ChaNGa
            kill_command = 'pkill -9 ' + changa_name
            pkill = subprocess.Popen(kill_command.split(), \
            stdout=subprocess.PIPE)
            pkill.wait()
            rung_line = line.strip()
            break
    if rung_line == '':
        raise RuntimeError('ChaNGa failed to output rung distribution')
    # Parse the per-rung particle counts out of the distribution line.
    rung_list = re.findall('\d+', rung_line)
    rung_hist = np.array(rung_list).astype(float)
    rung_edges = np.arange(len(rung_hist) + 1, dtype=float)
    s = np.cumsum(rung_hist)
    Ntot = s[-1]
    # Find first bin which gives us more than half the total number
    for i, n in enumerate(s):
        if n >= 0.5*Ntot:
            ind = i
            break
    # Calculate the median rung (linear interpolation within the bin)
    rung_med = rung_edges[ind] + (0.5*Ntot - s[ind-1])/rung_hist[ind]
    # Now estimate a time step that will fit about half the particles on the
    # lowest rung (ie the big time step)
    dDelta = dDelta0 * 2.0**(-rung_med+1)
    return dDelta
def changa_run(command, verbose = True, logfile_name=None, force_wait=False):
    """
    A wrapper for running ChaNGa

    **ARGUMENTS**

    command : str
        A full command line command for running ChaNGa. Can be produced from
        defaults using ICgen_utils.changa_command
    verbose : bool
        (optional) Flag for printing ChaNGa output to stdout.
        If True - stdout is printed. This will effectively makes changa_run
        wait on ChaNGa completion
    logfile_name : str
        (optional) If set, saves ChaNGa output to file
    force_wait : bool
        (optional) Default = False
        If set, forces wait on ChaNGa before completion

    **RETURNS**

    p : subprocess.Popen
        A process object created by subprocess.Popen for the ChaNGa command
    """
    # Truncate any existing log file, then reopen in append mode for writing.
    # NOTE(review): the log file handle is never explicitly closed here.
    if logfile_name is not None:
        logfile = open(logfile_name, 'w')
        logfile.close()
        logfile = open(logfile_name, 'a')
    if verbose:
        # Stream ChaNGa's stdout line-by-line to our stdout (and the log);
        # this implicitly waits for ChaNGa to finish.
        output = subprocess.PIPE
        p = subprocess.Popen(command.split(), stderr=output, stdout=output)
        for line in iter(p.stdout.readline, ''):
            print line,
            if logfile_name is not None:
                logfile.write(line)
        p.wait()
    else:
        # Quiet mode: send output to the log file if given, else to a pipe
        # the caller can read from.
        if logfile_name is not None:
            output = logfile
        else:
            output = subprocess.PIPE
        p = subprocess.Popen(command.split(), stderr=output, stdout=output)
        if force_wait:
            p.wait()
    return p
def changa_command(param_name, preset=None, changa_bin=None, changa_args='', runner_args=''):
    """
    A utility for creating command line commands for running ChaNGa

    **ARGUMENTS**

    param_name : str
        Filename of the .param file used for ChaNGa
    preset : str
        if None, the default preset is used
        Presets are defined in global_settings
    changa_bin : str
        Path to the ChaNGa binary to use. If None, defaults are used
        Overrides preset binary
    changa_args : str
        Additional user supplied arguments for ChaNGa
    runner_args : str
        Additional user supplied arguments for the runner (ie charmrun or mpirun)

    **RETURNS**

    command : str
        A command line command for running ChaNGa
    """
    # Contains all the presets
    preset_dict = global_settings['changa_presets']
    # Load the preset.  Preset tuple layout: [0]=runner binary,
    # [1]=default runner args, [2]=ChaNGa binary, [3]=default ChaNGa args.
    if preset is None:
        preset = preset_dict['default']
    preset_list = preset_dict[preset]
    # Get full path to ChaNGa binary (resolved through the shell's `which`)
    if changa_bin is None:
        changa_bin = preset_list[2]
    changa_bin = os.popen('which ' + changa_bin).read().strip()
    if '' == changa_bin:
        # NOTE(review): Python-2-only raise syntax; would need parentheses
        # under Python 3.
        raise RuntimeError, 'Could not find ChaNGa. Try different preset'
    # Merge user defined extra arguments (user args appended after defaults)
    runner_args = ' '.join([preset_list[1], runner_args])
    changa_args = ' '.join([preset_list[3], changa_args])
    runner = preset_list[0]
    command = ' '.join([runner, runner_args, changa_bin, changa_args, param_name])
    # Collapse repeated whitespace left by empty argument strings.
    command = ' '.join(command.split())
    return command
def arg_cat(arg_list):
    """
    arg_str = arg_cat([args1, args2, ...])

    Concatenates a list of various command line arguments. arg_list should
    be a list containing command line argument strings (or pre-split token
    lists).

    Priority is given to later arguments. So arg_list[2] overwrites arg_list[1]
    if they share any flags.

    Tokens beginning with '-' or '+' are treated as flags followed by exactly
    one value; all other tokens are bare arguments.  Flag/value pairs are
    placed at the beginning of the output, bare arguments at the end.

    **EXAMPLES**

    args1 = '-n 20 +cd 13 ++b fire testit'
    args2 = '-n 20 +cd 300'
    arg_cat([args1, args2])

    returns:

    +cd 300 -n 20 testit ++b fire
    """
    args_dict = {}
    # Later argument strings overwrite earlier ones for shared flags.
    for args in arg_list:
        # Accept either a whitespace-separated string or a token list.
        if isinstance(args, str):
            args = args.split()
        counter = 0
        while counter < len(args):
            key = args[counter]
            if key[0] in '-+':
                # A flag followed by its value.
                args_dict[key] = args[counter + 1]
                counter += 2
            else:
                # A bare argument (no value).
                args_dict[key] = ''
                counter += 1
    # Assemble: flag/value pairs prepended, bare arguments appended.
    # (.items() instead of the Python-2-only .iteritems(); works on 2 and 3.)
    args_str = ''
    for key, val in args_dict.items():
        if val == '':
            # Tack on to the end
            args_str = ' '.join([args_str, key])
        else:
            # Place at beginning
            args_str = ' '.join([key, val, args_str])
    # Normalize whitespace.
    args_str = ' '.join(args_str.split())
    return args_str
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple replicas with synchronous update.
Please see accompanying README.md for details and instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
# Command-line configuration for distributed training, exposed via TF flags.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """parameter server jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """worker jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.')
tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            'Whether to log device placement.')
# Task ID is used to select the chief and also to access the local_step for
# each replica to check staleness of the gradients in SyncReplicasOptimizer.
tf.app.flags.DEFINE_integer(
    'task_id', 0, 'Task ID of the worker/replica running the training.')
# More details can be found in the SyncReplicasOptimizer class:
# tensorflow/python/training/sync_replicas_optimizer.py
# -1 means "aggregate one gradient per worker" (resolved inside train()).
tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,
                            """Number of gradients to collect before """
                            """updating the parameters.""")
tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60,
                            'Save interval seconds.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 180,
                            'Save summaries interval seconds.')
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# more guidance and discussion.
#
# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981
tf.app.flags.DEFINE_float('initial_learning_rate', 0.045,
                          'Initial learning rate.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0,
                          'Epochs after which learning rate decays.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94,
                          'Learning rate decay factor.')
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
def train(target, dataset, cluster_spec):
  """Train Inception on a dataset for a number of steps.

  Args:
    target: session target passed to tf.Session (e.g. the grpc:// URL of
      this worker's server).
    dataset: dataset object providing num_examples_per_epoch() and
      num_classes(), and consumed by image_processing.distorted_inputs.
    cluster_spec: tf.train.ClusterSpec describing the 'ps' and 'worker' jobs.
  """
  # Number of workers and parameter servers are inferred from the workers and ps
  # hosts string.
  num_workers = len(cluster_spec.as_dict()['worker'])
  num_parameter_servers = len(cluster_spec.as_dict()['ps'])
  # If no value is given, num_replicas_to_aggregate defaults to be the number of
  # workers.
  if FLAGS.num_replicas_to_aggregate == -1:
    num_replicas_to_aggregate = num_workers
  else:
    num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate
  # Both should be greater than 0 in a distributed training.
  assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
                                                         'num_parameter_servers'
                                                         ' must be > 0.')
  # Choose worker 0 as the chief. Note that any worker could be the chief
  # but there should be only one chief.
  is_chief = (FLAGS.task_id == 0)
  # Ops are assigned to worker by default.
  with tf.device('/job:worker/task:%d' % FLAGS.task_id):
    # Variables and its related init/assign ops are assigned to ps.
    with slim.scopes.arg_scope(
        [slim.variables.variable, slim.variables.global_step],
        device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
      # Create a variable to count the number of train() calls. This equals the
      # number of updates applied to the variables.
      global_step = slim.variables.global_step()
      # Calculate the learning rate schedule.
      num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                               FLAGS.batch_size)
      # Decay steps need to be divided by the number of replicas to aggregate.
      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
                        num_replicas_to_aggregate)
      # Decay the learning rate exponentially based on the number of steps.
      lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True)
      # Add a summary to track the learning rate.
      tf.summary.scalar('learning_rate', lr)
      # Create an optimizer that performs gradient descent.
      opt = tf.train.RMSPropOptimizer(lr,
                                      RMSPROP_DECAY,
                                      momentum=RMSPROP_MOMENTUM,
                                      epsilon=RMSPROP_EPSILON)
      images, labels = image_processing.distorted_inputs(
          dataset,
          batch_size=FLAGS.batch_size,
          num_preprocess_threads=FLAGS.num_preprocess_threads)
      # Number of classes in the Dataset label set plus 1.
      # Label 0 is reserved for an (unused) background class.
      num_classes = dataset.num_classes() + 1
      logits = inception.inference(images, num_classes, for_training=True)
      # Add classification loss.
      inception.loss(logits, labels)
      # Gather all of the losses including regularization losses.
      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
      total_loss = tf.add_n(losses, name='total_loss')
      if is_chief:
        # Compute the moving average of all individual losses and the
        # total loss.
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        loss_averages_op = loss_averages.apply(losses + [total_loss])
        # Attach a scalar summmary to all individual losses and the total loss;
        # do the same for the averaged version of the losses.
        for l in losses + [total_loss]:
          loss_name = l.op.name
          # Name each loss as '(raw)' and name the moving average version of the
          # loss as the original loss name.
          tf.summary.scalar(loss_name + ' (raw)', l)
          tf.summary.scalar(loss_name, loss_averages.average(l))
        # Add dependency to compute loss_averages.
        with tf.control_dependencies([loss_averages_op]):
          total_loss = tf.identity(total_loss)
      # Track the moving averages of all trainable variables.
      # Note that we maintain a 'double-average' of the BatchNormalization
      # global statistics.
      # This is not needed when the number of replicas are small but important
      # for synchronous distributed training with tens of workers/replicas.
      exp_moving_averager = tf.train.ExponentialMovingAverage(
          inception.MOVING_AVERAGE_DECAY, global_step)
      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())
      # Add histograms for model variables.
      for var in variables_to_average:
        tf.summary.histogram(var.op.name, var)
      # Create synchronous replica optimizer.
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=num_replicas_to_aggregate,
          total_num_replicas=num_workers,
          variable_averages=exp_moving_averager,
          variables_to_average=variables_to_average)
      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
      assert batchnorm_updates, 'Batchnorm updates are missing'
      batchnorm_updates_op = tf.group(*batchnorm_updates)
      # Add dependency to compute batchnorm_updates.
      with tf.control_dependencies([batchnorm_updates_op]):
        total_loss = tf.identity(total_loss)
      # Compute gradients with respect to the loss.
      grads = opt.compute_gradients(total_loss)
      # Add histograms for gradients.
      for grad, var in grads:
        if grad is not None:
          tf.summary.histogram(var.op.name + '/gradients', grad)
      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)
      with tf.control_dependencies([apply_gradients_op]):
        train_op = tf.identity(total_loss, name='train_op')
      # Get chief queue_runners and init_tokens, which is used to synchronize
      # replicas. More details can be found in SyncReplicasOptimizer.
      chief_queue_runners = [opt.get_chief_queue_runner()]
      init_tokens_op = opt.get_init_tokens_op()
      # Create a saver.
      saver = tf.train.Saver()
      # Build the summary operation based on the TF collection of Summaries.
      summary_op = tf.summary.merge_all()
      # Build an initialization operation to run below.
      init_op = tf.global_variables_initializer()
      # We run the summaries in the same thread as the training operations by
      # passing in None for summary_op to avoid a summary_thread being started.
      # Running summaries and training operations in parallel could run out of
      # GPU memory.
      sv = tf.train.Supervisor(is_chief=is_chief,
                               logdir=FLAGS.train_dir,
                               init_op=init_op,
                               summary_op=None,
                               global_step=global_step,
                               saver=saver,
                               save_model_secs=FLAGS.save_interval_secs)
      tf.logging.info('%s Supervisor' % datetime.now())
      sess_config = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement)
      # Get a session.
      sess = sv.prepare_or_wait_for_session(target, config=sess_config)
      # Start the queue runners.
      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
      sv.start_queue_runners(sess, queue_runners)
      tf.logging.info('Started %d queues for processing input data.',
                      len(queue_runners))
      if is_chief:
        sv.start_queue_runners(sess, chief_queue_runners)
        sess.run(init_tokens_op)
      # Train, checking for Nans. Concurrently run the summary operation at a
      # specified interval. Note that the summary_op and train_op never run
      # simultaneously in order to prevent running out of GPU memory.
      next_summary_time = time.time() + FLAGS.save_summaries_secs
      while not sv.should_stop():
        try:
          start_time = time.time()
          loss_value, step = sess.run([train_op, global_step])
          assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
          if step > FLAGS.max_steps:
            break
          duration = time.time() - start_time
          if step % 30 == 0:
            examples_per_sec = FLAGS.batch_size / float(duration)
            # Bug fix: add the missing space before '(' -- the implicit string
            # concatenation used to run the loss value into the throughput.
            format_str = ('Worker %d: %s: step %d, loss = %.2f '
                          '(%.1f examples/sec; %.3f sec/batch)')
            tf.logging.info(format_str %
                            (FLAGS.task_id, datetime.now(), step, loss_value,
                             examples_per_sec, duration))
          # Determine if the summary_op should be run on the chief worker.
          if is_chief and next_summary_time < time.time():
            tf.logging.info('Running Summary operation on the chief.')
            summary_str = sess.run(summary_op)
            sv.summary_computed(sess, summary_str)
            tf.logging.info('Finished running Summary operation.')
            # Determine the next time for running the summary.
            next_summary_time += FLAGS.save_summaries_secs
        except:
          # Bare except is deliberate here: log on the chief, then re-raise so
          # that even KeyboardInterrupt/SystemExit reach the supervisor.
          if is_chief:
            tf.logging.info('Chief got exception while running!')
          raise
      # Stop the supervisor. This also waits for service threads to finish.
      sv.stop()
      # Save after the training ends.
      if is_chief:
        saver.save(sess,
                   os.path.join(FLAGS.train_dir, 'model.ckpt'),
                   global_step=global_step)
| |
"""
This version works for the ESP game.
The program constructs visual features from VLFEAT SIFT output.
It does not use a temporary file to store information, to avoid overloading memory.

Usage:
@param1: image feature directory
@param2: word_feature target file
@param3: directory of unique labels
@param4: size (number of elements) of the dictionary storing word - features
"""
#To compute the topic modelling of image labels
#Note that before this step, images are already represented as "bag of visual words"
import glob, math, os, re, sys
import numpy as np
import gc
from scipy import io
# Module-level configuration/state, populated from sys.argv in __main__.
label_files = ''       # sorted list of label file names (see read_label_file)
num_descrs = -1        # feature dimensionality; -1 = infer from the first .mat file
labels_dir = ''        # directory holding the per-image label files
words_in_dic = 0       # running count of distinct words collected
MEMORY_LIMIT = 0       # dictionary-size threshold (spill-to-file path is disabled)
words_features_target_file = ''
DEBUG = False
#Initialization
def to_str(value):
    """Format a feature value: 10 decimal places when positive, otherwise
    no decimal places (zero prints as "0")."""
    fmt = "%.10f" if value > 0 else "%.0f"
    return fmt % value
#Store dictionary of words' features to temple file in the target dictionary
def store_to_file(words_features):
    """
    Spill the in-memory word->features dictionary to disk, merging it
    (element-wise addition) with any previously spilled data.

    Reads <target>_wftmpin.txt if present, writes the merged result to
    <target>_wftmpout.txt, then renames it back to _wftmpin.txt.
    """
    #open the current temple file and write to new file, then rename the file
    global words_features_target_file
    TMP_OUT = open ( words_features_target_file + '_wftmpout.txt','w')
    # mark: words already merged from the old temp file (so they are not
    # written a second time in the loop below).
    mark = {}
    if os.path.exists(words_features_target_file + '_wftmpin.txt'):
        TMP_IN = open (words_features_target_file+ '_wftmpin.txt','r')
        update_line = ''
        for line in TMP_IN:
            values = line.split()
            word = values.pop(0)
            update_line = ''
            if words_features.has_key(word):
                # Feature-vector length mismatch is unrecoverable: dump
                # diagnostics and abort.
                if (len(values) != len(words_features[word])):
                    print str(len(values)) + " " + str(len(words_features[word]))
                    print word
                    exit()
                update_line = word + ' '
                # Element-wise sum of the stored and the in-memory features.
                for i in range(0,len(words_features[word])):
                    update_line += to_str(words_features[word][i] + float(values[i])) + ' '
                TMP_OUT.write(update_line + '\n')
                mark[word] = True
            else:
                TMP_OUT.write(line + '\n')
        TMP_IN.close()
        os.remove(words_features_target_file + '_wftmpin.txt')
    # Append words that only exist in memory (not in the old temp file).
    for word in words_features.keys():
        if not mark.has_key(word):
            update_line = word + ' '
            for i in range(0, len(words_features[word])):
                update_line += to_str(words_features[word][i]) + ' '
            TMP_OUT.write(update_line + '\n')
    TMP_OUT.close()
    #Delete the old temple file and rename the new temple file to wftmpin.txt
    os.rename(words_features_target_file + '_wftmpout.txt', words_features_target_file + '_wftmpin.txt')
def get_label_info(index):
    """Return the label tokens of the index-th label file (one label per
    line, trailing newlines stripped)."""
    global labels_dir
    path = os.path.join(labels_dir, label_files[index])
    labels = []
    fh = open(path, 'r')
    for raw in fh:
        labels.append(raw.replace('\n', ''))
    fh.close()
    return labels
#Read the image feature given its key
def get_image_features(filename):
    """Load a .mat image-feature file and return the variable whose name is
    longer than 19 characters (loadmat's metadata keys such as __header__
    are all shorter, so this selects the feature matrix)."""
    contents = io.matlab.loadmat(filename)
    selected = ""
    # Keep the last long key seen, matching the original scan order.
    for name in contents.keys():
        if len(name) > 19:
            selected = name
    return contents.get(selected)
def get_words_features(images_features_path, words_features_target_file):
    """
    Build per-word (image label) visual features by summing the SIFT BOW
    features of every image tagged with that word, then write LMI-weighted
    feature lines ("word score score ...") to words_features_target_file.
    """
    global num_descrs
    global MEMORY_LIMIT
    # file_map is never used; file_list collects the .mat feature files whose
    # base name is longer than 20 characters.
    file_map = {}
    file_list = []
    for fname in os.listdir(images_features_path):
        if (str.endswith(fname, '.mat')):
            key, ext = fname.split('.')
            if (len(key) > 20):
                file_list.append(fname)
    print len(file_list)
    print "Finish reading file list....."
    iter = 0
    file_list.sort()
    words_features = {}
    # Infer the feature dimensionality from the first file if not given.
    if num_descrs == -1:
        filename = os.path.join(images_features_path, file_list[0])
        images_features = get_image_features(filename)
        num_descrs = len(images_features[0])
    # NOTE(review): images_features is only defined here when num_descrs was
    # -1; assumes every .mat block holds the same number of images. Verify.
    images_block_size = len(images_features)
    print "image block size is " + str(images_block_size) + "\n"
    # totFeats accumulates the column-wise totals over all counted features.
    totFeats = np.zeros((num_descrs), np.float32)
    print "Reading the information of images .....\n"
    words_in_dic = 0
    count_null_image = 0
    for key in file_list:
        filename = os.path.join(images_features_path, key)
        print "reading file name : " + filename
        images_features = get_image_features(filename)
        image_idx = 0
        # Global image index = position within this block + blocks already seen.
        image_idx_mult = iter*images_block_size
        for i in range(len(images_features)):
            image_idx = i + image_idx_mult
            image_row = get_label_info(image_idx); #
            for word in image_row:
                if (len(word.split())>1):
                    print "Exists a tag having 2 tokens!!!! - " + word
                    #exit()
                    #word = word.replace(' ','')
                # NaN in the first feature column marks a NULL image; such
                # images are skipped (and counted once per unseen word).
                if not words_features.has_key(word) and not math.isnan(images_features[i][0]):
                    words_features[word] = images_features[i]
                    totFeats = np.add(totFeats, images_features[i])
                    words_in_dic +=1
                elif not words_features.has_key(word) and math.isnan(images_features[i][0]):
                    count_null_image +=1
                elif not math.isnan(images_features[i][0]):
                    words_features[word] = np.add(words_features[word], images_features[i])
                    totFeats = np.add(totFeats, images_features[i])
            #endfor
        """
        If the dictionary consume a lot of RAM, we store it to the file
        Then restore the original state
        """
        """
        if words_in_dic >= MEMORY_LIMIT:
            store_to_file(words_features)
            words_in_dic = 0
            words_features = {}
            gc.collect()
        """
        iter +=1
        del images_features
        gc.collect()
    #endfor
    """
    #Now print the rest of dictionary words_features to the file
    store_to_file(words_features)
    words_features = {}
    gc.collect()
    """
    print "Finished reading images infor. - there's total " + str(count_null_image) + " images having NULL feature"
    # N: grand total of all accumulated feature counts (for LMI weighting).
    N = np.sum(totFeats)
    print "Constructing words (image labels) BOW visual features.....\n"
    #RAW_WF = open (words_features_target_file + '_wftmpin.txt','r')
    OFILE = open(words_features_target_file, 'w')
    progress_count = 0
    for word in words_features.keys():
        update_line = word + ' '
        raw_word_features = words_features[word]
        progress_count +=1
        if (DEBUG == True) and (progress_count % 500 == 0):
            print "DEBUG MODE: " + str(progress_count) + " words indexed...."
        wordOcc = sum(raw_word_features)
        colIdx = 0
        # Local Mutual Information score per feature column, clamped at 0.
        for word_feature in raw_word_features:
            if wordOcc != 0 and word_feature != 0.0 and totFeats[colIdx] != 0.0:
                totFeat = float(word_feature * N) / float(totFeats[colIdx] * wordOcc)
                lmi_score = float(word_feature) * float(math.log(totFeat))
                if lmi_score < 0:
                    lmi_score = 0.0
            else:
                lmi_score = 0.0
            colIdx +=1
            update_line += str(lmi_score) + ' '
        OFILE.write(update_line + '\n')
    OFILE.close()
    #RAW_WF.close()
    #os.remove(words_features_target_file + '_wftmpin.txt')
def read_label_file(labels_dir):
    """Return the sorted list of label file names in labels_dir (only names
    longer than 30 characters are considered label files)."""
    names = [entry for entry in os.listdir(labels_dir) if len(entry) > 30]
    names.sort()
    return names
# Script entry point: wire up module-level configuration from sys.argv and run
# the feature construction.
if __name__ == "__main__":
    # NOTE(review): `global` at module level is a no-op; assignments below
    # already target module globals. Kept as-is.
    global labels_dir
    global MEMORY_LIMIT
    global DEBUG
    global images_features_path
    global words_features_target_file
    global label_files
    gc.enable()
    DEBUG = True
    # Positional arguments (see module docstring): feature dir, output file,
    # label dir, dictionary size limit.
    images_features_path = sys.argv[1]
    words_features_target_file = sys.argv[2]
    labels_dir = sys.argv[3]
    MEMORY_LIMIT = int(sys.argv[4])
    label_files = read_label_file(labels_dir)
    print "Number of label files is " + str(len(label_files));
    get_words_features(images_features_path, words_features_target_file)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module would be added to the
MODEL_VARIABLES collection, if you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, reuse).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer,
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
# Get all model variables from a specific the layer, i.e 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
# Get all bias from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import scopes
# Collection containing all the variables created using slim.variables
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

  Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.

  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.
  """
  targets = [MODEL_VARIABLES] + ([VARIABLES_TO_RESTORE] if restore else [])
  for collection_name in targets:
    # Avoid duplicate entries in a collection.
    if var not in tf.get_collection(collection_name):
      tf.add_to_collection(collection_name, var)
def get_variables(scope=None, suffix=None):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return.
    suffix: an optional suffix for filtering the variables to return.

  Returns:
    a copied list of variables with scope and suffix.
  """
  found = list(tf.get_collection(MODEL_VARIABLES, scope))
  if suffix is None:
    return found
  return [v for v in found if v.op.name.endswith(suffix)]
def get_variables_to_restore():
  """Gets the list of variables to restore.

  Returns:
    a copied list of variables.
  """
  return list(tf.get_collection(VARIABLES_TO_RESTORE))
def get_variables_by_name(given_name, scope=None):
  """Gets the list of variables that were given that name.

  Args:
    given_name: name given to the variable without scope.
    scope: an optional scope for filtering the variables to return.

  Returns:
    a copied list of variables with the given name and prefix.
  """
  # Delegates to get_variables: the given name acts as an op-name suffix.
  return get_variables(suffix=given_name, scope=scope)
def get_unique_variable(name):
  """Gets the variable uniquely identified by that name.

  Args:
    name: a name that uniquely identifies the variable.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = tf.get_collection(tf.GraphKeys.VARIABLES, name)
  if not candidates:
    raise ValueError('Couldnt find variable %s' % name)
  for candidate in candidates:
    if candidate.op.name == name:
      return candidate
  # Bug fix: the original passed `name` as a second positional argument
  # (logging style) instead of %-formatting it, so the variable name never
  # appeared in the exception message.
  raise ValueError('Variable %s does not uniquely identify a variable' % name)
class VariableDeviceChooser(object):
  """Slim device chooser for variables.

  When using a parameter server it will assign them in a round-robin fashion.
  When not using a parameter server it allows GPU:0 placement otherwise CPU:0.
  """

  def __init__(self,
               num_parameter_servers=0,
               ps_device='/job:ps',
               placement='CPU:0'):
    """Initialize VariableDeviceChooser.

    Args:
      num_parameter_servers: number of parameter servers.
      ps_device: string representing the parameter server device.
      placement: string representing the placement of the variable either CPU:0
        or GPU:0. When using parameter servers forced to CPU:0.
    """
    self._num_ps = num_parameter_servers
    self._ps_device = ps_device
    # Parameter-server mode always pins variables to CPU:0.
    self._placement = 'CPU:0' if num_parameter_servers != 0 else placement
    self._next_task_id = 0

  def __call__(self, op):
    # `op` is accepted for tf.device compatibility but never inspected.
    parts = []
    if self._num_ps > 0:
      # Round-robin over parameter-server tasks.
      parts.append('%s/task:%d' % (self._ps_device, self._next_task_id))
      self._next_task_id = (self._next_task_id + 1) % self._num_ps
    parts.append('/%s' % self._placement)
    return ''.join(parts)
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
  """Fix the variable device to colocate its ops."""
  chosen = device
  if callable(chosen):
    # Resolve device functions against a stub NodeDef for this variable.
    full_name = tf.get_variable_scope().name + '/' + name
    chosen = chosen(tf.NodeDef(name=full_name, op='Variable'))
  # tf.device expects a string; normalize None to the empty device.
  return '' if chosen is None else chosen
@scopes.add_arg_scope
def global_step(device=''):
  """Returns the global step variable.

  Args:
    device: Optional device to place the variable. It can be an string or a
      function that is called to get the device for the variable.

  Returns:
    the tensor representing the global step variable.
  """
  existing = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
  if existing:
    # Reuse the step variable already registered in the graph.
    return existing[0]
  # First use: create the variable and register it in the collections used
  # for checkpoint restore, generic variable lookup and global-step lookup.
  step_collections = [
      VARIABLES_TO_RESTORE,
      tf.GraphKeys.VARIABLES,
      tf.GraphKeys.GLOBAL_STEP,
  ]
  with tf.device(variable_device(device, 'global_step')):
    return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                           initializer=tf.zeros_initializer,
                           trainable=False, collections=step_collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
             regularizer=None, trainable=True, collections=None, device='',
             restore=True):
  """Gets an existing variable with these parameters or creates a new one.

  It also add itself to a group with its name.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable if one is created.
    regularizer: a (Tensor -> Tensor or None) function; the result of
      applying it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    collections: A list of collection names to which the Variable will be added.
      Note that the variable is always also added to the tf.GraphKeys.VARIABLES
      and MODEL_VARIABLES collections.
    device: Optional device to place the variable. It can be an string or a
      function that is called to get the device for the variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.

  Returns:
    The created or existing variable.
  """
  # Every variable always belongs to VARIABLES and MODEL_VARIABLES, and
  # optionally to VARIABLES_TO_RESTORE; the set collapses duplicates.
  all_collections = set(collections or [])
  all_collections.update([tf.GraphKeys.VARIABLES, MODEL_VARIABLES])
  if restore:
    all_collections.add(VARIABLES_TO_RESTORE)
  with tf.device(variable_device(device, name)):
    return tf.get_variable(name, shape=shape, dtype=dtype,
                           initializer=initializer, regularizer=regularizer,
                           trainable=trainable, collections=all_collections)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
class VariableScopeTest(tf.test.TestCase):
  """Tests for tf.variable_scope / tf.get_variable interaction.

  Covers the default variable store, scope-level initializers, dtypes,
  regularizers and caching devices, reuse semantics, the name scopes
  produced by (re-)entering variable scopes, local variables, and
  device functions applied to variable creation.
  """
  def testGetVar(self):
    vs = variable_scope._get_default_variable_store()
    v = vs.get_variable("v", [1])
    v1 = vs.get_variable("v", [1])
    assert v == v1
  def testNameExists(self):
    vs = variable_scope._get_default_variable_store()
    # No check by default, so we can both create and get existing names.
    v = vs.get_variable("v", [1])
    v1 = vs.get_variable("v", [1])
    assert v == v1
    # When reuse is False, we fail when variables are already there.
    vs.get_variable("w", [1], reuse=False)  # That's ok.
    with self.assertRaises(ValueError):
      vs.get_variable("v", [1], reuse=False)  # That fails.
    # When reuse is True, we fail when variables are new.
    vs.get_variable("v", [1], reuse=True)  # That's ok.
    with self.assertRaises(ValueError):
      vs.get_variable("u", [1], reuse=True)  # That fails.
  def testNamelessStore(self):
    vs = variable_scope._get_default_variable_store()
    vs.get_variable("v1", [2])
    vs.get_variable("v2", [2])
    expected_names = ["%s:0" % name for name in ["v1", "v2"]]
    self.assertEqual(set(expected_names),
                     set([v.name for v in vs._vars.values()]))
  def testVarScopeInitializer(self):
    with self.test_session() as sess:
      init = tf.constant_initializer(0.3)
      with tf.variable_scope("tower") as tower:
        with tf.variable_scope("foo", initializer=init):
          v = tf.get_variable("v", [])
          sess.run(tf.initialize_variables([v]))
          self.assertAllClose(v.eval(), 0.3)
        with tf.variable_scope(tower, initializer=init):
          w = tf.get_variable("w", [])
          sess.run(tf.initialize_variables([w]))
          self.assertAllClose(w.eval(), 0.3)
  def testVarScopeDType(self):
    with self.test_session():
      with tf.variable_scope("tower") as tower:
        with tf.variable_scope("foo", dtype=tf.float16):
          v = tf.get_variable("v", [])
          self.assertEqual(v.dtype, dtypes.float16_ref)
        with tf.variable_scope(tower, dtype=tf.float16):
          w = tf.get_variable("w", [])
          self.assertEqual(w.dtype, dtypes.float16_ref)
  def testInitFromNonTensorValue(self):
    with self.test_session() as sess:
      v = tf.get_variable("v", initializer=4, dtype=tf.int32)
      sess.run(tf.initialize_variables([v]))
      self.assertAllClose(v.eval(), 4)
      w = tf.get_variable("w",
                          initializer=numpy.array([1, 2, 3]),
                          dtype=tf.int64)
      sess.run(tf.initialize_variables([w]))
      self.assertAllClose(w.eval(), [1, 2, 3])
      with self.assertRaises(TypeError):
        tf.get_variable("x", initializer={})
  def testVarScopeCachingDevice(self):
    with self.test_session():
      caching_device = "/job:moo"
      with tf.variable_scope("tower"):
        with tf.variable_scope("caching", caching_device=caching_device):
          v = tf.get_variable("v", [])
          self.assertTrue(v.value().device.startswith(caching_device))
          with tf.variable_scope("child"):
            v2 = tf.get_variable("v", [])
            self.assertTrue(v2.value().device.startswith(caching_device))
          with tf.variable_scope("not_cached", caching_device=""):
            v2_not_cached = tf.get_variable("v", [])
            self.assertFalse(
                v2_not_cached.value().device.startswith(caching_device))
          with tf.variable_scope(
              "not_cached_identity_device",
              caching_device=lambda op: op.device):
            v2_identity_device = tf.get_variable("v", [])
            self.assertFalse(
                v2_identity_device.value().device.startswith(caching_device))
          with tf.variable_scope("we_will_do_it_live") as vs_live:
            vs_live.set_caching_device("/job:live")
            v_live = tf.get_variable("v", [])
            self.assertTrue(v_live.value().device.startswith("/job:live"))
        v_tower = tf.get_variable("v", [])
        self.assertFalse(v_tower.value().device.startswith(caching_device))
  def testVarScopeRegularizer(self):
    with self.test_session() as sess:
      init = tf.constant_initializer(0.3)
      def regularizer1(v):
        return tf.reduce_mean(v) + 0.1
      def regularizer2(v):
        return tf.reduce_mean(v) + 0.2
      with tf.variable_scope("tower", regularizer=regularizer1) as tower:
        with tf.variable_scope("foo", initializer=init):
          v = tf.get_variable("v", [])
          sess.run(tf.initialize_variables([v]))
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(1, len(losses))
          self.assertAllClose(losses[0].eval(), 0.4)
        with tf.variable_scope(tower, initializer=init) as vs:
          u = tf.get_variable("u", [])
          vs.set_regularizer(regularizer2)
          w = tf.get_variable("w", [])
          # Next 3 variable not regularized to test disabling regularization.
          x = tf.get_variable("x", [], regularizer=tf.no_regularizer)
          with tf.variable_scope("baz", regularizer=tf.no_regularizer):
            y = tf.get_variable("y", [])
          vs.set_regularizer(tf.no_regularizer)
          z = tf.get_variable("z", [])
          # Check results.
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))
          sess.run(tf.initialize_variables([u, w, x, y, z]))
          self.assertAllClose(losses[0].eval(), 0.4)
          self.assertAllClose(losses[1].eval(), 0.4)
          self.assertAllClose(losses[2].eval(), 0.5)
        with tf.variable_scope("foo", reuse=True):
          v = tf.get_variable("v", [])  # "v" is already there, reused
          losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))  # No new loss added.
  def testInitializeFromValue(self):
    with self.test_session() as sess:
      init = tf.constant(0.1)
      w = tf.get_variable("v", initializer=init)
      sess.run(tf.initialize_variables([w]))
      self.assertAllClose(w.eval(), 0.1)
      with self.assertRaisesRegexp(ValueError, "shape"):
        # We disallow explicit shape specification when initializer is constant.
        tf.get_variable("u", [1], initializer=init)
      with tf.variable_scope("foo", initializer=init):
        # Constant initializer can be passed through scopes if needed.
        v = tf.get_variable("v")
        sess.run(tf.initialize_variables([v]))
        self.assertAllClose(v.eval(), 0.1)
      # Check that non-float32 initializer creates a non-float32 variable.
      init = tf.constant(1, dtype=tf.int32)
      t = tf.get_variable("t", initializer=init)
      self.assertEqual(t.dtype.base_dtype, tf.int32)
      # Raise error if `initializer` dtype and `dtype` are not identical.
      with self.assertRaisesRegexp(ValueError, "don't match"):
        tf.get_variable("s", initializer=init, dtype=tf.float64)
  def testControlDeps(self):
    with self.test_session() as sess:
      v0 = tf.get_variable("v0", [1], initializer=tf.constant_initializer(0))
      with tf.control_dependencies([v0.value()]):
        v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
        add = v1 + v0
      # v0 should be uninitialized.
      with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
        sess.run(v0)
      # We should be able to initialize and run v1 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual(1, sess.run(v1))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
        sess.run(v0)
      with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)
  def testControlFlow(self):
    with self.test_session() as sess:
      v0 = tf.get_variable("v0", [], initializer=tf.constant_initializer(0))
      var_dict = {}
      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
        var_dict["v1"] = v1
        return v1 + v0
      def var_in_else_clause():
        v2 = tf.get_variable("v2", [1], initializer=tf.constant_initializer(2))
        var_dict["v2"] = v2
        return v2 + v0
      add = control_flow_ops.cond(tf.less(v0, 10),
                                  var_in_then_clause,
                                  var_in_else_clause)
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual([1], sess.run(v1))
      sess.run(v2.initializer)
      self.assertEqual([2], sess.run(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
        sess.run(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)
  def testGetVariableScope(self):
    # Test the get_variable_scope() function and setting properties of result.
    with self.test_session() as sess:
      init = tf.constant_initializer(0.3)
      with tf.variable_scope("foo"):
        new_init1 = tf.get_variable_scope().initializer
        self.assertEqual(new_init1, None)
        # Check that we can set initializer like this.
        tf.get_variable_scope().set_initializer(init)
        v = tf.get_variable("v", [])
        sess.run(tf.initialize_variables([v]))
        self.assertAllClose(v.eval(), 0.3)
        # Check that we can set reuse.
        tf.get_variable_scope().reuse_variables()
        with self.assertRaises(ValueError):  # Fail, w does not exist yet.
          tf.get_variable("w", [1])
      # Check that the set initializer goes away.
      new_init = tf.get_variable_scope().initializer
      self.assertEqual(new_init, None)
  def testVarScope(self):
    with self.test_session():
      with tf.variable_scope("tower") as tower:
        self.assertEqual(tower.name, "tower")
        with tf.name_scope("scope") as sc:
          self.assertEqual(sc, "tower/scope/")
      with tf.variable_scope("foo"):
        with tf.variable_scope("bar") as bar:
          self.assertEqual(bar.name, "foo/bar")
          with tf.name_scope("scope") as sc:
            self.assertEqual(sc, "foo/bar/scope/")
      with tf.variable_scope("foo"):
        with tf.variable_scope(tower, reuse=True) as tower_shared:
          self.assertEqual(tower_shared.name, "tower")
          with tf.name_scope("scope") as sc:
            self.assertEqual(sc, "foo_1/tower/scope/")
  def testVarScopeNameScope(self):
    with self.test_session():
      with tf.name_scope("scope1"):
        with tf.variable_scope("tower") as tower:
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
        with tf.variable_scope(tower):  # Re-entering acts like another "tower".
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower_1/scope2/")
        with tf.variable_scope("tower"):  # Re-entering by string acts the same.
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower_2/scope2/")
      with tf.name_scope("scope3"):
        with tf.variable_scope("tower"):
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope3/tower/scope2/")
        with tf.variable_scope(tower):
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope3/tower_1/scope2/")
      root_var_scope = tf.get_variable_scope()
      with tf.name_scope("scope4"):
        with tf.variable_scope(root_var_scope):
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope4/scope2/")
  def testVarScopeOriginalNameScope(self):
    with self.test_session():
      with tf.name_scope("scope1"):
        with tf.variable_scope("tower") as tower:
          self.assertEqual(tower.original_name_scope, "scope1/tower/")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
      with tf.name_scope("scope2"):
        with tf.variable_scope(tower) as tower1:
          # Re-entering preserves original name scope.
          self.assertEqual(tower1.original_name_scope, "scope1/tower/")
          with tf.name_scope("foo") as sc2:
            self.assertEqual(sc2, "scope2/tower/foo/")
        # Test re-entering original name scope.
        with tf.name_scope(tower.original_name_scope):
          with tf.name_scope("bar") as sc3:
            self.assertEqual(sc3, "scope1/tower/bar/")
      with tf.name_scope("scope2"):
        with tf.variable_scope(tower):
          with tf.name_scope(tower.original_name_scope):
            with tf.name_scope("bar") as sc3:
              self.assertEqual(sc3, "scope1/tower/bar_1/")
  def testVarScopeObjectReuse(self):
    with self.test_session():
      vs = None
      with tf.variable_scope("jump", reuse=True) as scope:
        vs = scope
      with tf.variable_scope(vs) as jump:
        self.assertTrue(jump.reuse)
      with tf.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)
      with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertFalse(jump_no_reuse.reuse)
      with tf.variable_scope("jump", reuse=False) as scope:
        vs = scope
      with tf.variable_scope(vs) as jump:
        self.assertFalse(jump.reuse)
      with tf.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)
      with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertFalse(jump_no_reuse.reuse)
  def testVarOpScope(self):
    with self.test_session():
      with tf.name_scope("scope1"):
        with tf.variable_scope("tower", "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
        with tf.variable_scope("tower", "default", []):
          with self.assertRaises(ValueError):
            tf.get_variable("w", [])
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower_1/scope2/")
      with tf.name_scope("scope2"):
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "default_1/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default_1/scope2/")
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    with self.test_session():
      with tf.variable_scope(None, "defaultScope1"):
        with tf.variable_scope(None, "layer"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "defaultScope1/layer/w:0")
      with tf.variable_scope(None, "defaultScope1"):
        with tf.variable_scope(None, "layer"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "defaultScope1_1/layer/w:0")
      with tf.variable_scope(None, "defaultScope"):
        with tf.variable_scope(None, "layer"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "defaultScope/layer/w:0")
      with tf.variable_scope(None, "defaultScope1"):
        with tf.variable_scope(None, "layer"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "defaultScope1_2/layer/w:0")
  def testVarOpScopeReuse(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        with tf.variable_scope("tower", "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with tf.variable_scope(outer, reuse=True) as outer:
        with tf.variable_scope("tower", "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  def testVarScopeGetVar(self):
    with self.test_session():
      with tf.variable_scope("root"):
        with tf.variable_scope("towerA") as tower_a:
          va = tf.get_variable("v", [1])
          self.assertEqual(va.name, "root/towerA/v:0")
        with tf.variable_scope(tower_a, reuse=True):
          va2 = tf.get_variable("v", [1])
          self.assertEqual(va2, va)
        with tf.variable_scope("towerB"):
          vb = tf.get_variable("v", [1])
          self.assertEqual(vb.name, "root/towerB/v:0")
        with self.assertRaises(ValueError):
          with tf.variable_scope("towerA"):
            va2 = tf.get_variable("v", [1])
        with tf.variable_scope("towerA", reuse=True):
          va2 = tf.get_variable("v", [1])
          self.assertEqual(va2, va)
        with tf.variable_scope("foo"):
          with tf.variable_scope("bar"):
            v = tf.get_variable("v", [1])
            self.assertEqual(v.name, "root/foo/bar/v:0")
            with tf.variable_scope(tower_a, reuse=True):
              va3 = tf.get_variable("v", [1])
              self.assertEqual(va, va3)
        with self.assertRaises(ValueError):
          with tf.variable_scope(tower_a, reuse=True):
            with tf.variable_scope("baz"):
              tf.get_variable("v", [1])
        with self.assertRaises(ValueError) as exc:
          with tf.variable_scope(tower_a, reuse=True):
            tf.get_variable("v", [2])  # Different shape.
        self.assertEqual("shape" in str(exc.exception), True)
        with self.assertRaises(ValueError) as exc:
          with tf.variable_scope(tower_a, reuse=True):
            tf.get_variable("v", [1], dtype=tf.int32)
        self.assertEqual("dtype" in str(exc.exception), True)
  def testVarScopeOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        pass
      with tf.variable_scope(outer):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with tf.variable_scope("default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with tf.variable_scope(outer, reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        with tf.variable_scope("default", reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  def testVarScopeNestedOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        with tf.variable_scope(outer):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with tf.variable_scope("default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
        with tf.variable_scope(outer, reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer_1/scope2/")
        with tf.variable_scope("default", reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default_1/scope2/")
  def testVarOpScopeReuseParam(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        with tf.variable_scope("tower", "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with tf.variable_scope(outer) as outer:
        with tf.variable_scope("tower", "default", reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        outer.reuse_variables()
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  def testVarOpScopeReuseError(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        with tf.variable_scope(None, "default", reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
  def testVarOpScopeOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        pass
      with tf.variable_scope(outer, "default", []):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with tf.variable_scope(outer, "default", reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  def testVarOpScopeNestedOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        with tf.variable_scope(outer, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with tf.variable_scope(outer, "default", reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with tf.variable_scope(None, "default", []):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  def testGetLocalVar(self):
    with self.test_session():
      # Check that local variable respects naming.
      with tf.variable_scope("outer") as outer:
        with tf.variable_scope(outer, "default", []):
          local_var = variable_scope.get_local_variable(
              "w", [], collections=["foo"])
          self.assertEqual(local_var.name, "outer/w:0")
      # Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
      self.assertIn(local_var, tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES))
      self.assertIn(local_var, tf.get_collection("foo"))
      self.assertNotIn(
          local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
      # Check that local variable respects `reuse`.
      with tf.variable_scope(outer, "default", reuse=True):
        self.assertEqual(variable_scope.get_local_variable("w", []).name,
                         "outer/w:0")
  def testGetVarWithDevice(self):
    g = tf.Graph()
    varname_type = []
    def device_func(op):
      if op.type == "Variable":
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/gpu:0"
    with g.as_default():
      with tf.device(device_func):
        _ = tf.get_variable("x", (100, 200))
        _ = tf.get_variable("y", dtype=tf.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", tf.float32))
    self.assertEqual(varname_type[1], ("y", tf.int64))
def axis0_into1_partitioner(shape=None, **unused_kwargs):
  # Trivial partitioner: a single partition along every axis.
  return [1 for _ in shape]
def axis0_into2_partitioner(shape=None, **unused_kwargs):
  # Split axis 0 into two partitions; leave every other axis whole.
  partitions = [1 for _ in shape]
  partitions[0] = 2
  return partitions
def axis0_into3_partitioner(shape=None, **unused_kwargs):
  # Split axis 0 into three partitions; leave every other axis whole.
  partitions = [1 for _ in shape]
  partitions[0] = 3
  return partitions
class VariableScopeWithPartitioningTest(tf.test.TestCase):
  """Tests for partitioned variables created through variable scopes.

  Exercises partition naming, reuse with matching / mismatched
  partitioners, and that concatenation happens along the partitioned
  axis.
  """
  def testResultNameMatchesRequested(self):
    with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
      v = tf.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v.name, "scope0/name0")
      v_concat = v.as_tensor()
      self.assertEqual(v_concat.name, "scope0/name0:0")
      variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
      self.assertTrue("scope0/name0/part_0:0" in [x.name for x in variables])
      self.assertTrue("scope0/name0/part_1:0" in [x.name for x in variables])
      self.assertFalse("scope0/name0/part_2:0" in [x.name for x in variables])
  def testBreaksIfPartitioningChanges(self):
    with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
      tf.get_variable("name0", shape=(3, 1, 1))
    with tf.variable_scope("scope0",
                           partitioner=axis0_into3_partitioner,
                           reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions .* "
          "and found partitions .*"):
        tf.get_variable("name0", shape=(3, 1, 1))
    with tf.variable_scope("scope0",
                           partitioner=axis0_into1_partitioner,
                           reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions .* "
          "and found partitions .*"):
        tf.get_variable("name0", shape=(3, 1, 1))
  def testReturnsExistingConcatenatedValueIfReuse(self):
    with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
      v_concat = tf.get_variable("name0", shape=(3, 1, 1))
      tf.get_variable_scope().reuse_variables()
      v_concat_2 = tf.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v_concat, v_concat_2)
  def testAllowsReuseWithoutPartitioner(self):
    with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
      v = tf.get_variable("name0", shape=(3, 1, 1))
    with tf.variable_scope("scope0", reuse=True):
      v_reused = tf.get_variable("name0")
    self.assertEqual(v, v_reused)
  def testPropagatePartitionerOnReopening(self):
    with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner) as vs:
      self.assertEqual(axis0_into2_partitioner, vs.partitioner)
      with tf.variable_scope(vs) as vs1:
        self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
  def testPartitionConcatenatesAlongCorrectAxis(self):
    def _part_axis_0(**unused_kwargs):
      return (2, 1, 1)
    def _part_axis_1(**unused_kwargs):
      return (1, 2, 1)
    with tf.variable_scope("root"):
      v0 = tf.get_variable("n0", shape=(2, 2, 2), partitioner=_part_axis_0)
      v1 = tf.get_variable("n1", shape=(2, 2, 2), partitioner=_part_axis_1)
    self.assertEqual(v0.get_shape(), (2, 2, 2))
    self.assertEqual(v1.get_shape(), (2, 2, 2))
    n0_0 = tf.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
    n0_1 = tf.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
    self.assertEqual(n0_0.get_shape(), (1, 2, 2))
    self.assertEqual(n0_1.get_shape(), (1, 2, 2))
    n1_0 = tf.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
    n1_1 = tf.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
    self.assertEqual(n1_0.get_shape(), (2, 1, 2))
    self.assertEqual(n1_1.get_shape(), (2, 1, 2))
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
  """Tests for the `custom_getter` argument of variable scopes.

  Checks getter validation, that an identity getter is transparent,
  and that a getter may create multiple underlying variables.
  """
  def testNonCallableGetterFails(self):
    with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
      with tf.variable_scope("scope0", custom_getter=3):
        tf.get_variable("name0")
    with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
      tf.get_variable("name0", custom_getter=3)
  def testNoSideEffectsWithIdentityCustomGetter(self):
    called = [0]
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)
    with tf.variable_scope("scope", custom_getter=custom_getter) as scope:
      v = tf.get_variable("v", [1])
    with tf.variable_scope(scope, reuse=True):
      v2 = tf.get_variable("v", [1])
    with tf.variable_scope("new_scope") as new_scope:
      v3 = tf.get_variable("v3", [1])
    with tf.variable_scope(new_scope, reuse=True, custom_getter=custom_getter):
      v4 = tf.get_variable("v3", [1])
    self.assertEqual(v, v2)
    self.assertEqual(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope
  def testGetterThatCreatesTwoVariablesAndSumsThem(self):
    def custom_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/0" % name, *args, **kwargs)
      g_1 = getter("%s/1" % name, *args, **kwargs)
      with tf.name_scope("custom_getter"):
        return g_0 + g_1
    with tf.variable_scope("scope", custom_getter=custom_getter):
      v = tf.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = tf.trainable_variables()
    self.assertEqual(2, len(true_vars))
    self.assertEqual("scope/v/0:0", true_vars[0].name)
    self.assertEqual("scope/v/1:0", true_vars[1].name)
    self.assertEqual("custom_getter/add:0", v.name)
    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      self.assertAllClose(np_v, sum(np_vars))
class PartitionInfoTest(tf.test.TestCase):
  """Tests for variable_scope._PartitionInfo.

  Validates constructor argument checking and the single_offset /
  single_slice_dim helpers.
  """
  def testConstructorChecks(self):
    # Invalid arg types.
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
    # full_shape and var_offset must have same length.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
    # Offset must always be less than shape.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
  def testSingleOffset(self):
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(4, partition_info.single_offset([1, 3]))
    # Tests when the variable isn't partitioned at all.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(0, partition_info.single_offset([9, 3]))
  def testSingleSliceDim(self):
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    # Invalid shape.
    with self.assertRaises(TypeError):
      partition_info.single_slice_dim(None)
    # Rank of shape differs from full_shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 2, 3])
    # Shape is too large given var_offset (4+6 > 9).
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([6, 3])
    # Multiple possible slice dim from shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 1])
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
# Run the TensorFlow test suite when this file is executed as a script.
if __name__ == "__main__":
    tf.test.main()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
One-dimensional Gamma-Gaussian mixture density classes : Given a set
of points the algorithm provides approximate maximum likelihood estimates
of the mixture distribution using an EM algorithm.
Author: Bertrand Thirion and Merlin Keller 2005-2008
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import scipy.stats as st
import scipy.special as sp
#############################################################################
# Auxiliary functions #######################################################
#############################################################################
def _dichopsi_log(u, v, y, eps=0.00001):
""" Implements the dichotomic part of the solution of psi(c)-log(c)=y
"""
if u > v:
u, v = v, u
t = (u + v) / 2
if np.absolute(u - v) < eps:
return t
else:
if sp.psi(t) - np.log(t) > y:
return _dichopsi_log(u, t, y, eps)
else:
return _dichopsi_log(t, v, y, eps)
def _psi_solve(y, eps=0.00001):
    """ Solve psi(c) - log(c) = y for c by dichotomy.

    Parameters
    ----------
    y: float
        target value; must be negative, since psi(c) - log(c) < 0
        for every c > 0
    eps: float, optional
        precision requested from the dichotomic search

    Returns
    -------
    float, the solution c

    Raises
    ------
    ValueError
        if y >= 0, for which no finite solution exists

    Notes
    -----
    Fixes over the previous version: the debugging ``print`` before the
    raise is gone (the value is now embedded in the exception message),
    and the guard covers y == 0 as well — psi(c) - log(c) -> 0 only as
    c -> inf, so y == 0 used to send the bracketing loop into a
    pathological doubling run instead of raising.
    """
    if y >= 0:
        raise ValueError(
            "y=%s is non-negative, the problem cannot be solved" % y)
    u = 1.
    if y > sp.psi(u) - np.log(u):
        # bracket the root from above: grow u until psi(u)-log(u) >= y,
        # then step back so [u, 2u] brackets the root
        while sp.psi(u) - np.log(u) < y:
            u *= 2
        u /= 2
    else:
        # bracket the root from below: shrink u until psi(u)-log(u) <= y
        while sp.psi(u) - np.log(u) > y:
            u /= 2
    return _dichopsi_log(u, 2 * u, y, eps)
def _compute_c(x, z, eps=0.00001):
    """ Return the weighted maximum-likelihood estimate of the shape
    parameter of a 1D gamma density.

    Parameters
    ----------
    x: array of shape (nbitem,)
        the data points (assumed positive)
    z: array of shape (nbitem,)
        the weights of the data points
    eps: float, optional
        precision: threshold of the degeneracy test, also forwarded to
        the dichotomic solver

    Returns
    -------
    float, the estimated shape parameter

    Notes
    -----
    Bug fix: ``eps`` used to be silently overwritten by a hard-coded
    1.e-7 and the solver was called with a hard-coded tolerance, so the
    caller-supplied value was ignored; it is now honored in both places.
    """
    # y = E_z[log x] - log E_z[x] <= 0 by Jensen's inequality;
    # y ~ 0 means the weighted data are nearly constant and the MLE of
    # the shape diverges.
    y = np.dot(z, np.log(x)) / np.sum(z) - np.log(np.dot(z, x) / np.sum(z))
    if y > - eps:
        # degenerate case: fall back to an arbitrary large shape value
        c = 10
    else:
        c = _psi_solve(y, eps=eps)
    return c
def _gaus_dens(mean, var, x):
""" evaluate the gaussian density (mean,var) at points x
"""
Q = - (x - mean) ** 2 / (2 * var)
return 1. / np.sqrt(2 * np.pi * var) * np.exp(Q)
def _gam_dens(shape, scale, x):
"""evaluate the gamma density (shape,scale) at points x
Notes
-----
Returns 0 on negative subspace
"""
ng = np.zeros(np.size(x))
cst = - shape * np.log(scale) - sp.gammaln(shape)
i = np.ravel(np.nonzero(x > 0))
if np.size(i) > 0:
lz = cst + (shape - 1) * np.log(x[i]) - x[i] / scale
ng[i] = np.exp(lz)
return ng
def _gam_param(x, z):
    """ Compute the parameters of a gamma density from weighted data points.

    Parameters
    ----------
    x: array of shape (nbitem,), the learning points
    z: array of shape (nbitem,), their membership within the class

    Notes
    -----
    If no point is positive then the couple (1, 1) is returned.
    """
    eps = 1.e-5
    pos = np.ravel(np.nonzero(x > 0))
    weight_sum = np.sum(z[pos])
    if not weight_sum > 0:
        # no positive point carries weight: return the default couple
        return 1, 1
    shape = _compute_c(x[pos], z[pos], eps)
    scale = np.dot(x[pos], z[pos]) / (weight_sum * shape)
    return shape, scale
##############################################################################
# class `Gamma`
##############################################################################
class Gamma(object):
    """ Basic one-dimensional Gamma density estimation class.

    Fits the two parameters of a gamma distribution to positive data by
    maximum likelihood.

    2 parameters are used:
    - shape: gamma shape parameter
    - scale: gamma scale parameter
    """

    def __init__(self, shape=1, scale=1):
        self.shape = shape
        self.scale = scale

    def parameters(self):
        # Print the current parameter values.
        print("shape: ", self.shape, "scale: ", self.scale)

    def check(self, x):
        # The gamma density is defined only on positive values.
        if (x.min() < 0):
            raise ValueError("negative values in input")

    def estimate(self, x, eps=1.e-7):
        """
        ML estimation of the Gamma parameters

        Parameters
        ----------
        x: array of shape (nbitem,)
            the data; must be non-negative
        eps: float, optional
            threshold of the degeneracy test below which the shape is
            simply set to 1
        """
        self.check(x)
        n = np.size(x)
        # y = mean(log x) - log(mean x) <= 0 by Jensen's inequality
        y = np.sum(np.log(x)) / n - np.log(np.sum(x) / n)
        if y > - eps:
            # (near-)degenerate data: all values almost equal
            self.shape = 1
        else:
            self.shape = _psi_solve(y)
        self.scale = np.sum(x) / (n * self.shape)
##############################################################################
# Gamma-Gaussian Mixture class
##############################################################################
class GGM(object):
    """
    This is the basic one dimensional Gaussian-Gamma Mixture estimation class
    Note that it can work with positive or negative values,
    as long as there is at least one positive value.
    NB : The gamma distribution is defined only on positive values.

    5 scalar members
    - mean: gaussian mean
    - var: gaussian variance (non-negative)
    - shape: gamma shape (non-negative)
    - scale: gamma scale (non-negative)
    - mixt: mixture parameter (non-negative, weight of the gamma)
    """

    def __init__(self, shape=1, scale=1, mean=0, var=1, mixt=0.5):
        self.shape = shape
        self.scale = scale
        self.mean = mean
        self.var = var
        self.mixt = mixt

    def parameters(self):
        """ Print the parameters of self
        """
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Gamma: shape: ", self.shape, "scale: ", self.scale)
        print("Mixture gamma: ", self.mixt, "Gaussian: ", 1 - self.mixt)

    def Mstep(self, x, z):
        """
        Mstep of the model: maximum likelihood
        estimation of the parameters of the model

        Parameters
        ----------
        x : array of shape (nbitems,)
            input data
        z : array of shape (nbitems, 2)
            the membership matrix
        """
        # z[:, 0] is the membership of the gamma component,
        # z[:, 1] the membership of the gaussian component
        tiny = 1.e-15
        # guard against an empty component (division by zero below)
        sz = np.maximum(tiny, np.sum(z, 0))

        self.shape, self.scale = _gam_param(x, z[:, 0])
        self.mean = np.dot(x, z[:, 1]) / sz[1]
        self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]
        self.mixt = sz[0] / np.size(x)

    def Estep(self, x):
        """
        E step of the estimation:
        Estimation of data membership

        Parameters
        ----------
        x: array of shape (nbitems,)
            input data

        Returns
        -------
        z: array of shape (nbitems, 2)
            the membership matrix
        L: float
            the average log-likelihood of the data under the mixture
        """
        eps = 1.e-15
        z = np.zeros((np.size(x), 2), 'd')
        z[:, 0] = _gam_dens(self.shape, self.scale, x)
        z[:, 1] = _gaus_dens(self.mean, self.var, x)
        # weight the component densities by the mixing proportions
        z = z * np.array([self.mixt, 1. - self.mixt])
        sz = np.maximum(np.sum(z, 1), eps)
        L = np.sum(np.log(sz)) / np.size(x)
        # normalize each row so memberships sum to one
        z = (z.T / sz).T
        return z, L

    def estimate(self, x, niter=10, delta=0.0001, verbose=False):
        """ Complete EM estimation procedure

        Parameters
        ----------
        x : array of shape (nbitems,)
            the data to be processed
        niter : int, optional
            max nb of iterations
        delta : float, optional
            criterion for convergence
        verbose : bool, optional
            If True, print values during iterations

        Returns
        -------
        LL, float
            average final log-likelihood
        """
        if x.max() < 0:
            # all the values are generated by the Gaussian
            self.mean = np.mean(x)
            self.var = np.var(x)
            self.mixt = 0.
            # NOTE(review): Estep would yield -0.5 * (1 + log(2*pi*var))
            # as the average log-likelihood of a pure Gaussian fit; the
            # sign here looks inverted -- confirm against callers.
            L = 0.5 * (1 + np.log(2 * np.pi * self.var))
            return L

        # proceed with standard estimate
        z, L = self.Estep(x)
        L0 = L - 2 * delta
        for i in range(niter):
            self.Mstep(x, z)
            z, L = self.Estep(x)
            if verbose:
                print(i, L)
            # stop as soon as the log-likelihood gain falls below delta
            if (L < L0 + delta):
                break
            L0 = L
        return L

    def show(self, x):
        """ Visualization of the mm based on the empirical histogram of x

        Parameters
        ----------
        x : array of shape (nbitems,)
            the data to be processed
        """
        # bin width ~ 3.5 * std / n**(1/3)
        step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
        bins = max(10, int((x.max() - x.min()) / step))
        h, c = np.histogram(x, bins)
        # NOTE(review): np.float is removed in NumPy >= 1.24; use float
        h = h.astype(np.float) / np.size(x)
        p = self.mixt

        # per-bin probability mass of each weighted component
        dc = c[1] - c[0]
        y = (1 - p) * _gaus_dens(self.mean, self.var, c) * dc
        z = np.zeros(np.size(c))
        z = _gam_dens(self.shape, self.scale, c) * p * dc

        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(0.5 * (c[1:] + c[:-1]), h)
        mp.plot(c, y, 'r')
        mp.plot(c, z, 'g')
        mp.plot(c, z + y, 'k')
        mp.title('Fit of the density with a Gamma-Gaussians mixture')
        mp.legend(('data', 'gaussian acomponent', 'gamma component',
                   'mixture distribution'))

    def posterior(self, x):
        """Posterior probability of observing the data x for each component

        Parameters
        ----------
        x: array of shape (nbitems,)
            the data to be processed

        Returns
        -------
        y, pg : arrays of shape (nbitem)
            the posterior probabilities of the gaussian and the gamma
            component, respectively
        """
        p = self.mixt
        pg = p * _gam_dens(self.shape, self.scale, x)
        y = (1 - p) * _gaus_dens(self.mean, self.var, x)
        # Bayes rule: normalize by the total mixture density
        return y / (y + pg), pg / (y + pg)
##############################################################################
# double-Gamma-Gaussian Mixture class
##############################################################################
class GGGM(object):
    """
    The basic one dimensional Gamma-Gaussian-Gamma Mixture estimation
    class, where the first gamma has a negative sign, while the second
    one has a positive sign.

    7 parameters are used:
    - shape_n: negative gamma shape
    - scale_n: negative gamma scale
    - mean: gaussian mean
    - var: gaussian variance
    - shape_p: positive gamma shape
    - scale_p: positive gamma scale
    - mixt: array of mixture parameter
    (weights of the n-gamma,gaussian and p-gamma)
    """

    def __init__(self, shape_n=1, scale_n=1, mean=0, var=1,
                 shape_p=1, scale_p=1, mixt=np.array([1.0, 1.0, 1.0]) / 3):
        """ Constructor

        Parameters
        -----------
        shape_n : float, optional
        scale_n: float, optional
            parameters of the negative gamma; must be positive
        mean : float, optional
        var : float, optional
            parameters of the gaussian ; var must be positive
        shape_p : float, optional
        scale_p : float, optional
            parameters of the positive gamma; must be positive
        mixt : array of shape (3,), optional
            the mixing proportions; they should be positive and sum to 1
        """
        self.shape_n = shape_n
        self.scale_n = scale_n
        self.mean = mean
        self.var = var
        self.shape_p = shape_p
        self.scale_p = scale_p
        self.mixt = mixt

    def parameters(self):
        """ Print the parameters
        """
        # NOTE(review): "Poitive" typo in the user-facing output below;
        # left unchanged here as it is runtime output, not a comment.
        print("Negative Gamma: shape: ", self.shape_n,
              "scale: ", self.scale_n)
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Poitive Gamma: shape: ", self.shape_p, "scale: ", self.scale_p)
        mixt = self.mixt
        print("Mixture neg. gamma: ", mixt[0], "Gaussian: ", mixt[1],
              "pos. gamma: ", mixt[2])

    def init(self, x, mixt=None):
        """
        Initialization of the different parameters

        Parameters
        ----------
        x: array of shape(nbitems)
            the data to be processed
        mixt : None or array of shape(3), optional
            prior mixing proportions. If None, the classes have equal weight
        """
        if mixt is not None:
            if np.size(mixt) == 3:
                self.mixt = np.ravel(mixt)
            else:
                raise ValueError('bad size for mixt')

        # gaussian
        self.mean = np.mean(x)
        self.var = np.var(x)

        # negative gamma: moment-match on the negative values
        i = np.ravel(np.nonzero(x < 0))
        if np.size(i) > 0:
            mn = - np.mean(x[i])
            vn = np.var(x[i])
            self.scale_n = vn / mn
            self.shape_n = mn ** 2 / vn
        else:
            # no negative data: zero out the negative component
            self.mixt[0] = 0

        # positive gamma: moment-match on the positive values
        i = np.ravel(np.nonzero(x > 0))
        if np.size(i) > 0:
            mp = np.mean(x[i])
            vp = np.var(x[i])
            self.scale_p = vp / mp
            self.shape_p = mp ** 2 / vp
        else:
            self.mixt[2] = 0

        # mixing proportions
        self.mixt = self.mixt / np.sum(self.mixt)

    def init_fdr(self, x, dof=-1, copy=True):
        """
        Initialization of the class based on a fdr heuristic: the
        probability to be in the positive component is proportional to
        the 'positive fdr' of the data. The same holds for the
        negative part. The point is that the gamma parts should model
        nothing more that the tails of the distribution.

        Parameters
        ----------
        x: array of shape(nbitem)
            the data under consideration
        dof: integer, optional
            number of degrees of freedom if x is thought to be a student
            variate. By default, it is handled as a normal
        copy: boolean, optional
            If True, copy the data.
        """
        # Safeguard ourselves against modifications of x, both by our
        # code, and by external code.
        if copy:
            x = x.copy()
        # positive gamma
        i = np.ravel(np.nonzero(x > 0))
        from ..statistics.empirical_pvalue import fdr
        if np.size(i) > 0:
            if dof < 0:
                pvals = st.norm.sf(x)
            else:
                pvals = st.t.sf(x, dof)
            q = fdr(pvals)
            # 1 - fdr as a soft membership of the positive tail
            z = 1 - q[i]
            # keep at least half a point of mass in the component
            self.mixt[2] = np.maximum(0.5, z.sum()) / np.size(x)
            self.shape_p, self.scale_p = _gam_param(x[i], z)
        else:
            self.mixt[2] = 0

        # negative gamma (mirror of the positive case)
        i = np.ravel(np.nonzero(x < 0))
        if np.size(i) > 0:
            if dof < 0:
                pvals = st.norm.cdf(x)
            else:
                pvals = st.t.cdf(x, dof)
            q = fdr(pvals)
            z = 1 - q[i]
            self.shape_n, self.scale_n = _gam_param( - x[i], z)
            self.mixt[0] = np.maximum(0.5, z.sum()) / np.size(x)
        else:
            self.mixt[0] = 0
        # the gaussian gets the remaining mass
        self.mixt[1] = 1 - self.mixt[0] - self.mixt[2]

    def Mstep(self, x, z):
        """
        Mstep of the estimation:
        Maximum likelihood update the parameters of the three components

        Parameters
        ------------
        x: array of shape (nbitem,)
            input data
        z: array of shape (nbitems,3)
            probabilistic membership
        """
        tiny = 1.e-15
        # guard against empty components (division by zero below)
        sz = np.maximum(np.sum(z, 0), tiny)
        self.mixt = sz / np.sum(sz)

        # negative gamma (fitted on the sign-flipped data)
        self.shape_n, self.scale_n = _gam_param( - x, z[:, 0])

        # gaussian
        self.mean = np.dot(x, z[:, 1]) / sz[1]
        self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]

        # positive gamma
        self.shape_p, self.scale_p = _gam_param(x, z[:, 2])

    def Estep(self, x):
        """ Update probabilistic memberships of the three components

        Parameters
        ----------
        x: array of shape (nbitems,)
            the input data

        Returns
        -------
        z: ndarray of shape (nbitems, 3)
            probabilistic membership
        L: float
            the average log-likelihood of the data under the mixture

        Notes
        -----
        z[:, 0] is the membership of the negative gamma
        z[:, 1] is the membership of the gaussian
        z[:, 2] is the membership of the positive gamma
        """
        tiny = 1.e-15
        # component likelihoods weighted by the mixing proportions
        z = np.array(self.component_likelihood(x)).T * self.mixt
        sz = np.maximum(tiny, np.sum(z, 1))
        L = np.mean(np.log(sz))
        # normalize each row so memberships sum to one
        z = (z.T / sz).T
        return z, L

    def estimate(self, x, niter=100, delta=1.e-4, bias=0, verbose=0,
                 gaussian_mix=0):
        """ Whole EM estimation procedure:

        Parameters
        ----------
        x: array of shape (nbitem)
            input data
        niter: integer, optional
            max number of iterations
        delta: float, optional
            increment in LL at which convergence is declared
        bias: float, optional
            lower bound on the gaussian variance (to avoid shrinkage)
        gaussian_mix: float, optional
            if nonzero, lower bound on the gaussian mixing weight
            (to avoid shrinkage)
        verbose: 0, 1 or 2
            verbosity level

        Returns
        -------
        z: array of shape (nbitem, 3)
            the membership matrix
        """
        z, L = self.Estep(x)
        L0 = L - 2 * delta

        for i in range(niter):
            self.Mstep(x, z)

            # Constraint the Gaussian variance
            if bias > 0:
                self.var = np.maximum(bias, self.var)

            # Constraint the Gaussian mixing ratio
            if gaussian_mix > 0 and self.mixt[1] < gaussian_mix:
                # NOTE(review): the unpack order (upper, gaussian, lower)
                # versus the repack order (lower, gaussian, upper) appears
                # to swap the negative/positive gamma weights instead of
                # preserving their ratio -- confirm intended.
                upper, gaussian, lower = self.mixt
                upper_to_lower = upper / (lower + upper)
                gaussian = gaussian_mix
                upper = (1 - gaussian_mix) * upper_to_lower
                lower = 1 - gaussian_mix - upper
                # NOTE(review): this also turns self.mixt into a tuple.
                self.mixt = lower, gaussian, upper

            z, L = self.Estep(x)
            if verbose:
                print(i, L)
            # stop as soon as the log-likelihood gain falls below delta
            if (L < L0 + delta):
                break
            L0 = L
        return z

    def posterior(self, x):
        """
        Compute the posterior probability of the three components
        given the data

        Parameters
        -----------
        x: array of shape (nbitem,)
            the data under evaluation

        Returns
        --------
        ng,y,pg: three arrays of shape(nbitem)
            the posteriori of the 3 components given the data

        Notes
        -----
        ng + y + pg = np.ones(nbitem)
        """
        p = self.mixt
        ng, y, pg = self.component_likelihood(x)
        # Bayes rule: weight by mixing proportions, normalize by total
        total = ng * p[0] + y * p[1] + pg * p[2]
        return ng * p[0] / total, y * p[1] / total, pg * p[2] / total

    def component_likelihood(self, x):
        """
        Compute the likelihood of the data x under
        the three components: negative gamma, gaussian, positive gamma

        Parameters
        -----------
        x: array of shape (nbitem,)
            the data under evaluation

        Returns
        --------
        ng,y,pg: three arrays of shape(nbitem)
            The likelihood of the data under the 3 components
        """
        # the negative gamma is evaluated on the sign-flipped data
        ng = _gam_dens(self.shape_n, self.scale_n, - x)
        y = _gaus_dens(self.mean, self.var, x)
        pg = _gam_dens(self.shape_p, self.scale_p, x)
        return ng, y, pg

    def show(self, x, mpaxes=None):
        """ Visualization of mixture shown on the empirical histogram of x

        Parameters
        ----------
        x: ndarray of shape (nditem,)
            data
        mpaxes: matplotlib axes, optional
            axes handle used for the plot if None, new axes are created.
        """
        import matplotlib.pylab as mp
        # bin width ~ 3.5 * std / n**(1/3)
        step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
        bins = max(10, int((x.max() - x.min()) / step))
        h, c = np.histogram(x, bins)
        # NOTE(review): np.float is removed in NumPy >= 1.24; use float
        h = h.astype(np.float) / np.size(x)
        dc = c[1] - c[0]

        # weighted component densities over the bin centers
        ng = self.mixt[0] * _gam_dens(self.shape_n, self.scale_n, - c)
        y = self.mixt[1] * _gaus_dens(self.mean, self.var, c)
        pg = self.mixt[2] * _gam_dens(self.shape_p, self.scale_p, c)
        z = y + pg + ng

        if mpaxes is None:
            mp.figure()
            ax = mp.subplot(1, 1, 1)
        else:
            ax = mpaxes
        ax.plot(0.5 * (c[1:] + c[:-1]), h / dc, linewidth=2, label='data')
        ax.plot(c, ng, 'c', linewidth=2, label='negative gamma component')
        ax.plot(c, y, 'r', linewidth=2, label='Gaussian component')
        ax.plot(c, pg, 'g', linewidth=2, label='positive gamma component')
        ax.plot(c, z, 'k', linewidth=2, label='mixture distribution')
        ax.set_title('Fit of the density with a Gamma-Gaussian mixture',
                     fontsize=12)
        l = ax.legend()
        for t in l.get_texts():
            t.set_fontsize(12)
        ax.set_xticklabels(ax.get_xticks(), fontsize=12)
        ax.set_yticklabels(ax.get_yticks(), fontsize=12)
| |
# -*- coding: utf-8 -*-
"""Wrapper for ROSETTA software for structure prediction of small
RNA sequences"""
import os, shutil, re
import subprocess
import os
from shutil import copyfile
from rna_tools.tools.mq.lib.wrappers.SubprocessUtils import run_command
from rna_tools.tools.pdb_formatix.PDBFile import PDBFile#resname_check_and_3to1, set_residues_bfactor
from rna_tools.tools.mq.lib.wrappers.base_wrappers import ProgramWrapper, WrapperError
from rna_tools.tools.pdb_formatix.RosettaUtils import RosettaPDBFile
from rna_tools.tools.pdb_formatix.rebuilder import check_and_rebuild
from rna_tools.rna_tools_config import FARNA_PATH, FARNA_DB_PATH, FARNA_LORES
class FARNA(ProgramWrapper):
    """
    Wrapper class for running ROSETTA scoring function automatically.
    """
    # identifiers and paths used by the base class / sandbox setup
    program_name = 'farna'
    src_bin = FARNA_PATH
    db_path = FARNA_DB_PATH
    input_fn = 'seq.fasta'
    input_file = ''
    best_energy = ''
    executable = 'rna_minimize'

    def __init__(self, sequence='test', seq_name='test', job_id=None):
        try:
            self.start_dir = os.getcwd()
        except OSError:  # directory was deleted or something like that
            pass
        super(FARNA, self).__init__(sequence, seq_name, job_id=job_id)

    def _prepare_stderr_stdout(self):
        """Open per-job stdout/stderr capture files inside self.path."""
        # create output file
        self.output_file = os.path.join(self.path, 'stdout.txt')
        self.stdout = open(self.output_file, 'w')
        # create error file
        self.error_file = os.path.join(self.path, 'stderr.txt')
        self.stderr = open(self.error_file, 'w')

    def _prepare_files(self):
        """Write the input FASTA file and open the capture files."""
        # create input file
        # NOTE(review): file.write() returns the number of characters
        # written, so self.input_file holds an int, not a file handle --
        # confirm intended. Also the FASTA header is the literal file
        # name '>seq.fasta', not the sequence name.
        self.input_file = open(os.path.join \
                          (self.sandbox_dir, self.input_fn), 'w')\
                          .write('>seq.fasta\n'+str(self.sequence).lower())
        self._prepare_stderr_stdout()

    def sandbox(self):
        """Expose the ROSETTA executable inside the sandbox directory."""
        #shutil.copytree(self.src_bin + os.sep + 'rosetta_source',
        #        self.sandbox_dir + os.sep + 'rosetta_source',
        #        symlinks=True)
        # symlink the binary rather than copying the whole tree
        os.symlink(self.src_bin, self.sandbox_dir + os.sep + self.executable)
        #symlinks=True)
        #os.symlink(FARNA_DB_PATH,
        #        self.sandbox_dir + os.sep + 'rosetta_database')
        #os.system('chmod +x %s' % \
        #                    os.path.join(self.sandbox_dir, self.executable))

    def run(self, pdb_file, hires, verbose=False, system=False):#, global_energy_score=True):
        """Compute FARNA potential for a single file

        Arguments:
          * pdb_file = path to pdb file
          * hires = if True, skip the low-resolution weights/minimization
          * global_energy_score = True/False (See Output), default=True

        Output:
          * A list of energies, e.g::

             ['-21.721', '-0.899', '-20.961', '-84.498', '-16.574', '-180.939', '11.549', '7.475', '-17.257', '-306.324', '0.0', '0.0', '17.503', '0.0']

          ??? or a dictionary of lists of local scores, eg::

              {
               'N_BS': [17.0, -0.70039, -0.720981, -0.685238, -0.734146, ... ],
               'atom_pair_constraint': [0.0, -0.764688, -0.773833, ...],
                ...
              }
        """
        global_energy_score=True
        # normalize terminator records: END / TER+END -> TER
        ftxt = open(pdb_file).read()
        ftxt = re.sub('TER\s+END\s+', 'TER', ftxt)
        ftxt = re.sub('END', 'TER', ftxt).strip()
        f = open(self.sandbox_dir + os.sep + 'tmp.pdb', 'w')
        f.write(ftxt)
        f.close()

        pdb_file = self.sandbox_dir + os.sep + 'tmp.pdb'
        # rebuild missing atoms if needed before handing the file to ROSETTA
        if check_and_rebuild(pdb_file, self.sandbox_dir + os.sep + 'query.pdb'):
            self.pdb_fixes.append('rebuild_full_atom')
        # rebind: pdb_file is now a RosettaPDBFile object, not a path
        pdb_file = RosettaPDBFile(pdb_path=self.sandbox_dir + os.sep + 'query.pdb')

        # get sequence from PDB file
        with open(self.sandbox_dir + os.sep + 'query.fasta', 'w') as f:
            f.write(pdb_file.get_fasta(lowercase=True))

        # create a ROSETTA ready PDB file
        pdb_file.make_rna_rosetta_ready()
        pdb_file.save(self.sandbox_dir + os.sep + 'query.pdb')
        # NOTE(review): this overwrites the 'rebuild_full_atom' entry
        # appended above -- confirm intended.
        self.pdb_fixes = pdb_file.fixes

        # run
        # os.chdir(self.sandbox_dir)
        self.flags = [self.sandbox_dir + os.sep + self.executable]

        # hires = True
        if hires == True: # False: # must be a string
            minimize_cmd = ' ' # -minimize_rna '
        else:
            minimize_cmd = ' -score:weights ' + FARNA_LORES + ' -minimize_rna '
            ## MM minimize_rna should be off or by option
            ## 2021 i'm not sure why? keep -minimize_rna on here

        cmd = ' '.join([FARNA_PATH, '-constant_seed -database', self.db_path,
               minimize_cmd,
               ' -ignore_zero_occupancy false ',
               '-s', self.sandbox_dir + os.sep + 'query.pdb',
               '-out:file:silent', self.sandbox_dir + os.sep + 'SCORE.out'])
        if verbose:
            print(cmd)

        self.log(cmd, 'debug')
        self.log('Running program')
        if system:
            os.system(cmd)
        else:
            out = subprocess.getoutput(cmd)
            self.log('Run finished')
            self.log(out, 'debug')
        self.get_result()
        #if global_energy_score: # ???
        # first value of each score column = the global score
        results = []
        for i in list(self.result.keys()):
            results.append(str(self.result[i][0]))
        #return '\t'.join(results)
        return results

    def get_result(self):
        """Parse and get result from score file created during ROSETTA run

        All results are kept in self.result, but only global score is returned
        """
        f = open(self.sandbox_dir + os.sep + 'SCORE.out')
        output = f.read()
        f.close()
        lines = output.split('\n')
        lines = [l for l in lines if not l.startswith('REMARK')]
        # get names of different scores (header row, minus leading/trailing)
        keys = lines[1].split()[1:-1]
        # get global scores
        global_scores = lines[2].split()[1:]
        # global scores are at index 0 in result, local are at 1--len(sequence)
        self.result = dict(list(zip(keys, [[float(s)] for s in global_scores[:len(keys)]])))
        ##for l in lines[3:-1]:
        #    scores_res = l.split()[2:-1]  # scores for a single residue
        #    for i in xrange(len(keys)):
        #        self.result[keys[i]].append(float(scores_res[i]))
        #return self.result['score'][0]
        return self.result

    def mqap(self, pdb):
        # NOTE(review): the string below is this method's docstring; it
        # looks like a score-extraction regex for a not-yet-implemented
        # feature -- confirm.
        "Total weighted score:\s+(?P<ROSETTA_SCORE>[-\d.]+)"
        pass

    def cleanup(self):
        # delegate sandbox cleanup to the base class
        super(FARNA, self).cleanup()
# main
if __name__ == '__main__':
    # Smoke test: score a few PDB files with FARNA, once without and once
    # with high-resolution mode. Earlier assignments of `fns` are kept as
    # quick toggles between test inputs.
    fns = ['test.pdb']
    fns = ['1xjrA_M1.pdb', 'test.pdb']
    fns = ['3e5f_output4_01-000001_AA+ResnShift.pdb'] #2pcw_1_2chains.pdb'] # two chains
    for f in fns:
        f = 'test' + os.sep + f
        print('processing %s' % f)
        if 1:
            # mini false
            farna = FARNA()
            try:
                result = farna.run(f, False)
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; any scoring failure is reported as 'error'
            except Exception:
                result = 'error'
            print(result)
        if 1:
            # mini true
            farna = FARNA('', '')
            try:
                result = farna.run(f, True)
            except Exception:
                result = 'error'
            print(result)
    #farna.cleanup()
| |
import subprocess
from subprocess import PIPE, STDOUT
from unittest import TestCase
from testfixtures.mock import call
from testfixtures import ShouldRaise, compare, Replacer
from testfixtures.popen import MockPopen, PopenBehaviour
from testfixtures.compat import BytesLiteral, PY2
import signal
class Tests(TestCase):
def test_command_min_args(self):
# setup
Popen = MockPopen()
Popen.set_command('a command')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE)
# process started, no return code
compare(process.pid, 1234)
compare(None, process.returncode)
out, err = process.communicate()
# test the rest
compare(out, b'')
compare(err, b'')
compare(process.returncode, 0)
# test call list
compare([
call.Popen('a command', stderr=-1, stdout=-1),
call.Popen_instance.communicate(),
], Popen.mock.method_calls)
def test_command_max_args(self):
Popen = MockPopen()
Popen.set_command('a command', b'out', b'err', 1, 345)
process = Popen('a command', stdout=PIPE, stderr=PIPE)
compare(process.pid, 345)
compare(None, process.returncode)
out, err = process.communicate()
# test the rest
compare(out, b'out')
compare(err, b'err')
compare(process.returncode, 1)
# test call list
compare([
call.Popen('a command', stderr=-1, stdout=-1),
call.Popen_instance.communicate(),
], Popen.mock.method_calls)
def test_callable_default_behaviour(self):
def some_callable(command, stdin):
return PopenBehaviour(BytesLiteral(command), BytesLiteral(stdin), 1, 345, 0)
Popen = MockPopen()
Popen.set_default(behaviour=some_callable)
process = Popen('a command', stdin='some stdin', stdout=PIPE, stderr=PIPE)
compare(process.pid, 345)
out, err = process.communicate()
compare(out, b'a command')
compare(err, b'some stdin')
compare(process.returncode, 1)
def test_command_is_sequence(self):
Popen = MockPopen()
Popen.set_command('a command')
process = Popen(['a', 'command'], stdout=PIPE, stderr=PIPE)
compare(process.wait(), 0)
compare([
call.Popen(['a', 'command'], stderr=-1, stdout=-1),
call.Popen_instance.wait(),
], Popen.mock.method_calls)
def test_communicate_with_input(self):
# setup
Popen = MockPopen()
Popen.set_command('a command')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
out, err = process.communicate('foo')
# test call list
compare([
call.Popen('a command', shell=True, stderr=-1, stdout=-1),
call.Popen_instance.communicate('foo'),
], Popen.mock.method_calls)
def test_communicate_with_timeout(self):
Popen = MockPopen()
Popen.set_command('a command', returncode=3)
process = Popen('a command')
if PY2:
with ShouldRaise(TypeError):
process.communicate(timeout=1)
with ShouldRaise(TypeError):
process.communicate('foo', 1)
else:
process.communicate(timeout=1)
process.communicate('foo', 1)
compare([
call.Popen('a command'),
call.Popen_instance.communicate(timeout=1),
call.Popen_instance.communicate('foo', 1),
], expected=Popen.mock.method_calls)
def test_read_from_stdout(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
self.assertTrue(isinstance(process.stdout.fileno(), int))
compare(process.stdout.read(), b'foo')
# test call list
compare([
call.Popen('a command', shell=True, stderr=-1, stdout=-1),
], Popen.mock.method_calls)
def test_read_from_stderr(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stderr=b'foo')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
self.assertTrue(isinstance(process.stdout.fileno(), int))
compare(process.stderr.read(), b'foo')
# test call list
compare([
call.Popen('a command', shell=True, stderr=-1, stdout=-1),
], Popen.mock.method_calls)
def test_read_from_stdout_with_stderr_redirected_check_stdout_contents(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=STDOUT, shell=True)
# test stdout contents
compare(b'foobar', process.stdout.read())
compare(process.stderr, None)
def test_read_from_stdout_with_stderr_redirected_check_stdout_stderr_interleaved(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stdout=b'o1\no2\no3\no4\n', stderr=b'e1\ne2\n')
# usage
process = Popen('a command', stdout=PIPE, stderr=STDOUT, shell=True)
self.assertTrue(isinstance(process.stdout.fileno(), int))
# test stdout contents
compare(b'o1\ne1\no2\ne2\no3\no4\n', process.stdout.read())
def test_communicate_with_stderr_redirected_check_stderr_is_none(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=STDOUT, shell=True)
out, err = process.communicate()
# test stderr is None
compare(out, b'foobar')
compare(err, None)
def test_read_from_stdout_and_stderr(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
compare(process.stdout.read(), b'foo')
compare(process.stderr.read(), b'bar')
# test call list
compare([
call.Popen('a command', shell=True, stderr=PIPE, stdout=PIPE),
], Popen.mock.method_calls)
def test_communicate_text_mode(self):
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, text=True)
actual = process.communicate()
# check
compare(actual, expected=(u'foo', u'bar'))
def test_communicate_universal_newlines(self):
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, universal_newlines=True)
actual = process.communicate()
# check
compare(actual, expected=(u'foo', u'bar'))
def test_communicate_encoding(self):
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, encoding='ascii')
actual = process.communicate()
# check
compare(actual, expected=(u'foo', u'bar'))
def test_communicate_encoding_with_errors(self):
Popen = MockPopen()
Popen.set_command('a command', stdout=b'\xa3', stderr=b'\xa3')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, encoding='ascii', errors='ignore')
actual = process.communicate()
# check
if PY2:
compare(actual, expected=(b'\xa3', b'\xa3'))
else:
compare(actual, expected=(u'', u''))
def test_read_from_stdout_and_stderr_text_mode(self):
Popen = MockPopen()
Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
# usage
process = Popen('a command', stdout=PIPE, stderr=PIPE, text=True)
actual = process.stdout.read(), process.stderr.read()
# check
compare(actual, expected=(u'foo', u'bar'))
def test_write_to_stdin(self):
# setup
Popen = MockPopen()
Popen.set_command('a command')
# usage
process = Popen('a command', stdin=PIPE, shell=True)
process.stdin.write('some text')
# test call list
compare(Popen.mock.method_calls, expected=[
call.Popen('a command', shell=True, stdin=PIPE),
call.Popen_instance.stdin.write('some text'),
])
compare(Popen.all_calls, expected=[
call.Popen('a command', shell=True, stdin=PIPE),
call.Popen('a command', shell=True, stdin=PIPE).stdin.write('some text'),
])
compare(process.mock.method_calls, expected=[
call.stdin.write('some text'),
])
compare(process.calls, expected=[
call.stdin.write('some text'),
])
repr(call.stdin.write('some text'))
def test_wait_and_return_code(self):
# setup
Popen = MockPopen()
Popen.set_command('a command', returncode=3)
# usage
process = Popen('a command')
compare(process.returncode, None)
# result checking
compare(process.wait(), 3)
compare(process.returncode, 3)
# test call list
compare([
call.Popen('a command'),
call.Popen_instance.wait(),
], Popen.mock.method_calls)
def test_wait_timeout(self):
    """wait(timeout=...) is accepted on Python 3 and rejected on Python 2."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', returncode=3)
    proc = mock_popen('a command')
    if PY2:
        # Python 2's subprocess.Popen.wait takes no timeout argument.
        with ShouldRaise(TypeError):
            proc.wait(timeout=1)
        with ShouldRaise(TypeError):
            proc.wait(1)
    else:
        proc.wait(timeout=1)
        proc.wait(1)
    # Both attempts are recorded regardless of whether they raised.
    compare([
        call.Popen('a command'),
        call.Popen_instance.wait(timeout=1),
        call.Popen_instance.wait(1)
    ], expected=mock_popen.mock.method_calls)
def test_multiple_uses(self):
    """Different configured commands can be launched back to back."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', b'a')
    mock_popen.set_command('b command', b'b')
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    out, err = proc.communicate('foo')
    compare(out, b'a')
    # A list form of the command matches the same string specification.
    proc = mock_popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True)
    out, err = proc.communicate('foo')
    compare(out, b'b')
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.communicate('foo'),
        call.Popen(['b', 'command'], shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.communicate('foo'),
    ], mock_popen.mock.method_calls)
def test_send_signal(self):
    """send_signal() calls are tracked on the mock."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    proc.send_signal(0)
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.send_signal(0),
    ], mock_popen.mock.method_calls)
def test_terminate(self):
    """terminate() calls are tracked on the mock."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    proc.terminate()
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.terminate(),
    ], mock_popen.mock.method_calls)
def test_kill(self):
    """kill() calls are tracked on the mock."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    proc.kill()
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.kill(),
    ], mock_popen.mock.method_calls)
def test_all_signals(self):
    """All three signal-sending methods are recorded in order."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command')
    proc.send_signal(signal.SIGINT)
    proc.terminate()
    proc.kill()
    compare([
        call.Popen('a command'),
        call.Popen_instance.send_signal(signal.SIGINT),
        call.Popen_instance.terminate(),
        call.Popen_instance.kill(),
    ], mock_popen.mock.method_calls)
def test_poll_no_setup(self):
    """With no poll_count, poll() stays None until wait() completes."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    compare(proc.poll(), None)
    compare(proc.poll(), None)
    compare(proc.wait(), 0)
    # After wait(), poll() reports the final return code.
    compare(proc.poll(), 0)
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.poll(),
        call.Popen_instance.poll(),
        call.Popen_instance.wait(),
        call.Popen_instance.poll(),
    ], mock_popen.mock.method_calls)
def test_poll_setup(self):
    """poll_count=1 means one None poll() before the code is reported."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', poll_count=1)
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    compare(proc.poll(), None)
    # The second poll() already sees the finished process.
    compare(proc.poll(), 0)
    compare(proc.wait(), 0)
    compare(proc.poll(), 0)
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.poll(),
        call.Popen_instance.poll(),
        call.Popen_instance.wait(),
        call.Popen_instance.poll(),
    ], mock_popen.mock.method_calls)
def test_poll_until_result(self):
    """A poll-until-done loop terminates after poll_count attempts."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', returncode=3, poll_count=2)
    proc = mock_popen('a command')
    # Real code would sleep or do other work between polls.
    while proc.poll() is None:
        pass
    compare(proc.returncode, 3)
    # Two None polls plus the final successful one.
    compare([
        call.Popen('a command'),
        call.Popen_instance.poll(),
        call.Popen_instance.poll(),
        call.Popen_instance.poll(),
    ], mock_popen.mock.method_calls)
def test_command_not_specified(self):
    """Launching an unconfigured command raises a helpful KeyError."""
    mock_popen = MockPopen()
    expected = KeyError("Nothing specified for command 'a command'")
    with ShouldRaise(expected):
        mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
def test_default_command_min_args(self):
    """set_default() with no args gives empty output and returncode 0."""
    mock_popen = MockPopen()
    mock_popen.set_default()
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE)
    # Process is "running": default pid, no return code yet.
    compare(proc.pid, 1234)
    compare(None, proc.returncode)
    out, err = proc.communicate()
    compare(out, b'')
    compare(err, b'')
    compare(proc.returncode, 0)
    compare([
        call.Popen('a command', stderr=-1, stdout=-1),
        call.Popen_instance.communicate(),
    ], mock_popen.mock.method_calls)
def test_default_command_max_args(self):
    """set_default() honours stdout, stderr, returncode and pid."""
    mock_popen = MockPopen()
    mock_popen.set_default(b'out', b'err', 1, 345)
    proc = mock_popen('a command', stdout=PIPE, stderr=PIPE)
    compare(proc.pid, 345)
    compare(None, proc.returncode)
    out, err = proc.communicate()
    compare(out, b'out')
    compare(err, b'err')
    compare(proc.returncode, 1)
    compare([
        call.Popen('a command', stderr=-1, stdout=-1),
        call.Popen_instance.communicate(),
    ], mock_popen.mock.method_calls)
def test_invalid_parameters(self):
    """Unknown constructor keywords raise the usual TypeError."""
    mock_popen = MockPopen()
    expected = TypeError(
        "__init__() got an unexpected keyword argument 'foo'"
    )
    with ShouldRaise(expected):
        mock_popen(foo='bar')
def test_invalid_method_or_attr(self):
    """Calling a method Popen doesn't have raises AttributeError."""
    mock_popen = MockPopen()
    mock_popen.set_command('command')
    proc = mock_popen('command')
    with ShouldRaise(AttributeError):
        proc.foo()
def test_invalid_attribute(self):
    """Accessing an attribute Popen doesn't have raises AttributeError."""
    mock_popen = MockPopen()
    mock_popen.set_command('command')
    proc = mock_popen('command')
    with ShouldRaise(AttributeError):
        proc.foo
def test_invalid_communicate_call(self):
    """communicate() rejects unknown keyword arguments."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    expected = TypeError(
        "communicate() got an unexpected keyword argument 'foo'"
    )
    with ShouldRaise(expected):
        proc.communicate(foo='bar')
def test_invalid_wait_call(self):
    """wait() rejects unknown keyword arguments."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    expected = TypeError(
        "wait() got an unexpected keyword argument 'foo'"
    )
    with ShouldRaise(expected):
        proc.wait(foo='bar')
def test_invalid_send_signal(self):
    """send_signal() rejects unknown keyword arguments."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    expected = TypeError(
        "send_signal() got an unexpected keyword argument 'foo'"
    )
    with ShouldRaise(expected):
        proc.send_signal(foo='bar')
def test_invalid_terminate(self):
    """terminate() rejects unknown keyword arguments."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    expected = TypeError(
        "terminate() got an unexpected keyword argument 'foo'"
    )
    with ShouldRaise(expected):
        proc.terminate(foo='bar')
def test_invalid_kill(self):
    """kill() rejects positional arguments, with a version-specific message."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    # The TypeError wording differs between Python 2 and 3.
    if PY2:
        message = 'kill() takes exactly 1 argument (2 given)'
    else:
        message = 'kill() takes 1 positional argument but 2 were given'
    with ShouldRaise(TypeError(message)):
        proc.kill('moo')
def test_invalid_poll(self):
    """poll() rejects positional arguments, with a version-specific message."""
    mock_popen = MockPopen()
    mock_popen.set_command('bar')
    proc = mock_popen('bar')
    # The TypeError wording differs between Python 2 and 3.
    if PY2:
        message = 'poll() takes exactly 1 argument (2 given)'
    else:
        message = 'poll() takes 1 positional argument but 2 were given'
    with ShouldRaise(TypeError(message)):
        proc.poll('moo')
def test_non_pipe(self):
    """Without PIPE, streams are None and communicate() returns None pairs."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    proc = mock_popen('a command')
    # No PIPE requested, so no stream objects exist.
    compare(proc.stdout, expected=None)
    compare(proc.stderr, expected=None)
    out, err = proc.communicate()
    compare(out, expected=None)
    compare(err, expected=None)
    compare([
        call.Popen('a command'),
        call.Popen_instance.communicate(),
    ], mock_popen.mock.method_calls)
def test_use_as_context_manager(self):
    """Popen works as a context manager on Python 3 only."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    if PY2:
        # Python 2's Popen has no context-manager protocol.
        proc = mock_popen('a command')
        with ShouldRaise(AttributeError):
            proc.__enter__
        with ShouldRaise(AttributeError):
            proc.__exit__
    else:
        with mock_popen('a command', stdout=PIPE, stderr=PIPE) as proc:
            # Process is started but not yet finished.
            compare(proc.pid, 1234)
            compare(None, proc.returncode)
            out, err = proc.communicate()
            compare(out, b'')
            compare(err, b'')
            compare(proc.returncode, 0)
        # Leaving the with-block closes both streams and waits.
        compare(proc.stdout.closed, expected=True)
        compare(proc.stderr.closed, expected=True)
        compare([
            call.Popen('a command', stderr=-1, stdout=-1),
            call.Popen_instance.communicate(),
            call.Popen_instance.wait(),
        ], mock_popen.mock.method_calls)
def test_start_new_session(self):
    """start_new_session is accepted and recorded like any other kwarg."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command')
    mock_popen('a command', start_new_session=True)
    compare([
        call.Popen('a command', start_new_session=True),
    ], mock_popen.mock.method_calls)
def test_simultaneous_processes(self):
    """Two live processes track their calls independently."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', b'a', returncode=1)
    mock_popen.set_command('b command', b'b', returncode=2)
    proc_a = mock_popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    proc_b = mock_popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True)
    compare(proc_a.wait(), expected=1)
    compare(proc_b.wait(), expected=2)
    # all_calls interleaves creation and method calls across processes.
    launch_a = call.Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    launch_b = call.Popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True)
    compare(mock_popen.all_calls, expected=[
        launch_a,
        launch_b,
        launch_a.wait(),
        launch_b.wait(),
    ])
    # Each process only sees its own method calls.
    compare(proc_a.mock.method_calls, expected=[
        call.wait()
    ])
    compare(proc_b.mock.method_calls, expected=[
        call.wait()
    ])
def test_pass_executable(self):
    """The executable keyword is accepted and recorded."""
    mock_popen = MockPopen()
    mock_popen.set_command('a command', b'a', returncode=1)
    mock_popen('a command', executable='/foo/bar')
    compare(mock_popen.all_calls, expected=[
        call.Popen('a command', executable='/foo/bar')
    ])
def test_set_command_with_list(self):
    """Commands may be configured and launched as argument lists."""
    mock_popen = MockPopen()
    mock_popen.set_command(['a', 'command'])
    mock_popen(['a', 'command'], stdout=PIPE, stderr=PIPE)
    compare([call.Popen(['a', 'command'], stderr=-1, stdout=-1)],
            actual=mock_popen.all_calls)
class IntegrationTests(TestCase):
    """Drive the real subprocess helpers against a patched-in MockPopen."""

    def setUp(self):
        # Replace subprocess.Popen (as imported by test_popen) with the mock
        # and make sure the original is restored afterwards.
        self.popen = MockPopen()
        replacer = Replacer()
        replacer.replace('testfixtures.tests.test_popen.subprocess.Popen',
                         self.popen)
        self.addCleanup(replacer.restore)

    def test_command_called_with_check_call_check_returncode(self):
        """check_call() returns 0 for a successful mocked command."""
        self.popen.set_command('ls')
        compare(0, subprocess.check_call(['ls']))

    def test_command_called_with_check_output_check_stdout_returned(self):
        """check_output() returns the mocked stdout."""
        self.popen.set_command('ls', stdout=b'abc')
        compare(b'abc', subprocess.check_output(['ls']))

    def test_command_called_with_check_output_stderr_to_stdout_check_returned(self):
        """stderr=STDOUT folds mocked stderr into the returned output."""
        self.popen.set_command('ls', stderr=b'xyz')
        compare(b'xyz', subprocess.check_output(['ls'], stderr=STDOUT))

    def test_command_called_with_check_call_failing_command_check_exception(self):
        """A non-zero mocked returncode raises CalledProcessError."""
        self.popen.set_command('ls', returncode=1)
        with self.assertRaises(subprocess.CalledProcessError):
            subprocess.check_output(['ls'])
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn import linear
from tensorflow.models.rnn import rnn
from tensorflow.models.rnn import rnn_cell
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
                scope=None):
  """RNN decoder for the sequence-to-sequence model.

  Args:
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    initial_state: 2D Tensor with shape [batch_size x cell.state_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for decoding,
      but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x cell.output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x cell.input_size].
    scope: VariableScope for the created subgraph; defaults to "rnn_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing generated outputs.
    states: The state of each cell in each time-step. This is a list with
      length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
      (Note that in some cases, like basic RNN cell or GRU cell, outputs and
       states can be the same. They are different for LSTM cells though.)
  """
  with tf.variable_scope(scope or "rnn_decoder"):
    states = [initial_state]
    outputs = []
    prev = None  # previous cell output, only tracked when loop_function is set
    for i in xrange(len(decoder_inputs)):
      inp = decoder_inputs[i]
      if loop_function is not None and prev is not None:
        # Feed the previous output back in instead of decoder_inputs[i].
        with tf.variable_scope("loop_function", reuse=True):
          # We do not propagate gradients over the loop function.
          inp = tf.stop_gradient(loop_function(prev, i))
      if i > 0:
        # All time-steps after the first share the cell's variables.
        tf.get_variable_scope().reuse_variables()
      output, new_state = cell(inp, states[-1])
      outputs.append(output)
      states.append(new_state)
      if loop_function is not None:
        prev = tf.stop_gradient(output)
  return outputs, states
def basic_rnn_seq2seq(
    encoder_inputs, decoder_inputs, cell, dtype=tf.float32, scope=None):
  """Basic RNN sequence-to-sequence model.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  and then runs decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell type, but don't
  share parameters.

  Args:
    encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with tf.variable_scope(scope or "basic_rnn_seq2seq"):
    # Encode; only the final encoder state is passed on to the decoder.
    _, enc_states = rnn.rnn(cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, enc_states[-1], cell)
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                     loop_function=None, dtype=tf.float32, scope=None):
  """RNN sequence-to-sequence model with tied encoder and decoder parameters.

  This model first runs an RNN to encode encoder_inputs into a state vector,
  and then runs decoder, initialized with the last encoder state, on
  decoder_inputs. Encoder and decoder use the same RNN cell and share
  parameters.

  Args:
    encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol), see rnn_decoder for details.
    dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  # Encoder and decoder are built inside one outer scope with reuse enabled
  # between them, so they resolve to the same variables (tied parameters).
  with tf.variable_scope("combined_tied_rnn_seq2seq"):
    scope = scope or "tied_rnn_seq2seq"
    _, enc_states = rnn.rnn(
        cell, encoder_inputs, dtype=dtype, scope=scope)
    tf.get_variable_scope().reuse_variables()
    return rnn_decoder(decoder_inputs, enc_states[-1], cell,
                       loop_function=loop_function, scope=scope)
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
                          output_projection=None, feed_previous=False,
                          scope=None):
  """RNN decoder with embedding and a pure-decoding option.

  Args:
    decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
    initial_state: 2D Tensor [batch_size x cell.state_size].
    cell: rnn_cell.RNNCell defining the cell function.
    num_symbols: integer, how many symbols come into the embedding.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [cell.output_size x num_symbols] and B has
      shape [num_symbols]; if provided and feed_previous=True, each fed
      previous output will first be multiplied by W and added B.
    feed_previous: Boolean; if True, only the first of decoder_inputs will be
      used (the "GO" symbol), and all other decoder inputs will be generated
      by: next = embedding_lookup(embedding, argmax(previous_output)).
      In effect, this implements a greedy decoder. It can also be used
      during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
      If False, decoder_inputs are used as given (the standard decoder case).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_rnn_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x cell.output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: when output_projection has the wrong shape.
  """
  if output_projection is not None:
    # Validate the projection shapes eagerly, before building the graph.
    proj_weights = tf.convert_to_tensor(output_projection[0], dtype=tf.float32)
    proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
                                                        num_symbols])
    proj_biases = tf.convert_to_tensor(output_projection[1], dtype=tf.float32)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with tf.variable_scope(scope or "embedding_rnn_decoder"):
    # Keep the (potentially large) embedding matrix on the CPU.
    with tf.device("/cpu:0"):
      embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])

    def extract_argmax_and_embed(prev, _):
      """Loop_function that extracts the symbol from prev and embeds it."""
      if output_projection is not None:
        prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
      # Greedy decoding: take the most likely symbol, no gradient through it.
      prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
      return tf.nn.embedding_lookup(embedding, prev_symbol)

    loop_function = None
    if feed_previous:
      loop_function = extract_argmax_and_embed

    emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
    return rnn_decoder(emb_inp, initial_state, cell,
                       loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                          num_encoder_symbols, num_decoder_symbols,
                          output_projection=None, feed_previous=False,
                          dtype=tf.float32, scope=None):
  """Embedding RNN sequence-to-sequence model.

  This model first embeds encoder_inputs by a newly created embedding (of
  shape [num_encoder_symbols x cell.input_size]). Then it runs an RNN to
  encode embedded encoder_inputs into a state vector. Next, it embeds
  decoder_inputs by another newly created embedding (of shape
  [num_decoder_symbols x cell.input_size]). Then it runs RNN decoder,
  initialized with the last encoder state, on embedded decoder_inputs.

  Args:
    encoder_inputs: a list of 1D int32-Tensors of shape [batch_size].
    decoder_inputs: a list of 1D int32-Tensors of shape [batch_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    num_encoder_symbols: integer; number of symbols on the encoder side.
    num_decoder_symbols: integer; number of symbols on the decoder side.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [cell.output_size x num_decoder_symbols] and B has
      shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_rnn_seq2seq"

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x num_decoder_symbols] containing the generated
      outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with tf.variable_scope(scope or "embedding_rnn_seq2seq"):
    # Encoder: the EmbeddingWrapper turns int32 ids into embedded vectors.
    encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
    _, encoder_states = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)

    # Decoder: without an explicit projection, project outputs to symbols.
    if output_projection is None:
      cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)

    if isinstance(feed_previous, bool):
      return embedding_rnn_decoder(decoder_inputs, encoder_states[-1], cell,
                                   num_decoder_symbols, output_projection,
                                   feed_previous)
    else:  # If feed_previous is a Tensor, we construct 2 graphs and use cond.
      # Build the feed-previous graph first, then reuse its variables for
      # the teacher-forced graph; cond picks one at run time.
      outputs1, states1 = embedding_rnn_decoder(
          decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
          output_projection, True)
      tf.get_variable_scope().reuse_variables()
      outputs2, states2 = embedding_rnn_decoder(
          decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
          output_projection, False)

      outputs = tf.control_flow_ops.cond(feed_previous,
                                         lambda: outputs1, lambda: outputs2)
      states = tf.control_flow_ops.cond(feed_previous,
                                        lambda: states1, lambda: states2)
      return outputs, states
def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                               num_symbols, output_projection=None,
                               feed_previous=False, dtype=tf.float32,
                               scope=None):
  """Embedding RNN sequence-to-sequence model with tied (shared) parameters.

  This model first embeds encoder_inputs by a newly created embedding (of
  shape [num_symbols x cell.input_size]). Then it runs an RNN to encode
  embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
  using the same embedding. Then it runs RNN decoder, initialized with the
  last encoder state, on embedded decoder_inputs.

  Args:
    encoder_inputs: a list of 1D int32-Tensors of shape [batch_size]
      (symbol ids; the function embeds them itself -- NOTE(review): the
      original docstring said 2D embedded tensors, but the code below applies
      tf.nn.embedding_lookup to these inputs).
    decoder_inputs: a list of 1D int32-Tensors of shape [batch_size]
      (symbol ids, embedded the same way as encoder_inputs).
    cell: rnn_cell.RNNCell defining the cell function and size.
    num_symbols: integer; number of symbols for both encoder and decoder.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [cell.output_size x num_symbols] and B has
      shape [num_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype to use for the initial RNN states (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_tied_rnn_seq2seq".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x num_symbols] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: when output_projection has the wrong shape.
  """
  if output_projection is not None:
    # Validate projection shapes eagerly, before building the graph.
    proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
    proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
                                                        num_symbols])
    proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with tf.variable_scope(scope or "embedding_tied_rnn_seq2seq"):
    # One shared embedding for encoder and decoder, kept on the CPU.
    with tf.device("/cpu:0"):
      embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])

    emb_encoder_inputs = [tf.nn.embedding_lookup(embedding, x)
                          for x in encoder_inputs]
    emb_decoder_inputs = [tf.nn.embedding_lookup(embedding, x)
                          for x in decoder_inputs]

    def extract_argmax_and_embed(prev, _):
      """Loop_function that extracts the symbol from prev and embeds it."""
      if output_projection is not None:
        prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
      # Greedy decoding: take the most likely symbol, no gradient through it.
      prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
      return tf.nn.embedding_lookup(embedding, prev_symbol)

    # Without an explicit projection, project outputs to symbol logits.
    if output_projection is None:
      cell = rnn_cell.OutputProjectionWrapper(cell, num_symbols)

    if isinstance(feed_previous, bool):
      loop_function = extract_argmax_and_embed if feed_previous else None
      return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
                              loop_function=loop_function, dtype=dtype)
    else:  # If feed_previous is a Tensor, we construct 2 graphs and use cond.
      # Build the feed-previous graph first, then reuse its variables for
      # the teacher-forced graph; cond picks one at run time.
      outputs1, states1 = tied_rnn_seq2seq(
          emb_encoder_inputs, emb_decoder_inputs, cell,
          loop_function=extract_argmax_and_embed, dtype=dtype)
      tf.get_variable_scope().reuse_variables()
      outputs2, states2 = tied_rnn_seq2seq(
          emb_encoder_inputs, emb_decoder_inputs, cell, dtype=dtype)

      outputs = tf.control_flow_ops.cond(feed_previous,
                                         lambda: outputs1, lambda: outputs2)
      states = tf.control_flow_ops.cond(feed_previous,
                                        lambda: states1, lambda: states2)
      return outputs, states
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
                      output_size=None, num_heads=1, loop_function=None,
                      dtype=tf.float32, scope=None):
  """RNN decoder with attention for the sequence-to-sequence model.

  Args:
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    initial_state: 2D Tensor [batch_size x cell.state_size].
    attention_states: 3D Tensor [batch_size x attn_length x attn_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    output_size: size of the output vectors; if None, we use cell.output_size.
    num_heads: number of attention heads that read from attention_states.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for
      decoding, but also for training to emulate
      http://arxiv.org/pdf/1506.03099v2.pdf.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x cell.output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x cell.input_size].
    dtype: The dtype to use for the RNN initial state (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "attention_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors of
      shape [batch_size x output_size]. These represent the generated outputs.
      Output i is computed from input i (which is either i-th decoder_inputs
      or loop_function(output {i-1}, i)) as follows. First, we run the cell
      on a combination of the input and previous attention masks:
        cell_output, new_state = cell(linear(input, prev_attn), prev_state).
      Then, we calculate new attention masks:
        new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
      and then we calculate the output:
        output = linear(cell_output, new_attn).
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: when num_heads is not positive, there are no inputs, or shapes
      of attention_states are not set.
  """
  if not decoder_inputs:
    raise ValueError("Must provide at least 1 input to attention decoder.")
  if num_heads < 1:
    raise ValueError("With less than 1 heads, use a non-attention decoder.")
  if not attention_states.get_shape()[1:2].is_fully_defined():
    raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                     % attention_states.get_shape())
  if output_size is None:
    output_size = cell.output_size

  with tf.variable_scope(scope or "attention_decoder"):
    batch_size = tf.shape(decoder_inputs[0])[0]  # Needed for reshaping.
    attn_length = attention_states.get_shape()[1].value
    attn_size = attention_states.get_shape()[2].value

    # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape
    # before.
    hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
    hidden_features = []  # W * attention_states, precomputed per head
    v = []  # the V vector of the attention formula, per head
    attention_vec_size = attn_size  # Size of query vectors for attention.
    for a in xrange(num_heads):
      k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
      hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
      v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))

    states = [initial_state]

    def attention(query):
      """Put attention masks on hidden using hidden_features and query."""
      ds = []  # Results of attention reads will be stored here.
      for a in xrange(num_heads):
        with tf.variable_scope("Attention_%d" % a):
          y = linear.linear(query, attention_vec_size, True)
          y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
          # Attention mask is a softmax of v^T * tanh(...).
          s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
          # NOTE(review): `a` is rebound here from the head index to the
          # softmax mask -- confusing but harmless, since the index is not
          # used again within this iteration.
          a = tf.nn.softmax(s)
          # Now calculate the attention-weighted vector d.
          d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                            [1, 2])
          ds.append(tf.reshape(d, [-1, attn_size]))
      return ds

    outputs = []
    prev = None
    # Initial attention vectors are zeros with a statically-known last dim.
    batch_attn_size = tf.pack([batch_size, attn_size])
    attns = [tf.zeros(batch_attn_size, dtype=dtype)
             for _ in xrange(num_heads)]
    for a in attns:  # Ensure the second shape of attention vectors is set.
      a.set_shape([None, attn_size])
    for i in xrange(len(decoder_inputs)):
      if i > 0:
        # All time-steps after the first share variables.
        tf.get_variable_scope().reuse_variables()
      inp = decoder_inputs[i]
      # If loop_function is set, we use it instead of decoder_inputs.
      if loop_function is not None and prev is not None:
        with tf.variable_scope("loop_function", reuse=True):
          inp = tf.stop_gradient(loop_function(prev, i))
      # Merge input and previous attentions into one vector of the right size.
      x = linear.linear([inp] + attns, cell.input_size, True)
      # Run the RNN.
      cell_output, new_state = cell(x, states[-1])
      states.append(new_state)
      # Run the attention mechanism.
      attns = attention(new_state)
      with tf.variable_scope("AttnOutputProjection"):
        output = linear.linear([cell_output] + attns, output_size, True)
      if loop_function is not None:
        # We do not propagate gradients over the loop function.
        prev = tf.stop_gradient(output)
      outputs.append(output)
  return outputs, states
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
                                cell, num_symbols, num_heads=1,
                                output_size=None, output_projection=None,
                                feed_previous=False, dtype=tf.float32,
                                scope=None):
  """RNN decoder with embedding and attention and a pure-decoding option.

  Embeds the integer decoder inputs and runs attention_decoder over them.
  When feed_previous is True, only the first decoder input (the "GO" symbol)
  is used; every later input is the embedding of the argmax of the previous
  output (greedy decoding), optionally passed through output_projection first.

  Args:
    decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
    initial_state: 2D Tensor [batch_size x cell.state_size].
    attention_states: 3D Tensor [batch_size x attn_length x attn_size].
    cell: rnn_cell.RNNCell defining the cell function.
    num_symbols: integer, how many symbols come into the embedding.
    num_heads: number of attention heads that read from attention_states.
    output_size: size of the output vectors; if None, use cell.output_size.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [output_size x num_symbols] and B has shape
      [num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and added B.
    feed_previous: Boolean; if True, run as a greedy decoder (see above).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype to use for the RNN initial states (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_attention_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x output_size] containing the generated outputs.
    states: The state of each decoder cell in each time-step; a list with
      length len(decoder_inputs) of [batch_size x cell.state_size] Tensors.

  Raises:
    ValueError: when output_projection has the wrong shape.
  """
  if output_size is None:
    output_size = cell.output_size
  if output_projection is not None:
    # Fail fast if the projection shapes cannot match the decoder outputs.
    proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
    proj_weights.get_shape().assert_is_compatible_with(
        [cell.output_size, num_symbols])
    proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with tf.variable_scope(scope or "embedding_attention_decoder"):
    # Keep the (potentially large) embedding table on CPU.
    with tf.device("/cpu:0"):
      embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])

    def _extract_argmax_and_embed(prev, _):
      """Loop_function that extracts the symbol from prev and embeds it."""
      if output_projection is not None:
        prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
      # Gradients do not flow through the chosen symbol.
      prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
      return tf.nn.embedding_lookup(embedding, prev_symbol)

    loop_function = _extract_argmax_and_embed if feed_previous else None
    emb_inp = [tf.nn.embedding_lookup(embedding, inp)
               for inp in decoder_inputs]
    return attention_decoder(
        emb_inp, initial_state, attention_states, cell, output_size=output_size,
        num_heads=num_heads, loop_function=loop_function)
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
                                num_encoder_symbols, num_decoder_symbols,
                                num_heads=1, output_projection=None,
                                feed_previous=False, dtype=tf.float32,
                                scope=None):
  """Embedding sequence-to-sequence model with attention.
  This model first embeds encoder_inputs by a newly created embedding (of shape
  [num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
  embedded encoder_inputs into a state vector. It keeps the outputs of this
  RNN at every step to use for attention later. Next, it embeds decoder_inputs
  by another newly created embedding (of shape [num_decoder_symbols x
  cell.input_size]). Then it runs attention decoder, initialized with the last
  encoder state, on embedded decoder_inputs and attending to encoder outputs.
  Args:
    encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    num_encoder_symbols: integer; number of symbols on the encoder side.
    num_decoder_symbols: integer; number of symbols on the decoder side.
    num_heads: number of attention heads that read from attention_states.
    output_projection: None or a pair (W, B) of output projection weights and
      biases; W has shape [cell.output_size x num_decoder_symbols] and B has
      shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and added B.
    feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
      of decoder_inputs will be used (the "GO" symbol), and all other decoder
      inputs will be taken from previous outputs (as in embedding_rnn_decoder).
      If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial RNN state (default: tf.float32).
    scope: VariableScope for the created subgraph; defaults to
      "embedding_attention_seq2seq".
  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x num_decoder_symbols] containing the generated outputs.
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].
  """
  with tf.variable_scope(scope or "embedding_attention_seq2seq"):
    # Encoder.
    encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
    encoder_outputs, encoder_states = rnn.rnn(
        encoder_cell, encoder_inputs, dtype=dtype)
    # First calculate a concatenation of encoder outputs to put attention on.
    # Each per-step output becomes one [batch, 1, output_size] "time" slice,
    # concatenated along axis 1 into [batch, len(encoder_inputs), output_size].
    top_states = [tf.reshape(e, [-1, 1, cell.output_size])
                  for e in encoder_outputs]
    attention_states = tf.concat(1, top_states)
    # Decoder.
    output_size = None
    if output_projection is None:
      # No external projection: wrap the cell so it emits
      # num_decoder_symbols-sized logits directly.
      cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
      output_size = num_decoder_symbols
    if isinstance(feed_previous, bool):
      return embedding_attention_decoder(
          decoder_inputs, encoder_states[-1], attention_states, cell,
          num_decoder_symbols, num_heads, output_size, output_projection,
          feed_previous)
    else:  # If feed_previous is a Tensor, we construct 2 graphs and use cond.
      outputs1, states1 = embedding_attention_decoder(
          decoder_inputs, encoder_states[-1], attention_states, cell,
          num_decoder_symbols, num_heads, output_size, output_projection, True)
      # Both decoder graphs must share parameters, so enable variable reuse
      # before constructing the second one.
      tf.get_variable_scope().reuse_variables()
      outputs2, states2 = embedding_attention_decoder(
          decoder_inputs, encoder_states[-1], attention_states, cell,
          num_decoder_symbols, num_heads, output_size, output_projection, False)
      # At run time, cond selects which pre-built graph actually executes.
      outputs = tf.control_flow_ops.cond(feed_previous,
                                         lambda: outputs1, lambda: outputs2)
      states = tf.control_flow_ops.cond(feed_previous,
                                        lambda: states1, lambda: states2)
      return outputs, states
def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).
  Args:
    logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: list of 1D batch-sized int32-Tensors of the same length as logits.
    weights: list of 1D batch-sized float-Tensors of the same length as logits.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, default: "sequence_loss_by_example".
  Returns:
    1D batch-sized float Tensor: the log-perplexity for each sequence.
  Raises:
    ValueError: if len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with tf.op_scope(logits + targets + weights, name,
                   "sequence_loss_by_example"):
    batch_size = tf.shape(targets[0])[0]
    log_perp_list = []
    # Size of the flattened one-hot buffer built per time-step below.
    length = batch_size * num_decoder_symbols
    for i in xrange(len(logits)):
      if softmax_loss_function is None:
        # TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
        # we need to first cast targets into a dense representation, and as
        # SparseToDense does not accept batched inputs, we need to do this by
        # re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
        # rewrite this method.
        # Offset each example's target id into its own num_decoder_symbols
        # slot so a single flat sparse_to_dense builds the whole batch.
        indices = targets[i] + num_decoder_symbols * tf.range(batch_size)
        with tf.device("/cpu:0"):  # Sparse-to-dense must happen on CPU for now.
          dense = tf.sparse_to_dense(indices, tf.expand_dims(length, 0), 1.0,
                                     0.0)
        # Reshape the flat buffer back into per-example one-hot targets.
        target = tf.reshape(dense, [-1, num_decoder_symbols])
        crossent = tf.nn.softmax_cross_entropy_with_logits(
            logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
      else:
        crossent = softmax_loss_function(logits[i], targets[i])
      # Weight each time-step's loss by its label weight.
      log_perp_list.append(crossent * weights[i])
    log_perps = tf.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = tf.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
def sequence_loss(logits, targets, weights, num_decoder_symbols,
                  average_across_timesteps=True, average_across_batch=True,
                  softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits, batch-collapsed.

  Args:
    logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: list of 1D batch-sized int32-Tensors of the same length as logits.
    weights: list of 1D batch-sized float-Tensors of the same length as logits.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    average_across_batch: If set, divide the returned cost by the batch size.
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, defaults to "sequence_loss".

  Returns:
    A scalar float Tensor: the average log-perplexity per symbol (weighted).

  Raises:
    ValueError: if len(logits) is different from len(targets) or len(weights).
  """
  with tf.op_scope(logits + targets + weights, name, "sequence_loss"):
    # Sum the per-example log-perplexities over the batch.
    per_example = sequence_loss_by_example(
        logits, targets, weights, num_decoder_symbols,
        average_across_timesteps=average_across_timesteps,
        softmax_loss_function=softmax_loss_function)
    cost = tf.reduce_sum(per_example)
    if not average_across_batch:
      return cost
    batch_size = tf.shape(targets[0])[0]
    return cost / tf.cast(batch_size, tf.float32)
def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
                       buckets, num_decoder_symbols, seq2seq,
                       softmax_loss_function=None, name=None):
  """Create a sequence-to-sequence model with support for bucketing.

  The seq2seq argument is a function that defines a sequence-to-sequence model,
  e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))

  Args:
    encoder_inputs: a list of Tensors to feed the encoder; first seq2seq input.
    decoder_inputs: a list of Tensors to feed the decoder; second seq2seq input.
    targets: a list of 1D batch-sized int32-Tensors (desired output sequence).
    weights: list of 1D batch-sized float-Tensors to weight the targets.
    buckets: a list of pairs of (input size, output size) for each bucket.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    seq2seq: a sequence-to-sequence model function; it takes 2 inputs that
      agree with encoder_inputs and decoder_inputs, and returns a pair
      consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, defaults to "model_with_buckets".

  Returns:
    outputs: The outputs for each bucket. Its j'th element consists of a list
      of 2D Tensors of shape [batch_size x num_decoder_symbols] (j'th outputs).
    losses: List of scalar Tensors, representing losses for each bucket.

  Raises:
    ValueError: if length of encoder_inputs, targets, or weights is smaller
      than the largest (last) bucket.
  """
  # The provided sequences must cover the largest (last) bucket.
  if len(encoder_inputs) < buckets[-1][0]:
    raise ValueError("Length of encoder_inputs (%d) must be at least that of "
                     "last bucket (%d)." % (len(encoder_inputs),
                                            buckets[-1][0]))
  if len(targets) < buckets[-1][1]:
    # BUG FIX: the two messages below used to concatenate "last" + "bucket"
    # into "lastbucket" (missing space at the string-literal join).
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))
  if len(weights) < buckets[-1][1]:
    raise ValueError("Length of weights (%d) must be at least that of last "
                     "bucket (%d)." % (len(weights), buckets[-1][1]))

  all_inputs = encoder_inputs + decoder_inputs + targets + weights
  losses = []
  outputs = []
  with tf.op_scope(all_inputs, name, "model_with_buckets"):
    for j in xrange(len(buckets)):
      if j > 0:
        # All buckets share parameters: reuse what bucket 0 created.
        tf.get_variable_scope().reuse_variables()
      # Truncate the inputs to this bucket's (input, output) lengths.
      bucket_encoder_inputs = encoder_inputs[:buckets[j][0]]
      bucket_decoder_inputs = decoder_inputs[:buckets[j][1]]
      bucket_outputs, _ = seq2seq(bucket_encoder_inputs,
                                  bucket_decoder_inputs)
      outputs.append(bucket_outputs)
      bucket_targets = targets[:buckets[j][1]]
      bucket_weights = weights[:buckets[j][1]]
      losses.append(sequence_loss(
          outputs[-1], bucket_targets, bucket_weights, num_decoder_symbols,
          softmax_loss_function=softmax_loss_function))
  return outputs, losses
| |
# -*- coding: utf-8 -*-
import logging
import os
import re
import shutil
import salt.exceptions as exc
logger = logging.getLogger(__name__)
(
UNINSTALLED,
INSTALLED,
UPGRADABLE,
) = range(3)
# Sample output ouf `list-plugins`
#
# translation Translation Assistance plugin 1.12
# maven-plugin Maven Integration plugin 2.7.1 (2.12.1) # noqa
#
_list_re = re.compile(
'(?P<name>\S+)'
'.*?'
'(?P<installed>\d[\d.]*)'
'(?: \((?P<available>\d[\d.-]*)\))?'
'\n',
)
def _info(name):
    """Query the Jenkins CLI for one plugin.

    Returns a 3-tuple ``(status, installed_version, available_version)``
    where status is UNINSTALLED, INSTALLED or UPGRADABLE.
    """
    runcli = __salt__['jenkins.runcli']  # noqa
    try:
        stdout = runcli('list-plugins {0}'.format(name))
    except exc.CommandExecutionError as e:
        # Unknown plugin names are reported, everything else propagates.
        if 'ERROR: No plugin with the name' not in e.message:
            raise
        return UNINSTALLED, 'Error in listing {}'.format(name), None
    match = _list_re.match(stdout)
    if not match:
        return UNINSTALLED, '{} not found'.format(name), None
    _, installed, available = match.groups()
    if available:
        # A parenthesized version means an upgrade is available.
        return UPGRADABLE, installed, available
    return INSTALLED, installed, None
def _install(name, current_version=None, available_version=None):
    """Install (or upgrade) a plugin via the Jenkins CLI.

    Returns a salt state-style dict; in test mode no CLI call is made and
    result is None.
    """
    ret = {
        'name': name,
        'changes': {
            'old': current_version or 'uninstalled',
            'new': available_version or True,
        },
        'result': False,
        'comment': 'Would install %s' % (name,)
    }
    runcli = __salt__['jenkins.runcli']  # noqa
    test = __opts__['test']  # noqa
    if not test:
        try:
            runcli('install-plugin', name)
        except exc.CommandExecutionError as e:
            ret['comment'] = "Failed to install plugins: %s" % (e.message,)
            return ret
        ret['comment'] = 'Plugin installed successfully'
    # Salt convention: None in test mode, True on success.
    ret['result'] = None if test else True
    return ret
def installed(name, update=False):
    """Ensures jenkins plugins are present.

    name
        The name of one specific plugin to ensure (a plain plugin name or a
        path ending in ``.hpi``).
    update
        If True, upgrade the plugin when a newer version is available.
    """
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {},
    }
    # NOTE(review): these two lookups appear unused here, but they fail fast
    # when the jenkins module/test opts are unavailable -- kept as-is.
    runcli = __salt__['jenkins.runcli']  # noqa
    test = __opts__['test']  # noqa
    # Accept either a plain plugin name or a path to an .hpi archive.
    if name.endswith('.hpi'):
        plugin_name = os.path.basename(name[:-4])
    else:
        plugin_name = name
    try:
        status, installed, available = _info(plugin_name)
    except exc.CommandExecutionError as e:
        ret['comment'] = e.message
        return ret
    if status == UNINSTALLED:
        ret = _install(name)
    elif status == INSTALLED:
        ret['comment'] = 'Already installed'
        ret['result'] = True
    elif status == UPGRADABLE:
        if not update:
            ret['comment'] = 'Not updated'
            ret['result'] = True
        else:
            ret = _install(name, installed, available)
    return ret
def _uninstall(name):
    """Delete plugin files/directories for *name* from the plugins dir.

    Returns the list of removed paths (empty if nothing matched). In test
    mode nothing is deleted but the would-be paths are still returned.
    """
    removed_paths = []
    home = __pillar__['jenkins'].get('home', '/var/lib/jenkins')  # noqa
    plugin_dir = os.path.join(home, 'plugins')
    test = __opts__['test']  # noqa
    # Packaged plugin archive names that count as this plugin.
    archive_names = ['{0}{1}'.format(name, ext) for ext in ['.hpi', '.jpi']]
    for item in os.listdir(plugin_dir):
        if not item.startswith(name):
            continue
        path = os.path.join(plugin_dir, item)
        if item == name and os.path.isdir(path):
            # Exploded plugin directory.
            if not test:
                shutil.rmtree(path)
            removed_paths.append(path)
        elif item in archive_names:
            if not test:
                os.remove(path)
            removed_paths.append(path)
    return removed_paths
def removed(name):
    """Ensure the jenkins plugin *name* is absent.

    name
        The name of one specific plugin to remove.

    Returns a salt state-style dict.
    """
    ret = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': ''
    }
    # get info before removal
    try:
        # BUG FIX: _info() returns a 3-tuple (status, installed, available);
        # the previous 2-name unpacking raised ValueError on every call.
        status, info, _available = _info(name)
    except exc.CommandExecutionError as e:
        ret['comment'] = e.message
        return ret
    # removed
    # NOTE(review): an UPGRADABLE plugin is also installed but is left in
    # place here -- confirm whether that is intentional.
    if status == INSTALLED and _uninstall(name):
        ret['changes'] = {
            'old': info,
            'new': None,
        }
    ret['result'] = None if __opts__['test'] else True  # noqa
    return ret
def updated(name, skipped=None, updateall=True):
    """Updates jenkins plugins.

    name
        The name of one specific plugin to update
    skipped
        The names of plugins to skip from update.
    updateall
        Boolean flag if we want to update all the updateable plugins
        (default: True).
    """
    ret = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': ''
    }
    # Empty update_list means "consider every plugin"; otherwise only `name`.
    update_list = [] if updateall else [name]
    skipped = skipped or []
    runcli = __salt__['jenkins.runcli']  # noqa
    test = __opts__['test']  # noqa
    try:
        stdout = runcli('list-plugins')
    except exc.CommandExecutionError as e:
        ret['comment'] = "Failed to list plugins: %r" % (e.message,)
        return ret
    for line in stdout.splitlines():
        # BUG FIX: _list_re requires a trailing '\n', but splitlines() strips
        # line terminators, so matching the bare line never succeeded and no
        # plugin was ever updated. Re-append the newline before matching.
        m = _list_re.match(line + '\n')
        if not m:
            continue
        # Renamed from (name, current, update) to stop shadowing the `name`
        # parameter and the `update`/builtin-ish names.
        plugin, current, available = m.groups()
        if update_list and plugin not in update_list:
            continue
        if not available:
            # No parenthesized version: already up to date.
            continue
        if plugin in skipped:
            logger.debug("%s %s available, but skipping", plugin, available)
            continue
        if not test:
            try:
                runcli('install-plugin', plugin)
            except exc.CommandExecutionError as e:
                # BUG FIX: corrected "instal" typo in the error message.
                ret['comment'] = "Failed to install plugins: %s" % (e.message,)
                return ret
        ret['changes'][plugin] = {
            'old': current,
            'new': available,
        }
    ret['comment'] = 'Plugins uptodate'
    # Salt convention: None when test mode would make changes, else True.
    ret['result'] = None if test and ret['changes'] else True
    return ret
| |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Reports URL handlers."""
import datetime
import json
import logging
import os
import re
import urllib
from simian.auth import gaeserver
from simian.mac import common as main_common
from simian.mac import models
from simian.mac.common import gae_util
from simian.mac.common import util
from simian.mac.munki import common
from simian.mac.munki import handlers
# int number of days after which postflight_datetime is considered stale.
FORCE_CONTINUE_POSTFLIGHT_DAYS = 5
# int number of failed Munki executions before instructing the client to repair.
REPAIR_CLIENT_PREFLIGHT_COUNT_SINCE_POSTFLIGHT = 5
# Status markers embedded in legacy install-result strings.
INSTALL_RESULT_FAILED = 'FAILED with return code'
INSTALL_RESULT_SUCCESSFUL = 'SUCCESSFUL'
# Prefix written before JSON responses; presumably an anti-XSSI guard that
# clients strip before parsing -- TODO confirm against client code.
JSON_PREFIX = ')]}\',\n'
# Parses 'Install of <name>-<version>: SUCCESSFUL' or
# 'Install of <name>-<version>: FAILED with return code: <rc>'.
LEGACY_INSTALL_RESULTS_STRING_REGEX = re.compile(
    r'^Install of (.*)-(\d+.*): (%s|%s: (\-?\d+))$' % (
        INSTALL_RESULT_SUCCESSFUL, INSTALL_RESULT_FAILED))
# Example: 'iLife 11: Download failed (Error -1001: The request timed out.)'
DOWNLOAD_FAILED_STRING_REGEX = re.compile(
    r'([\s\w\.\-]+): Download failed \((.*)\)')
# For legacy clients that do not support multiple feedback commands via JSON,
# this list is used to determine which single command to send, if any, in
# increasing importance order.
LEGACY_FEEDBACK_LIST = ['EXIT', 'FORCE_CONTINUE', 'REPAIR', 'UPLOAD_LOGS']
def IsExitFeedbackIpAddress(ip_address):
  """Is this an IP address that should result in an exit feedback?
  Args:
    ip_address: str, like "1.2.3.4"
  Returns:
    True if this IP address should result in exit feedback
  """
  # NOTE: for a falsy ip_address this returns the value itself (e.g. '' or
  # None) rather than False; callers use it only in boolean context.
  return (
      ip_address and
      models.KeyValueCache.IpInList('client_exit_ip_blocks', ip_address))
class Reports(handlers.AuthenticationHandler):
  """Handler for /reports/."""
  def GetReportFeedback(self, uuid, report_type, **kwargs):
    """Inspect a report and provide a feedback status/command.
    Args:
      uuid: str, computer uuid
      report_type: str, report type
      **kwargs: dict, additional report parameters, e.g:
          on_corp: str, optional, '1' or '0', on_corp status
          message: str, optional, message from client
          details: str, optional, details from client
          ip_address: str, optional, IP address of client
    Returns:
      common.ReportFeedback.* constant
    """
    feedback = {}
    # Prefer a Computer entity supplied by the caller (avoids a second
    # datastore fetch); otherwise look it up by uuid.
    if 'computer' in kwargs:
      c = kwargs['computer']
    else:
      c = models.Computer.get_by_key_name(uuid)
    ip_address = kwargs.get('ip_address', None)
    client_exit = kwargs.get('client_exit', None)
    if client_exit and report_type == 'preflight':
      # client has requested an exit, but let's ensure we should allow it.
      if c is None or c.postflight_datetime is None:
        # client has never fully executed Munki.
        feedback['force_continue'] = True
      else:
        # check if the postflight_datetime warrants a FORCE_CONTINUE
        now = datetime.datetime.utcnow()
        postflight_stale_datetime = now - datetime.timedelta(
            days=FORCE_CONTINUE_POSTFLIGHT_DAYS)
        if c.postflight_datetime < postflight_stale_datetime:
          # client hasn't executed Munki in FORCE_CONTINUE_POSTFLIGHT_DAYS.
          feedback['force_continue'] = True
        else:
          # Recent successful run: honor the requested exit.
          feedback['exit'] = True
    elif report_type == 'preflight':
      if IsExitFeedbackIpAddress(ip_address):
        feedback['exit'] = True
      elif common.IsPanicModeNoPackages():
        feedback['exit'] = True
      elif not c or c.preflight_datetime is None:
        # this is the first preflight post from this client.
        feedback['force_continue'] = True
      elif getattr(c, 'upload_logs_and_notify', None) is not None:
        # An admin asked for this client's logs; raise verbosity and upload.
        feedback['logging_level'] = 3
        feedback['upload_logs'] = True
      else:
        # check if preflight_count_since_postflight warrants a repair.
        if (c.preflight_count_since_postflight >=
            REPAIR_CLIENT_PREFLIGHT_COUNT_SINCE_POSTFLIGHT):
          # disable repair since it's non-functional
          # TODO(b/111302920): re-enable after repair functionality is fixed.
          feedback['pkill_installd'] = True
          feedback['pkill_softwareupdated'] = True
          # feedback['repair'] = True
          feedback['logging_level'] = 3
          feedback['upload_logs'] = True
    return feedback
  def _LogInstalls(self, installs, computer):
    """Logs a batch of installs for a given computer.
    Args:
      installs: list, of str install data from a preflight/postflight report.
      computer: models.Computer entity.
    """
    if not installs:
      return
    # Normalize the '1'/'0' request flag into True/False/None (unknown).
    on_corp = self.request.get('on_corp')
    if on_corp == '1':
      on_corp = True
    elif on_corp == '0':
      on_corp = False
    else:
      on_corp = None
    to_put = []
    for install in installs:
      if install.startswith('Install of'):
        # Defaults used when the legacy string cannot be fully parsed.
        d = {
            'applesus': 'false',
            'duration_seconds': None,
            'download_kbytes_per_sec': None,
            'name': install,
            'status': 'UNKNOWN',
            'version': '',
            'unattended': 'false',
        }
        # support for old 'Install of FooPkg-1.0: SUCCESSFUL' style strings.
        try:
          m = LEGACY_INSTALL_RESULTS_STRING_REGEX.search(install)
          if not m:
            raise ValueError
          elif m.group(3) == INSTALL_RESULT_SUCCESSFUL:
            d['status'] = 0
          else:
            # Failure: group(4) holds the numeric return code.
            d['status'] = m.group(4)
          d['name'] = m.group(1)
          d['version'] = m.group(2)
        except (IndexError, AttributeError, ValueError):
          logging.warning('Unknown install string format: %s', install)
      else:
        # support for new 'name=pkg|version=foo|...' style strings.
        d = common.KeyValueStringToDict(install)
      name = d.get('display_name', '') or d.get('name', '')
      version = d.get('version', '')
      status = str(d.get('status', ''))
      applesus = common.GetBoolValueFromString(d.get('applesus', '0'))
      unattended = common.GetBoolValueFromString(d.get('unattended', '0'))
      try:
        duration_seconds = int(d.get('duration_seconds', None))
      except (TypeError, ValueError):
        duration_seconds = None
      try:
        dl_kbytes_per_sec = int(d.get('download_kbytes_per_sec', None))
        # Ignore zero KB/s download speeds, as that's how Munki reports
        # unknown speed.
        if dl_kbytes_per_sec == 0:
          dl_kbytes_per_sec = None
      except (TypeError, ValueError):
        dl_kbytes_per_sec = None
      # Fall back to "now" for missing/invalid/future client timestamps.
      try:
        install_datetime = util.Datetime.utcfromtimestamp(d.get('time', None))
      except ValueError as e:
        logging.info('Ignoring invalid install_datetime: %s', str(e))
        install_datetime = datetime.datetime.utcnow()
      except util.EpochExtremeFutureValueError as e:
        logging.info('Ignoring extreme future install_datetime: %s', str(e))
        install_datetime = datetime.datetime.utcnow()
      except util.EpochFutureValueError:
        install_datetime = datetime.datetime.utcnow()
      pkg = '%s-%s' % (name, version)
      entity = models.InstallLog(
          uuid=computer.uuid, computer=computer, package=pkg, status=status,
          on_corp=on_corp, applesus=applesus, unattended=unattended,
          duration_seconds=duration_seconds, mtime=install_datetime,
          dl_kbytes_per_sec=dl_kbytes_per_sec)
      entity.success = entity.IsSuccess()
      to_put.append(entity)
    # Single batched datastore write for all install log entities.
    gae_util.BatchDatastoreOp(models.db.put, to_put)
  def post(self):
    """Reports get handler.
    Returns:
      A webapp.Response() response.
    """
    session = gaeserver.DoMunkiAuth()
    uuid = main_common.SanitizeUUID(session.uuid)
    report_type = self.request.get('_report_type')
    report_feedback = {}
    message = None
    details = None
    client_id = None
    computer = None
    if report_type == 'preflight' or report_type == 'postflight':
      client_id_str = urllib.unquote(self.request.get('client_id'))
      client_id = common.ParseClientId(client_id_str, uuid=uuid)
      user_settings_str = self.request.get('user_settings')
      user_settings = None
      try:
        if user_settings_str:
          user_settings = util.Deserialize(
              urllib.unquote(str(user_settings_str)))
      except util.DeserializeError:
        # Broken settings are logged and ignored; the report still counts.
        logging.warning(
            'Client %s sent broken user_settings: %s',
            client_id_str, user_settings_str)
      pkgs_to_install = self.request.get_all('pkgs_to_install')
      apple_updates_to_install = self.request.get_all(
          'apple_updates_to_install')
      computer = models.Computer.get_by_key_name(uuid)
      ip_address = os.environ.get('REMOTE_ADDR', '')
      if report_type == 'preflight':
        # we want to get feedback now, before preflight_datetime changes.
        client_exit = self.request.get('client_exit', None)
        report_feedback = self.GetReportFeedback(
            uuid, report_type, computer=computer, ip_address=ip_address,
            client_exit=client_exit)
        if self.request.get('json') == '1':
          self.response.out.write(JSON_PREFIX + json.dumps(report_feedback))
        else:
          # For legacy clients that accept a single string, not JSON.
          # Later (more important) entries in LEGACY_FEEDBACK_LIST win.
          feedback_to_send = 'OK'
          for feedback in LEGACY_FEEDBACK_LIST:
            if report_feedback.get(feedback.lower()):
              feedback_to_send = feedback
          self.response.out.write(feedback_to_send)
        # if report feedback calls for a client exit, log it.
        if report_feedback.get('exit'):
          if not client_exit:
            # client didn't ask for an exit, which means server decided.
            client_exit = 'Connection from defined exit IP address'
          common.WriteClientLog(
              models.PreflightExitLog, uuid, computer=computer,
              exit_reason=client_exit)
      cert_fingerprint = None
      common.LogClientConnection(
          report_type, client_id, user_settings, pkgs_to_install,
          apple_updates_to_install, computer=computer, ip_address=ip_address,
          report_feedback=report_feedback, cert_fingerprint=cert_fingerprint)
    elif report_type == 'install_report':
      computer = models.Computer.get_by_key_name(uuid)
      self._LogInstalls(self.request.get_all('installs'), computer)
      for removal in self.request.get_all('removals'):
        common.WriteClientLog(
            models.ClientLog, uuid, computer=computer, action='removal',
            details=removal)
      for problem in self.request.get_all('problem_installs'):
        common.WriteClientLog(
            models.ClientLog, uuid, computer=computer,
            action='install_problem', details=problem)
    elif report_type == 'broken_client':
      # Default reason of "objc" to support legacy clients, existing when objc
      # was the only broken state ever reported.
      reason = self.request.get('reason', 'objc')
      details = self.request.get('details')
      logging.warning('Broken Munki client (%s): %s', reason, details)
      common.WriteBrokenClient(uuid, reason, details)
    elif report_type == 'msu_log':
      details = {}
      for k in ['time', 'user', 'source', 'event', 'desc']:
        details[k] = self.request.get(k, None)
      common.WriteComputerMSULog(uuid, details)
    else:
      # unknown report type; log all post params.
      params = []
      for param in self.request.arguments():
        params.append('%s=%s' % (param, self.request.get_all(param)))
      common.WriteClientLog(
          models.ClientLog, uuid, action='unknown', details=str(params))
| |
# -*- coding: utf-8 -*-
"""
test_http11.py
~~~~~~~~~~~~~~
Unit tests for hyper's HTTP/1.1 implementation.
"""
import os
import zlib
from collections import namedtuple
from io import BytesIO, StringIO
import mock
import pytest
import hyper
from hyper.http11.connection import HTTP11Connection
from hyper.http11.response import HTTP11Response
from hyper.common.headers import HTTPHeaderMap
from hyper.common.exceptions import ChunkedDecodeError, ConnectionResetError
from hyper.common.util import HTTPVersion
from hyper.compat import bytes, zlib_compressobj
class TestHTTP11Connection(object):
def test_pycohttpparser_installs_correctly(self):
# This test is a debugging tool: if pycohttpparser is being tested by
# Travis, we need to confirm it imports correctly. Hyper will normally
# hide the import failure, so let's discover it here.
# Alternatively, if we are *not* testing with nghttp2, this test should
# confirm that it's not available.
if os.environ.get('HYPER_FAST_PARSE') == 'true':
import pycohttpparser
else:
with pytest.raises(ImportError):
import pycohttpparser # noqa
assert True
def test_initialization_no_port(self):
c = HTTP11Connection('httpbin.org')
assert c.host == 'httpbin.org'
assert c.port == 80
assert not c.secure
assert not c.proxy_host
def test_initialization_inline_port(self):
c = HTTP11Connection('httpbin.org:443')
assert c.host == 'httpbin.org'
assert c.port == 443
assert c.secure
assert not c.proxy_host
def test_initialization_separate_port(self):
c = HTTP11Connection('localhost', 8080)
assert c.host == 'localhost'
assert c.port == 8080
assert not c.secure
assert not c.proxy_host
def test_can_override_security(self):
c = HTTP11Connection('localhost', 443, secure=False)
assert c.host == 'localhost'
assert c.port == 443
assert not c.secure
assert not c.proxy_host
def test_initialization_proxy(self):
c = HTTP11Connection('httpbin.org', proxy_host='localhost')
assert c.host == 'httpbin.org'
assert c.port == 80
assert not c.secure
assert c.proxy_host == 'localhost'
assert c.proxy_port == 8080
def test_initialization_proxy_with_inline_port(self):
c = HTTP11Connection('httpbin.org', proxy_host='localhost:8443')
assert c.host == 'httpbin.org'
assert c.port == 80
assert not c.secure
assert c.proxy_host == 'localhost'
assert c.proxy_port == 8443
def test_initialization_proxy_with_separate_port(self):
c = HTTP11Connection(
'httpbin.org', proxy_host='localhost', proxy_port=8443
)
assert c.host == 'httpbin.org'
assert c.port == 80
assert not c.secure
assert c.proxy_host == 'localhost'
assert c.proxy_port == 8443
def test_initialization_with_ipv6_addresses_proxy_inline_port(self):
c = HTTP11Connection(
'[abcd:dcba::1234]', proxy_host='[ffff:aaaa::1]:8443'
)
assert c.host == 'abcd:dcba::1234'
assert c.port == 80
assert not c.secure
assert c.proxy_host == 'ffff:aaaa::1'
assert c.proxy_port == 8443
    # --- Request serialisation over a DummySocket ---

    def test_basic_request(self):
        # A first request carries the h2c upgrade headers plus a host header.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        c.request('GET', '/get', headers={'User-Agent': 'hyper'})
        expected = (
            b"GET /get HTTP/1.1\r\n"
            b"User-Agent: hyper\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
        )
        received = b''.join(sock.queue)
        assert received == expected

    def test_iterable_header(self):
        # Header tuples preserve order and allow repeated field names.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        c.request('GET', '/get', headers=(
            ('User-Agent', 'hyper'),
            ('Custom-field', 'test'),
            ('Custom-field2', 'test'),
            ('Custom-field', 'test2'),
        ))
        expected = (
            b"GET /get HTTP/1.1\r\n"
            b"User-Agent: hyper\r\n"
            b"Custom-field: test\r\n"
            b"Custom-field2: test\r\n"
            b"Custom-field: test2\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
        )
        received = b''.join(sock.queue)
        assert received == expected

    def test_invalid_header(self):
        # Non-iterable header containers are rejected with ValueError.
        c = HTTP11Connection('httpbin.org')
        c._sock = DummySocket()
        with pytest.raises(ValueError):
            c.request('GET', '/get', headers=42)
    def test_proxy_request(self):
        # Through a proxy the request line and headers are unchanged here.
        c = HTTP11Connection('httpbin.org', proxy_host='localhost')
        c._sock = sock = DummySocket()
        c.request('GET', '/get', headers={'User-Agent': 'hyper'})
        expected = (
            b"GET /get HTTP/1.1\r\n"
            b"User-Agent: hyper\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
        )
        received = b''.join(sock.queue)
        assert received == expected

    def test_request_with_bytestring_body(self):
        # A bytes body gets an automatic content-length header.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        c.request(
            'POST',
            '/post',
            headers=HTTPHeaderMap([('User-Agent', 'hyper')]),
            body=b'hi'
        )
        expected = (
            b"POST /post HTTP/1.1\r\n"
            b"User-Agent: hyper\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"content-length: 2\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
            b"hi"
        )
        received = b''.join(sock.queue)
        assert received == expected

    def test_request_with_file_body(self):
        # Testing this is tricksy: in practice, we do this by passing a fake
        # file and monkeypatching out 'os.fstat'. This makes it look like a
        # real file.
        FstatRval = namedtuple('FstatRval', ['st_size'])

        def fake_fstat(*args):
            return FstatRval(16)

        old_fstat = hyper.http11.connection.os.fstat

        try:
            hyper.http11.connection.os.fstat = fake_fstat
            c = HTTP11Connection('httpbin.org')
            c._sock = sock = DummySocket()
            f = DummyFile(b'some binary data')
            c.request('POST', '/post', body=f)
            # content-length comes from the (fake) fstat, not the data.
            expected = (
                b"POST /post HTTP/1.1\r\n"
                b"connection: Upgrade, HTTP2-Settings\r\n"
                b"upgrade: h2c\r\n"
                b"HTTP2-Settings: AAQAAP__\r\n"
                b"content-length: 16\r\n"
                b"host: httpbin.org\r\n"
                b"\r\n"
                b"some binary data"
            )
            received = b''.join(sock.queue)
            assert received == expected
        finally:
            # Put back the monkeypatch.
            hyper.http11.connection.os.fstat = old_fstat

    def test_request_with_generator_body(self):
        # Generator bodies are sent with chunked transfer-encoding,
        # one chunk per yielded bytestring, terminated by a zero chunk.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()

        def body():
            yield b'hi'
            yield b'there'
            yield b'sir'

        c.request('POST', '/post', body=body())
        expected = (
            b"POST /post HTTP/1.1\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"transfer-encoding: chunked\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
            b"2\r\nhi\r\n"
            b"5\r\nthere\r\n"
            b"3\r\nsir\r\n"
            b"0\r\n\r\n"
        )
        received = b''.join(sock.queue)
        assert received == expected
    def test_content_length_overrides_generator(self):
        # An explicit content-length suppresses chunked framing even for a
        # generator body: the yielded pieces are concatenated as-is.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()

        def body():
            yield b'hi'
            yield b'there'
            yield b'sir'

        c.request(
            'POST', '/post', body=body(), headers={b'content-length': b'10'}
        )
        expected = (
            b"POST /post HTTP/1.1\r\n"
            b"content-length: 10\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
            b"hitheresir"
        )
        received = b''.join(sock.queue)
        assert received == expected

    def test_chunked_overrides_body(self):
        # An explicit transfer-encoding: chunked on a file body chunks it
        # line-by-line ("oneline\n" = 8 bytes, "anotherline" = 0xb bytes).
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        f = DummyFile(b'oneline\nanotherline')
        c.request(
            'POST',
            '/post',
            headers={'transfer-encoding': 'chunked'},
            body=f
        )
        expected = (
            b"POST /post HTTP/1.1\r\n"
            b"transfer-encoding: chunked\r\n"
            b"connection: Upgrade, HTTP2-Settings\r\n"
            b"upgrade: h2c\r\n"
            b"HTTP2-Settings: AAQAAP__\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
            b"8\r\noneline\n\r\n"
            b"b\r\nanotherline\r\n"
            b"0\r\n\r\n"
        )
        received = b''.join(sock.queue)
        assert received == expected
    # --- Response parsing ---

    def test_response_with_empty_reason(self):
        # A status line with a trailing space but no reason phrase parses
        # to an empty reason.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        sock._buffer = BytesIO(
            b"HTTP/1.1 201 \r\n"
            b"Connection: close\r\n"
            b"Server: Socket\r\n"
            b"Content-Length: 0\r\n"
            b"\r\n"
        )
        r = c.get_response()
        assert r.status == 201
        assert r.reason == b''

    def test_get_response(self):
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        sock._buffer = BytesIO(
            b"HTTP/1.1 201 No Content\r\n"
            b"Connection: close\r\n"
            b"Server: Socket\r\n"
            b"Content-Length: 0\r\n"
            b"\r\n"
        )
        r = c.get_response()
        assert r.status == 201
        assert r.reason == b'No Content'
        # Raw headers preserve the original casing and order.
        assert list(r.headers.iter_raw()) == [
            (b'Connection', b'close'),
            (b'Server', b'Socket'),
            (b'Content-Length', b'0')
        ]
        assert r.read() == b''

    def test_response_short_reads(self):
        # Bounded read(n) returns at most n bytes until the body is drained.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        sock._buffer = BytesIO(
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Length: 15\r\n"
            b"\r\n"
            b"hellotherechamp"
        )
        r = c.get_response()
        assert r.status == 200
        assert r.reason == b'OK'
        assert r.read(5) == b'hello'
        assert r.read(5) == b'there'
        assert r.read(5) == b'champ'
        assert r.read(5) == b''
    # --- Unicode bodies are rejected: only bytes may hit the wire ---

    def test_request_with_unicodestring_body(self):
        c = HTTP11Connection('httpbin.org')
        c._sock = DummySocket()
        with pytest.raises(ValueError):
            c.request(
                'POST',
                '/post',
                headers=HTTPHeaderMap([('User-Agent', 'hyper')]),
                body=u'hi'
            )

    def test_request_with_file_body_in_text_mode(self):
        # Testing this is tricksy: in practice, we do this by passing a fake
        # file and monkeypatching out 'os.fstat'. This makes it look like a
        # real file.
        FstatRval = namedtuple('FstatRval', ['st_size'])

        def fake_fstat(*args):
            return FstatRval(16)

        old_fstat = hyper.http11.connection.os.fstat

        try:
            hyper.http11.connection.os.fstat = fake_fstat
            c = HTTP11Connection('httpbin.org')
            c._sock = DummySocket()
            f = DummyFile(b'')
            # Swap in a text-mode buffer: reads return str, which must fail.
            f.buffer = StringIO(u'some binary data')
            with pytest.raises(ValueError):
                c.request('POST', '/post', body=f)
        finally:
            # Put back the monkeypatch.
            hyper.http11.connection.os.fstat = old_fstat

    def test_request_with_unicode_generator_body(self):
        c = HTTP11Connection('httpbin.org')
        c._sock = DummySocket()

        def body():
            yield u'hi'
            yield u'there'
            yield u'sir'

        with pytest.raises(ValueError):
            c.request('POST', '/post', body=body())

    def test_content_length_overrides_generator_unicode(self):
        # Even with explicit content-length, unicode chunks are rejected.
        c = HTTP11Connection('httpbin.org')
        c._sock = DummySocket()

        def body():
            yield u'hi'
            yield u'there'
            yield u'sir'

        with pytest.raises(ValueError):
            c.request(
                'POST',
                '/post',
                headers={b'content-length': b'10'},
                body=body()
            )
    def test_http_upgrade_headers_only_sent_once(self):
        # The h2c upgrade offer is made on the first request only.
        c = HTTP11Connection('httpbin.org')
        c._sock = sock = DummySocket()
        c.request('GET', '/get', headers={'User-Agent': 'hyper'})
        sock.queue = []
        c.request('GET', '/get', headers={'User-Agent': 'hyper'})
        received = b''.join(sock.queue)
        expected = (
            b"GET /get HTTP/1.1\r\n"
            b"User-Agent: hyper\r\n"
            b"host: httpbin.org\r\n"
            b"\r\n"
        )
        assert received == expected

    def test_exception_raised_for_illegal_body_type(self):
        c = HTTP11Connection('httpbin.org')
        with pytest.raises(ValueError) as exc_info:
            body = 1234
            # content-length set so body type is set to BODY_FLAT. value
            # doesn't matter
            c.request(
                'GET',
                '/get',
                body=body,
                headers={'content-length': str(len(str(body)))}
            )
            assert 'Request body must be a bytestring, a file-like object ' \
                   'returning bytestrings or an iterable of bytestrings. ' \
                   'Got: {}'.format(type(body)) in str(exc_info)

    def test_exception_raised_for_illegal_elements_in_iterable_body(self):
        c = HTTP11Connection('httpbin.org')
        rogue_element = 123
        body = [b'legal1', b'legal2', rogue_element]
        body_size = sum(len(bytes(x)) for x in body)
        with pytest.raises(ValueError) as exc_info:
            # content-length set so body type is set to BODY_FLAT. value
            # doesn't matter
            c.request(
                'GET',
                '/get',
                body=body,
                headers={'content-length': str(body_size)}
            )
            assert 'Elements in iterable body must be bytestrings. Illegal ' \
                   'element: {}'.format(rogue_element) \
                   in str(exc_info)

    def test_exception_raised_for_filelike_body_not_returning_bytes(self):
        c = HTTP11Connection('httpbin.org')

        # A file-like object whose read() returns a non-bytes value.
        class RogueFile(object):
            def read(self, size):
                return 42

        with pytest.raises(ValueError) as exc_info:
            # content-length set so body type is BODY_FLAT. value doesn't
            # matter
            c.request(
                'GET',
                '/get',
                body=RogueFile(),
                headers={'content-length': str(10)}
            )
            assert 'File-like bodies must return bytestrings. ' \
                   'Got: {}'.format(int) in str(exc_info)

    def test_close_with_uninitialized_socket(self):
        # close() must not blow up before connect() created a socket.
        c = HTTP11Connection('httpbin.org')
        c.close()
class TestHTTP11Response(object):
    """Tests for HTTP11Response: read()/read_chunked() semantics, transparent
    gzip/deflate decoding, and the close-callback contract.
    """

    def test_short_circuit_read(self):
        # content-length: 0 means read() never touches the socket (None here).
        r = HTTP11Response(200, 'OK', {b'content-length': [b'0']}, None, None)
        assert r.read() == b''

    def test_aborted_reads(self):
        # Socket drains before content-length is satisfied -> reset error.
        d = DummySocket()
        r = HTTP11Response(200, 'OK', {b'content-length': [b'15']}, d, None)
        with pytest.raises(ConnectionResetError):
            r.read()

    def test_read_expect_close(self):
        # connection: close with an empty socket just yields an empty body.
        d = DummySocket()
        r = HTTP11Response(200, 'OK', {b'connection': [b'close']}, d, None)
        assert r.read() == b''

    def test_response_as_context_manager(self):
        # Leaving the with-block releases the socket.
        r = HTTP11Response(
            200, 'OK', {b'content-length': [b'0']}, DummySocket(), None
        )
        with r:
            assert r.read() == b''
        assert r._sock is None

    def test_response_transparently_decrypts_gzip(self):
        d = DummySocket()
        headers = {b'content-encoding': [b'gzip'], b'connection': [b'close']}
        r = HTTP11Response(200, 'OK', headers, d, None)
        # wbits=24 -> zlib produces a gzip-wrapped stream.
        c = zlib_compressobj(wbits=24)
        body = c.compress(b'this is test data')
        body += c.flush()
        d._buffer = BytesIO(body)
        assert r.read() == b'this is test data'

    def test_response_transparently_decrypts_real_deflate(self):
        d = DummySocket()
        headers = {
            b'content-encoding': [b'deflate'],
            b'connection': [b'close'],
        }
        r = HTTP11Response(200, 'OK', headers, d, None)
        # Spec-compliant deflate: zlib-wrapped stream.
        c = zlib_compressobj(wbits=zlib.MAX_WBITS)
        body = c.compress(b'this is test data')
        body += c.flush()
        d._buffer = BytesIO(body)
        assert r.read() == b'this is test data'

    def test_response_transparently_decrypts_wrong_deflate(self):
        # Some servers send raw deflate (no zlib header); that is handled too.
        c = zlib_compressobj(wbits=-zlib.MAX_WBITS)
        body = c.compress(b'this is test data')
        body += c.flush()
        body_len = ('%s' % len(body)).encode('ascii')
        headers = {
            b'content-encoding': [b'deflate'], b'content-length': [body_len]
        }
        d = DummySocket()
        d._buffer = BytesIO(body)
        r = HTTP11Response(200, 'OK', headers, d, None)
        assert r.read() == b'this is test data'

    def test_basic_chunked_read(self):
        d = DummySocket()
        r = HTTP11Response(
            200, 'OK', {b'transfer-encoding': [b'chunked']}, d, None
        )
        data = (
            b'4\r\nwell\r\n'
            b'4\r\nwell\r\n'
            b'4\r\nwhat\r\n'
            b'4\r\nhave\r\n'
            b'2\r\nwe\r\n'
            b'a\r\nhereabouts\r\n'
            b'0\r\n\r\n'
        )
        d._buffer = BytesIO(data)
        results = [
            b'well', b'well', b'what', b'have', b'we', b'hereabouts'
        ]
        # read_chunked() yields one item per wire chunk, in order.
        for c1, c2 in zip(results, r.read_chunked()):
            assert c1 == c2
        assert not list(r.read_chunked())

    def test_chunked_read_of_non_chunked(self):
        # read_chunked() on a non-chunked response is an error.
        r = HTTP11Response(200, 'OK', {b'content-length': [b'0']}, None, None)
        with pytest.raises(ChunkedDecodeError):
            list(r.read_chunked())

    def test_chunked_read_aborts_early(self):
        # No socket at all: the generator simply yields nothing.
        r = HTTP11Response(
            200, 'OK', {b'transfer-encoding': [b'chunked']}, None, None
        )
        assert not list(r.read_chunked())

    def test_response_transparently_decrypts_chunked_gzip(self):
        d = DummySocket()
        headers = {
            b'content-encoding': [b'gzip'],
            b'transfer-encoding': [b'chunked'],
        }
        r = HTTP11Response(200, 'OK', headers, d, None)
        c = zlib_compressobj(wbits=24)
        body = c.compress(b'this is test data')
        body += c.flush()
        # Split the gzip stream into 2-byte wire chunks.
        data = b''
        for index in range(0, len(body), 2):
            data += b'2\r\n' + body[index:index+2] + b'\r\n'
        data += b'0\r\n\r\n'
        d._buffer = BytesIO(data)
        received_body = b''
        for chunk in r.read_chunked():
            received_body += chunk
        assert received_body == b'this is test data'

    def test_chunked_normal_read(self):
        # Plain read() also works on a chunked body, concatenating chunks.
        d = DummySocket()
        r = HTTP11Response(
            200, 'OK', {b'transfer-encoding': [b'chunked']}, d, None)
        data = (
            b'4\r\nwell\r\n'
            b'4\r\nwell\r\n'
            b'4\r\nwhat\r\n'
            b'4\r\nhave\r\n'
            b'2\r\nwe\r\n'
            b'a\r\nhereabouts\r\n'
            b'0\r\n\r\n'
        )
        d._buffer = BytesIO(data)
        assert r.read() == b'wellwellwhathavewehereabouts'

    def test_chunk_length_read(self):
        # Bounded reads may span chunk boundaries.
        d = DummySocket()
        r = HTTP11Response(
            200, 'OK', {b'transfer-encoding': [b'chunked']}, d, None
        )
        data = (
            b'4\r\nwell\r\n'
            b'4\r\nwell\r\n'
            b'4\r\nwhat\r\n'
            b'4\r\nhave\r\n'
            b'2\r\nwe\r\n'
            b'a\r\nhereabouts\r\n'
            b'0\r\n\r\n'
        )
        d._buffer = BytesIO(data)
        assert r.read(5) == b'wellw'
        assert r.read(15) == b'ellwhathavewehe'
        assert r.read(20) == b'reabouts'
        assert r.read() == b''

    def test_bounded_read_expect_close_no_content_length(self):
        d = DummySocket()
        r = HTTP11Response(200, 'OK', {b'connection': [b'close']}, d, None)
        d._buffer = BytesIO(b'hello there sir')
        assert r.read(5) == b'hello'
        assert r.read(6) == b' there'
        assert r.read(8) == b' sir'
        assert r.read(9) == b''
        # EOF on a connection: close response releases the socket.
        assert r._sock is None

    def test_bounded_read_expect_close_with_content_length(self):
        headers = {b'connection': [b'close'], b'content-length': [b'15']}
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, None)
        d._buffer = BytesIO(b'hello there sir')
        assert r.read(5) == b'hello'
        assert r.read(6) == b' there'
        assert r.read(8) == b' sir'
        assert r.read(9) == b''
        assert r._sock is None

    def test_compressed_bounded_read_expect_close(self):
        headers = {b'connection': [b'close'], b'content-encoding': [b'gzip']}
        c = zlib_compressobj(wbits=24)
        body = c.compress(b'hello there sir')
        body += c.flush()
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, None)
        d._buffer = BytesIO(body)
        response = b''
        while True:
            # The read size is magic here: it's the smallest read that
            # actually decompresses immediately.
            chunk = r.read(15)
            if not chunk:
                break
            response += chunk
        assert response == b'hello there sir'
        assert r._sock is None

    # --- close-callback contract: the last constructor argument is the
    # --- owning connection, whose close() must fire only on connection: close

    def test_expect_close_reads_call_close_callback(self):
        connection = mock.MagicMock()
        d = DummySocket()
        r = HTTP11Response(
            200, 'OK', {b'connection': [b'close']}, d, connection
        )
        d._buffer = BytesIO(b'hello there sir')
        assert r.read(5) == b'hello'
        assert r.read(6) == b' there'
        assert r.read(8) == b' sir'
        assert r.read(9) == b''
        assert r._sock is None
        assert connection.close.call_count == 1

    def test_expect_close_unbounded_reads_call_close_callback(self):
        connection = mock.MagicMock()
        d = DummySocket()
        r = HTTP11Response(
            200, 'OK', {b'connection': [b'close']}, d, connection
        )
        d._buffer = BytesIO(b'hello there sir')
        r.read()
        assert r._sock is None
        assert connection.close.call_count == 1

    def test_content_length_expect_close_reads_call_close_callback(self):
        connection = mock.MagicMock()
        headers = {b'connection': [b'close'], b'content-length': [b'15']}
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, connection)
        d._buffer = BytesIO(b'hello there sir')
        r.read()
        assert r._sock is None
        assert connection.close.call_count == 1

    def test_content_length_reads_dont_call_close_callback(self):
        # Without connection: close the connection stays reusable.
        connection = mock.MagicMock()
        headers = {b'content-length': [b'15']}
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, connection)
        d._buffer = BytesIO(b'hello there sir')
        r.read()
        assert r._sock is None
        assert connection.close.call_count == 0

    def test_chunked_reads_dont_call_close_callback(self):
        connection = mock.MagicMock()
        headers = {b'transfer-encoding': [b'chunked']}
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, connection)
        data = (
            b'4\r\nwell\r\n'
            b'4\r\nwell\r\n'
            b'4\r\nwhat\r\n'
            b'4\r\nhave\r\n'
            b'2\r\nwe\r\n'
            b'a\r\nhereabouts\r\n'
            b'0\r\n\r\n'
        )
        d._buffer = BytesIO(data)
        list(r.read_chunked())
        assert r._sock is None
        assert connection.close.call_count == 0

    def test_closing_chunked_reads_dont_call_close_callback(self):
        # Chunked AND connection: close -> callback does fire.
        connection = mock.MagicMock()
        headers = {
            b'transfer-encoding': [b'chunked'], b'connection': [b'close']
        }
        d = DummySocket()
        r = HTTP11Response(200, 'OK', headers, d, connection)
        data = (
            b'4\r\nwell\r\n'
            b'4\r\nwell\r\n'
            b'4\r\nwhat\r\n'
            b'4\r\nhave\r\n'
            b'2\r\nwe\r\n'
            b'a\r\nhereabouts\r\n'
            b'0\r\n\r\n'
        )
        d._buffer = BytesIO(data)
        list(r.read_chunked())
        assert r._sock is None
        assert connection.close.call_count == 1

    def test_connection_version(self):
        c = HTTP11Connection('httpbin.org')
        assert c.version is HTTPVersion.http11

    def test_response_version(self):
        d = DummySocket()
        headers = {
            b'transfer-encoding': [b'chunked'], b'connection': [b'close']
        }
        r = HTTP11Response(200, 'OK', headers, d)
        assert r.version is HTTPVersion.http11
class DummySocket(object):
    """In-memory socket stand-in: records writes in ``queue`` and replays
    reads from ``_buffer``, tracking how many bytes were consumed.
    """

    def __init__(self):
        self.queue = []
        self._buffer = BytesIO()
        self._read_counter = 0
        self.can_read = False

    @property
    def buffer(self):
        # Zero-copy view of everything not yet consumed.
        unread = self._buffer.getvalue()[self._read_counter:]
        return memoryview(unread)

    def advance_buffer(self, amt):
        # Consume amt bytes: bump the counter and move the file position.
        self._read_counter += amt
        self._buffer.read(amt)

    def send(self, data):
        # Only raw bytes may hit the "wire".
        if isinstance(data, bytes):
            self.queue.append(data)
        else:
            raise TypeError()

    def _consume(self, data):
        # Shared bookkeeping for recv()/readline().
        self._read_counter += len(data)
        return memoryview(data)

    def recv(self, l):
        return self._consume(self._buffer.read(l))

    def readline(self):
        return self._consume(self._buffer.readline())

    def close(self):
        pass

    def fill(self):
        pass
class DummyFile(object):
    """Minimal binary file-like object backed by an in-memory buffer.

    Supports read(), readline(), iteration and fileno(), which is all the
    connection tests need from a "real" file.
    """

    def __init__(self, data):
        # All reads are served from this in-memory bytes buffer.
        self.buffer = BytesIO(data)

    def read(self, amt=None):
        # amt=None means "read everything", matching file semantics.
        return self.buffer.read(amt)

    def fileno(self):
        # Fake descriptor; tests monkeypatch os.fstat so it never reaches
        # the real OS.
        return -1

    def readline(self):
        # BUG FIX: the original dropped the result and implicitly returned
        # None, breaking any caller that used readline().
        return self.buffer.readline()

    def __iter__(self):
        return self.buffer.__iter__()
| |
#!/usr/bin/env python
# encoding: utf-8
"""Orchestration template
The following tasks must be implemented:
- start
- stop
- restart
- status
An instance endpoint has to be provided using the CLUSTERDN environment variable.
For example:
CLUSTERDN="instances/test/reference/1.0.0/1"
A fabric roledef is created for each service defined in the registry.
It can be used with the decorator: @roles('servicename1')
WARN: The hosts are accessed using the IP address of the second network device,
usually eth1.
The properties of a given service can be accessed through:
SERVICES['servicename'].propertyname
for example:
SERVICES['namenode'].heap
# If the property has dots we can use
SERVICES['datanode'].get('dfs.blocksize')
# Or even define a default value in case it does not exist
SERVICES['datanode'].get('dfs.blocksize', '134217728')
Details about a given node can be obtained through each Node object returned by service.nodes
The fabfile can be tested running it in NOOP mode (testing mode) exporting a NOOP env variable.
Required roles: initiator, responders, peerback
"""
from __future__ import print_function
import os
import sys
from fabric.api import *
from fabric.colors import red, green, yellow, blue
from fabric.contrib.files import exists, append, sed, comment, uncomment
# FIXME: Installing configuration-registry with pip and importing registry directly does not work
# inside the fabfile. Temporarily it is copied manually in the utils directory
#from utils import registry
# In the big data nodes configuration-registry is installed globally
import registry
import time
from pprint import pprint
from StringIO import StringIO
import jinja2
# Maximum number of retries to wait for a node to change to status running
MAX_RETRIES = 100
# Seconds between retries
DELAY = 5
def eprint(*args, **kwargs):
    """Like print(), but writes to standard error instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
# CLUSTERDN identifies the cluster instance to orchestrate; it is mandatory.
if os.environ.get('CLUSTERDN'):
    CLUSTERDN = os.environ.get('CLUSTERDN')
else:
    eprint(red('An instance endpoint has to be provided using the CLUSTERDN environment variable'))
    sys.exit(2)

# Registry endpoint is overridable via REGISTRY; defaults to the site consul.
if os.environ.get('REGISTRY'):
    REGISTRY = os.environ.get('REGISTRY')
else:
    REGISTRY = 'http://consul.service.int.cesga.es:8500/v1/kv'

# Retrieve info from the registry
registry.connect(REGISTRY)
cluster = registry.Cluster(CLUSTERDN)
nodes = cluster.nodes
services = cluster.services
def wait_until_node_is_running(node):
    """Block until *node* reports status 'running' (docker-executor done).

    Polls every DELAY seconds and exits the program with code 3 after
    MAX_RETRIES unsuccessful polls.
    """
    name = node.name
    attempts = 0
    while node.status != 'running':
        attempts += 1
        if attempts > MAX_RETRIES:
            sys.exit(3)
        print('Waiting for node {}: {}/{}'.format(name, attempts, MAX_RETRIES))
        time.sleep(DELAY)
def external_address(node):
    """Address fabric uses to reach the node.

    By convention this is the node's **second** network interface.
    """
    second_nic = node.networks[1]
    return second_nic.address
def internal_address(node):
    """Address used for intra-cluster traffic.

    By convention this is the node's **first** network interface.
    """
    first_nic = node.networks[0]
    return first_nic.address
def put_template(tmpl_string, dest, context=None):
    """Render *tmpl_string* with jinja2 and upload the result to *dest*.

    The context object is exposed to the template under the name ``p``.
    """
    rendered = jinja2.Template(tmpl_string).render(p=context)
    put(StringIO(rendered), dest)
# Expose the relevant information
NODES = {node.name: node for node in nodes}
SERVICES = {service.name: service for service in services}
NODE = {}
for node in nodes:
    # Block until docker-executor reports the node up before inspecting it.
    wait_until_node_is_running(node)
    properties = {'hostname': node.name}
    # Flatten network devices (name -> address) and disks (name -> mountpoint)
    # into a single per-node property dict.
    for dev in node.networks:
        properties[dev.name] = dev.address
    for disk in node.disks:
        properties[disk.name] = disk.destination
    properties['address_int'] = internal_address(node)
    properties['address_ext'] = external_address(node)
    # The node is labeled with the network address that will be used by fabric
    # to connect to the node, this allows to retrieve the node using NODE[env.host]
    label = external_address(node)
    NODE[label] = properties

env.user = 'root'
env.hosts = NODE.keys()
# Allow known hosts with changed keys
env.disable_known_hosts = True
# Retry 30 times each 10 seconds -> (30-1)*10 = 290 seconds
env.connection_attempts = 30
env.timeout = 10
# Enable ssh client keep alive messages
env.keepalive = 15

# Define the fabric roles according to the cluster services
for service in services:
    env.roledefs[service.name] = [external_address(n) for n in service.nodes]

# Define also a global var ROLE to be used for internal cluster configuration
ROLE = {}
for service in services:
    ROLE[service.name] = [internal_address(n) for n in service.nodes]

print(blue('= Summary of cluster information ='))
print('== NODE ==')
pprint(NODE)
print('== Fabric roles ==')
pprint(env.roledefs)
print('== ROLE ==')
pprint(ROLE)
print(blue('= End of summary ='))

#
# Debugging mode
#
# To enable it use: export NOOP=1
if os.environ.get('NOOP'):
    print(yellow('\n\n== Running in NOOP mode ==\n\n'))

    # Shadow fabric's run/put with printers so tasks only log what they
    # would have executed.
    def run(name):
        print('[{0}] run: {1}'.format(env.host, name))

    def put(source, destination):
        print('[{0}] put: {1} {2}'.format(env.host, source, destination))
@task
@parallel
def hostname():
    """Print the hostnames: mainly used for testing purposes"""
    # Cheap connectivity smoke test across every host, in parallel.
    run('/bin/hostname')
#
# CONFIGURATION FILE TEMPLATES
#
# /etc/mongodb.conf
# NOTE(review): the {{ include_file_contents(...) }} expression below looks
# intended to be pre-expanded by packaging/deployment tooling so that at
# runtime MONGOD_CONF already holds the jinja template text — confirm.
MONGOD_CONF = """{{ include_file_contents('files/mongod.conf.jinja') }}"""
@task
@runs_once
def start():
    """Start the cluster: /etc/hosts, mongod configuration and the daemon"""
    execute(generate_etc_hosts)
    execute(configure_service)
    execute(start_mongod)
    # Record in the registry that the cluster is up.
    cluster.status = 'running'
    print(green("All services started"))
@task
def generate_etc_hosts():
    """Generate the /etc/hosts file using internal cluster addresses"""
    # One "<internal-ip> <hostname>" line per node, appended on every host.
    for n in NODE.values():
        append('/etc/hosts', '{} {}'.format(n['address_int'], n['hostname']))
@task
def configure_service():
    """Configure the service"""
    # Render /etc/mongod.conf, then create the data/log directories it needs.
    modify_mongod_conf()
    create_dirs()
def modify_mongod_conf():
    """Render /etc/mongod.conf for this host from the MONGOD_CONF template."""
    node = NODE[env.host]
    # Any node property whose name contains 'disk' is a data-disk mount
    # point; the first one in sorted order hosts both DB files and the log.
    disks = sorted(node[key] for key in node if 'disk' in key)
    ctx = {
        'storage.dbPath': os.path.join(disks[0], 'data'),
        'systemLog.path': os.path.join(disks[0], 'log', 'mongodb.log'),
        'net.bindIp': '{},{}'.format(node['address_int'], node['address_ext']),
    }
    put_template(MONGOD_CONF, '/etc/mongod.conf', context=ctx)
def create_dirs():
    """Create the mongod data and log directories, owned by the mongod user."""
    node = NODE[env.host]
    # Same disk-selection convention as modify_mongod_conf: first sorted
    # property containing 'disk' is the data disk.
    disks = sorted(node[key] for key in node if 'disk' in key)
    for subdir in ('data', 'log'):
        path = os.path.join(disks[0], subdir)
        run('mkdir -p {}'.format(path))
        run('chown mongod:mongod {}'.format(path))
@task
def start_mongod():
    """Start the mongod service"""
    run('systemctl start mongod')
@task
def status():
    """Check the status of the mongod service"""
    run('systemctl status mongod')
@task
@runs_once
def stop():
    """Stop the mongod service on all the nodes of the cluster"""
    # warn_only: keep going even if mongod is already stopped on some host.
    with settings(warn_only=True):
        execute(stop_service)
@task
def stop_service():
    """Stop the service"""
    run('systemctl stop mongod')
@task
@runs_once
def restart():
    """Restart all the services of the cluster"""
    # Delegates to the stop/start tasks so the sequencing stays in one place.
    execute(stop)
    execute(start)
| |
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""State machine for managing a router.
"""
# See state machine diagram and description:
# https://docs.google.com/a/dreamhost.com/document/d/1Ed5wDqCHW-CUt67ufjOUq4uYj0ECS5PweHxoueUoYUI/edit # noqa
import collections
import itertools
import logging
from oslo.config import cfg
from akanda.rug.event import POLL, CREATE, READ, UPDATE, DELETE, REBUILD
from akanda.rug import vm_manager
class StateParams(object):
    """Bundle of shared context handed to every State in the machine."""

    def __init__(self, vm, log, queue, bandwidth_callback,
                 reboot_error_threshold, router_image_uuid):
        # Store each collaborator under its parameter name.
        for attr, value in (
            ('vm', vm),
            ('log', log),
            ('queue', queue),
            ('bandwidth_callback', bandwidth_callback),
            ('reboot_error_threshold', reboot_error_threshold),
            ('router_image_uuid', router_image_uuid),
        ):
            setattr(self, attr, value)
class State(object):
    """Base class for states in the router lifecycle machine.

    Wraps a shared StateParams bundle and exposes its pieces as read-only
    properties.  Subclasses override execute() (do the work, possibly
    rewriting the action) and transition() (choose the next state).
    """

    def __init__(self, params):
        self.params = params

    @property
    def log(self):
        return self.params.log

    @property
    def queue(self):
        return self.params.queue

    @property
    def vm(self):
        return self.params.vm

    @property
    def router_image_uuid(self):
        return self.params.router_image_uuid

    @property
    def name(self):
        # The concrete subclass name, used in logs and str().
        return type(self).__name__

    def __str__(self):
        return self.name

    def execute(self, action, worker_context):
        """Default behaviour: do nothing, pass the action through."""
        return action

    def transition(self, action, worker_context):
        """Default behaviour: remain in the current state."""
        return self
class CalcAction(State):
    """Collapse the queued events into the single next action to perform."""

    def execute(self, action, worker_context):
        queue = self.queue
        # A pending DELETE trumps everything else in the queue.
        if DELETE in queue:
            self.log.debug('shortcutting to delete')
            return DELETE

        # Repeatedly fold the head of the queue into `action` until the
        # next event is genuinely different work.
        while queue:
            self.log.debug(
                'action = %s, len(queue) = %s, queue = %s',
                action,
                len(queue),
                list(itertools.islice(queue, 0, 60))
            )

            if action == UPDATE and queue[0] == CREATE:
                # upgrade to CREATE from UPDATE by taking the next
                # item from the queue
                self.log.debug('upgrading from update to create')
                action = queue.popleft()
                continue

            elif action in (CREATE, UPDATE) and queue[0] == REBUILD:
                # upgrade to REBUILD from CREATE/UPDATE by taking the next
                # item from the queue
                self.log.debug('upgrading from %s to rebuild' % action)
                action = queue.popleft()
                continue

            elif action == CREATE and queue[0] == UPDATE:
                # CREATE implies an UPDATE so eat the update event
                # without changing the action
                self.log.debug('merging create and update')
                queue.popleft()
                continue

            elif action and queue[0] == POLL:
                # Throw away a poll following any other valid action,
                # because a create or update will automatically handle
                # the poll and repeated polls are not needed.
                self.log.debug('discarding poll event following action %s',
                               action)
                queue.popleft()
                continue

            elif action and action != POLL and action != queue[0]:
                # We are not polling and the next action is something
                # different from what we are doing, so just do the
                # current action.
                self.log.debug('done collapsing events')
                break

            self.log.debug('popping action from queue')
            action = queue.popleft()

        return action

    def transition(self, action, worker_context):
        # Map the collapsed action plus the current VM state to a state.
        if self.vm.state == vm_manager.GONE:
            next_action = StopVM(self.params)
        elif action == DELETE:
            next_action = StopVM(self.params)
        elif action == REBUILD:
            next_action = RebuildVM(self.params)
        elif self.vm.state == vm_manager.BOOTING:
            next_action = CheckBoot(self.params)
        elif self.vm.state == vm_manager.DOWN:
            next_action = CreateVM(self.params)
        else:
            next_action = Alive(self.params)
        if self.vm.state == vm_manager.ERROR:
            if action == POLL:
                # If the selected action is to poll, and we are in an
                # error state, then an event slipped through the
                # filter in send_message() and we should ignore it
                # here.
                next_action = self
            elif self.vm.error_cooldown:
                self.log.debug('Router is in ERROR cooldown, ignoring '
                               'event.')
                next_action = self
            else:
                # If this isn't a POLL, and the configured `error_cooldown`
                # has passed, clear the error status before doing what we
                # really want to do.
                next_action = ClearError(self.params, next_action)
        return next_action
class PushUpdate(State):
    """Put an update instruction on the queue for the state machine.
    """

    def execute(self, action, worker_context):
        # Push an UPDATE onto the front of the queue; the current action
        # itself continues through unchanged.
        self.queue.appendleft(UPDATE)
        return action

    def transition(self, action, worker_context):
        # Back to event collapsing so the queued UPDATE is handled next.
        return CalcAction(self.params)
class ClearError(State):
    """Remove the error state from the VM.
    """

    def __init__(self, params, next_state=None):
        # next_state lets CalcAction resume its originally chosen state
        # once the error flag has been cleared.
        super(ClearError, self).__init__(params)
        self._next_state = next_state

    def execute(self, action, worker_context):
        # If we are being told explicitly to update the VM, we should
        # ignore any error status.
        self.vm.clear_error(worker_context)
        return action

    def transition(self, action, worker_context):
        if self._next_state:
            return self._next_state
        return CalcAction(self.params)
class Alive(State):
    """Probe the VM and route to the appropriate follow-up state."""

    def execute(self, action, worker_context):
        # Refresh our view of the VM's state before deciding anything.
        self.vm.update_state(worker_context)
        return action

    def transition(self, action, worker_context):
        if self.vm.state == vm_manager.GONE:
            return StopVM(self.params)
        elif self.vm.state == vm_manager.DOWN:
            return CreateVM(self.params)
        elif action == POLL and self.vm.state == vm_manager.CONFIGURED:
            # Configured and just polling: nothing to do.
            return CalcAction(self.params)
        elif action == READ and self.vm.state == vm_manager.CONFIGURED:
            return ReadStats(self.params)
        else:
            return ConfigureVM(self.params)
class CreateVM(State):
    """Boot a new router VM, guarding against endless reboot loops."""

    def execute(self, action, worker_context):
        # Check for a loop where the router keeps failing to boot or
        # accept the configuration.
        if self.vm.attempts >= self.params.reboot_error_threshold:
            self.log.info('dropping out of boot loop after %s trials',
                          self.vm.attempts)
            self.vm.set_error(worker_context)
            return action
        self.vm.boot(worker_context, self.router_image_uuid)
        self.log.debug('CreateVM attempt %s/%s',
                       self.vm.attempts,
                       self.params.reboot_error_threshold)
        return action

    def transition(self, action, worker_context):
        if self.vm.state == vm_manager.GONE:
            return StopVM(self.params)
        elif self.vm.state == vm_manager.ERROR:
            return CalcAction(self.params)
        elif self.vm.state == vm_manager.DOWN:
            # Boot did not take; try creating again.
            return CreateVM(self.params)
        return CheckBoot(self.params)
class CheckBoot(State):
    """Poll a booting VM until it is up (or lost)."""

    def execute(self, action, worker_context):
        self.vm.check_boot(worker_context)
        # Put the action back on the front of the queue so that we can yield
        # and handle it in another state machine traversal (which will proceed
        # from CalcAction directly to CheckBoot).
        if self.vm.state not in (vm_manager.DOWN, vm_manager.GONE):
            self.queue.appendleft(action)
        return action

    def transition(self, action, worker_context):
        if self.vm.state == vm_manager.REPLUG:
            return ReplugVM(self.params)
        if self.vm.state in (vm_manager.DOWN,
                             vm_manager.GONE):
            return StopVM(self.params)
        if self.vm.state == vm_manager.UP:
            return ConfigureVM(self.params)
        # Still booting: fall back to event collapsing and re-enter later.
        return CalcAction(self.params)
class ReplugVM(State):
    """Hot-plug the VM's network interfaces to match the desired config."""

    def execute(self, action, worker_context):
        self.vm.replug(worker_context)
        return action

    def transition(self, action, worker_context):
        # A replug that requires a restart goes through StopVM first.
        if self.vm.state == vm_manager.RESTART:
            return StopVM(self.params)
        return ConfigureVM(self.params)
class StopVM(State):
    """Shut the VM down; terminal when deleting, otherwise precedes reboot."""

    def execute(self, action, worker_context):
        self.vm.stop(worker_context)
        if self.vm.state == vm_manager.GONE:
            # Force the action to delete since the router isn't there
            # any more.
            return DELETE
        return action

    def transition(self, action, worker_context):
        # Stay here until the stop has actually taken effect.
        if self.vm.state not in (vm_manager.DOWN, vm_manager.GONE):
            return self
        if self.vm.state == vm_manager.GONE:
            return Exit(self.params)
        if action == DELETE:
            return Exit(self.params)
        return CreateVM(self.params)
class RebuildVM(State):
    """Tear the router VM down and schedule a fresh boot."""

    def execute(self, action, worker_context):
        self.vm.stop(worker_context)
        if self.vm.state == vm_manager.GONE:
            # The router instance is already missing, so switch the
            # action to a delete instead of a re-create.
            return DELETE
        # Re-create the VM from scratch.
        self.vm.reset_boot_counter()
        return CREATE

    def transition(self, action, worker_context):
        status = self.vm.state
        if status not in (vm_manager.DOWN, vm_manager.GONE):
            # Still stopping; stay here.
            return self
        if status == vm_manager.GONE:
            return Exit(self.params)
        return CreateVM(self.params)
class Exit(State):
    # Terminal state: the state machine stops here once the router is gone.
    pass
class ConfigureVM(State):
    """Push the router configuration into the VM."""

    def execute(self, action, worker_context):
        self.vm.configure(worker_context)
        if self.vm.state != vm_manager.CONFIGURED:
            return action
        # Configuration succeeded: reads stay reads, anything else
        # collapses to a poll.
        return READ if action == READ else POLL

    def transition(self, action, worker_context):
        status = self.vm.state
        if status == vm_manager.REPLUG:
            return ReplugVM(self.params)
        if status in (vm_manager.RESTART, vm_manager.DOWN, vm_manager.GONE):
            return StopVM(self.params)
        if status == vm_manager.UP:
            return PushUpdate(self.params)
        # Below here, assume vm.state == vm_manager.CONFIGURED
        if action == READ:
            return ReadStats(self.params)
        return CalcAction(self.params)
class ReadStats(State):
    """Collect bandwidth statistics from the VM and report them."""

    def execute(self, action, worker_context):
        self.params.bandwidth_callback(self.vm.read_stats())
        return POLL

    def transition(self, action, worker_context):
        return CalcAction(self.params)
class Automaton(object):
    """State machine driving one router VM through its lifecycle.

    Messages arrive via :meth:`send_message`, are queued, and the worker
    drives the machine forward by calling :meth:`update`.
    """

    def __init__(self, router_id, tenant_id,
                 delete_callback, bandwidth_callback,
                 worker_context, queue_warning_threshold,
                 reboot_error_threshold):
        """
        :param router_id: UUID of the router being managed
        :type router_id: str
        :param tenant_id: UUID of the tenant being managed
        :type tenant_id: str
        :param delete_callback: Invoked when the Automaton decides
                                the router should be deleted.
        :type delete_callback: callable
        :param bandwidth_callback: To be invoked when the Automaton
                                   needs to report how much bandwidth
                                   a router has used.
        :type bandwidth_callback: callable taking router_id and bandwidth
                                  info dict
        :param worker_context: a WorkerContext
        :type worker_context: WorkerContext
        :param queue_warning_threshold: Limit after which adding items
                                        to the queue triggers a warning.
        :type queue_warning_threshold: int
        :param reboot_error_threshold: Limit after which trying to reboot
                                       the router puts it into an error state.
        :type reboot_error_threshold: int
        """
        self.router_id = router_id
        self.tenant_id = tenant_id
        self._delete_callback = delete_callback
        self._queue_warning_threshold = queue_warning_threshold
        self._reboot_error_threshold = reboot_error_threshold
        self.deleted = False
        self.bandwidth_callback = bandwidth_callback
        self._queue = collections.deque()
        self.log = logging.getLogger(__name__ + '.' + router_id)

        self.action = POLL
        self.vm = vm_manager.VmManager(router_id, tenant_id, self.log,
                                       worker_context)
        self._state_params = StateParams(
            self.vm,
            self.log,
            self._queue,
            self.bandwidth_callback,
            self._reboot_error_threshold,
            cfg.CONF.router_image_uuid
        )
        self.state = CalcAction(self._state_params)

    def service_shutdown(self):
        "Called when the parent process is being stopped"

    def _do_delete(self):
        # Invoke the delete callback exactly once, then remember the
        # router is gone so later events are ignored.
        if self._delete_callback is not None:
            self.log.debug('calling delete callback')
            self._delete_callback()
            # Avoid calling the delete callback more than once.
            self._delete_callback = None
        # Remember that this router has been deleted
        self.deleted = True

    def update(self, worker_context):
        "Called when the router config should be changed"
        while self._queue:
            while True:
                if self.deleted:
                    self.log.debug(
                        'skipping update because the router is being deleted'
                    )
                    return

                try:
                    self.log.debug('%s.execute(%s) vm.state=%s',
                                   self.state, self.action, self.vm.state)
                    self.action = self.state.execute(
                        self.action,
                        worker_context,
                    )
                    self.log.debug('%s.execute -> %s vm.state=%s',
                                   self.state, self.action, self.vm.state)
                except Exception:
                    # BUG FIX: was a bare ``except:``, which also swallowed
                    # SystemExit and KeyboardInterrupt. Log and fall through
                    # to the transition so the machine can recover.
                    self.log.exception(
                        '%s.execute() failed for action: %s',
                        self.state,
                        self.action
                    )

                old_state = self.state
                self.state = self.state.transition(
                    self.action,
                    worker_context,
                )
                self.log.debug('%s.transition(%s) -> %s vm.state=%s',
                               old_state, self.action, self.state,
                               self.vm.state)

                # Yield control each time we stop to figure out what
                # to do next.
                if isinstance(self.state, CalcAction):
                    return  # yield

                # We have reached the exit state, so the router has
                # been deleted somehow.
                if isinstance(self.state, Exit):
                    self._do_delete()
                    return

    def send_message(self, message):
        "Called when the worker put a message in the state machine queue"
        if self.deleted:
            # Ignore any more incoming messages
            self.log.debug(
                'deleted state machine, ignoring incoming message %s',
                message)
            return False

        # NOTE(dhellmann): This check is largely redundant with the
        # one in CalcAction.transition() but it may allow us to avoid
        # adding poll events to the queue at all, and therefore cut
        # down on the number of times a worker thread wakes up to
        # process something on a router that isn't going to actually
        # do any work.
        if message.crud == POLL and self.vm.state == vm_manager.ERROR:
            self.log.info(
                'Router status is ERROR, ignoring POLL message: %s',
                message,
            )
            return False

        if message.crud == REBUILD:
            # A rebuild may carry a custom image; otherwise fall back to
            # the configured default.
            if message.body.get('router_image_uuid'):
                self.log.info(
                    'Router is being REBUILT with custom image %s',
                    message.body['router_image_uuid']
                )
                self.router_image_uuid = message.body['router_image_uuid']
            else:
                self.router_image_uuid = cfg.CONF.router_image_uuid

        self._queue.append(message.crud)
        queue_len = len(self._queue)
        if queue_len > self._queue_warning_threshold:
            logger = self.log.warning
        else:
            logger = self.log.debug
        logger('incoming message brings queue length to %s', queue_len)
        return True

    @property
    def router_image_uuid(self):
        # The image UUID lives on the shared state params object.
        return self.state.params.router_image_uuid

    @router_image_uuid.setter
    def router_image_uuid(self, value):
        self.state.params.router_image_uuid = value

    def has_more_work(self):
        "Called to check if there are more messages in the state machine queue"
        return (not self.deleted) and bool(self._queue)

    def has_error(self):
        return self.vm.state == vm_manager.ERROR
| |
#!/usr/bin/python
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
    """Return angle between -180 and +180"""
    wrapped = (a + 360) % 360
    return wrapped - 360 if wrapped > 180 else wrapped
def norm_text_angle(a):
    """Return angle between -90 and +90"""
    wrapped = (a + 180) % 180
    return wrapped - 180 if wrapped > 90 else wrapped
def get_dir_vector(zdir):
    """Map a direction spec ('x', 'y', 'z', None or a 3-vector) to a vector."""
    if zdir == 'x':
        basis = (1, 0, 0)
    elif zdir == 'y':
        basis = (0, 1, 0)
    elif zdir == 'z':
        basis = (0, 0, 1)
    elif zdir is None:
        basis = (0, 0, 0)
    elif iterable(zdir) and len(zdir) == 3:
        # Already a vector; hand it back untouched.
        return zdir
    else:
        raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
    return np.array(basis)
class Text3D(mtext.Text):
    '''
    Text object with 3D position and (in the future) direction.
    '''

    def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
        '''
        *x*, *y*, *z*  Position of text
        *text*         Text string to display
        *zdir*         Direction of text

        Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
        '''
        mtext.Text.__init__(self, x, y, text, **kwargs)
        self.set_3d_properties(z, zdir)

    def set_3d_properties(self, z=0, zdir='z'):
        # Record the 3D anchor point and the direction vector derived from
        # *zdir*; both are consumed by draw().
        x, y = self.get_position()
        self._position3d = np.array((x, y, z))
        self._dir_vec = get_dir_vector(zdir)

    def draw(self, renderer):
        # Project both the anchor and anchor+direction so the on-screen
        # rotation follows the projected direction vector.
        proj = proj3d.proj_trans_points([self._position3d, \
            self._position3d + self._dir_vec], renderer.M)
        dx = proj[0][1] - proj[0][0]
        dy = proj[1][1] - proj[1][0]
        if dx == 0. and dy == 0.:
            # atan2 raises ValueError: math domain error on 0,0
            angle = 0.
        else:
            angle = math.degrees(math.atan2(dy, dx))
        self.set_position((proj[0][0], proj[1][0]))
        self.set_rotation(norm_text_angle(angle))
        mtext.Text.draw(self, renderer)
def text_2d_to_3d(obj, z=0, zdir='z'):
    """Convert a Text to a Text3D object."""
    # Swap the class first so set_3d_properties() resolves to the Text3D
    # implementation.
    obj.__class__ = Text3D
    obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
    '''
    3D line object.
    '''

    def __init__(self, xs, ys, zs, *args, **kwargs):
        '''
        Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
        '''
        # The 2D data is left empty; draw() fills it from the projected
        # 3D vertices.
        lines.Line2D.__init__(self, [], [], *args, **kwargs)
        self._verts3d = xs, ys, zs

    def set_3d_properties(self, zs=0, zdir='z'):
        # Lift the existing 2D data into 3D along *zdir*; a scalar *zs*
        # is broadcast to one value per x point.
        xs = self.get_xdata()
        ys = self.get_ydata()

        try:
            # If *zs* is a list or array, then this will fail and
            # just proceed to juggle_axes().
            zs = float(zs)
            zs = [zs for x in xs]
        except TypeError:
            pass
        self._verts3d = juggle_axes(xs, ys, zs, zdir)

    def draw(self, renderer):
        # Project the stored 3D vertices and hand the 2D result to Line2D.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_data(xs, ys)
        lines.Line2D.draw(self, renderer)
def line_2d_to_3d(line, zs=0, zdir='z'):
    '''
    Convert a 2D line to 3D.
    '''
    # Swap the class first so set_3d_properties() resolves to the Line3D
    # implementation.
    line.__class__ = Line3D
    line.set_3d_properties(zs, zdir)
def path_to_3d_segment(path, zs=0, zdir='z'):
    '''Convert a path to a 3D segment.'''
    # Broadcast a scalar z over every path vertex.
    if not iterable(zs):
        zs = np.ones(len(path)) * zs

    pathsegs = path.iter_segments(simplify=False, curves=False)
    # Pair each (x, y) vertex with its z and rotate into the zdir plane.
    seg3d = [juggle_axes(x, y, z, zdir)
             for ((x, y), code), z in zip(pathsegs, zs)]
    return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
    '''
    Convert paths from a collection object to 3D segments.
    '''
    # Broadcast a scalar z over every path.
    if not iterable(zs):
        zs = np.ones(len(paths)) * zs

    return [path_to_3d_segment(path, pathz, zdir)
            for path, pathz in zip(paths, zs)]
class Line3DCollection(LineCollection):
    '''
    A collection of 3D lines.
    '''

    def __init__(self, segments, *args, **kwargs):
        '''
        Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
        '''
        LineCollection.__init__(self, segments, *args, **kwargs)

    def set_sort_zpos(self, val):
        '''Set the position to use for z-sorting.'''
        self._sort_zpos = val

    def set_segments(self, segments):
        '''
        Set 3D segments
        '''
        # Keep only the 3D data here; the 2D segments are produced at draw
        # time by do_3d_projection().
        self._segments3d = np.asanyarray(segments)
        LineCollection.set_segments(self, [])

    def do_3d_projection(self, renderer):
        '''
        Project the points according to renderer matrix.
        '''
        xyslist = [
            proj3d.proj_trans_points(points, renderer.M) for points in
            self._segments3d]
        segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
        LineCollection.set_segments(self, segments_2d)

        # FIXME
        # Return the minimum projected z so the caller can depth-sort
        # this collection against other artists.
        minz = 1e9
        for (xs, ys, zs) in xyslist:
            minz = min(minz, min(zs))
        return minz

    def draw(self, renderer, project=False):
        if project:
            self.do_3d_projection(renderer)
        LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
    """Convert a LineCollection to a Line3DCollection object."""
    # Compute the 3D segments from the 2D paths before mutating the class.
    segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
    col.__class__ = Line3DCollection
    col.set_segments(segments3d)
class Patch3D(Patch):
    '''
    3D patch object.
    '''

    def __init__(self, *args, **kwargs):
        zs = kwargs.pop('zs', [])
        zdir = kwargs.pop('zdir', 'z')
        Patch.__init__(self, *args, **kwargs)
        # NOTE(review): this passes *zs* into set_3d_properties' *verts*
        # parameter and *zdir* into its *zs* -- looks like an upstream
        # quirk; confirm against callers before changing it.
        self.set_3d_properties(zs, zdir)

    def set_3d_properties(self, verts, zs=0, zdir='z'):
        # Broadcast a scalar *zs* over the vertices, then rotate each
        # (x, y, z) triple into the plane selected by *zdir*.
        if not iterable(zs):
            zs = np.ones(len(verts)) * zs

        self._segment3d = [juggle_axes(x, y, z, zdir) \
                           for ((x, y), z) in zip(verts, zs)]
        self._facecolor3d = Patch.get_facecolor(self)

    def get_path(self):
        # The 2D path computed by the last do_3d_projection() call.
        return self._path2d

    def get_facecolor(self):
        # The 2D facecolor computed by the last do_3d_projection() call.
        return self._facecolor2d

    def do_3d_projection(self, renderer):
        s = self._segment3d
        xs, ys, zs = list(zip(*s))
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
        self._path2d = mpath.Path(list(zip(vxs, vys)))
        # FIXME: coloring
        self._facecolor2d = self._facecolor3d
        return min(vzs)

    def draw(self, renderer):
        Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
    '''
    3D PathPatch object.
    '''

    def __init__(self, path, **kwargs):
        zs = kwargs.pop('zs', [])
        zdir = kwargs.pop('zdir', 'z')
        # NOTE(review): deliberately skips Patch3D.__init__ and calls
        # Patch.__init__ directly, installing the path afterwards.
        Patch.__init__(self, **kwargs)
        self.set_3d_properties(path, zs, zdir)

    def set_3d_properties(self, path, zs=0, zdir='z'):
        # Reuse Patch3D's vertex handling and also keep the path codes so
        # the projected 2D path preserves the move/line/close structure.
        Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
        self._code3d = path.codes

    def do_3d_projection(self, renderer):
        s = self._segment3d
        xs, ys, zs = list(zip(*s))
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
        self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
        # FIXME: coloring
        self._facecolor2d = self._facecolor3d
        return min(vzs)
def get_patch_verts(patch):
    """Return a list of vertices for the path of a patch."""
    trans = patch.get_patch_transform()
    polygons = patch.get_path().to_polygons(trans)
    # Only the first polygon is used; an empty path yields an empty list.
    return polygons[0] if len(polygons) else []
def patch_2d_to_3d(patch, z=0, zdir='z'):
    """Convert a Patch to a Patch3D object."""
    # Grab the transformed vertices before the class swap changes the
    # behavior of get_path().
    verts = get_patch_verts(patch)
    patch.__class__ = Patch3D
    patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
    """Convert a PathPatch to a PathPatch3D object."""
    path = pathpatch.get_path()
    trans = pathpatch.get_patch_transform()

    # BUG FIX: the transformed path used to be stored in a local named
    # ``mpath``, shadowing the module-level ``matplotlib.path as mpath``
    # import inside this function.
    mpath3d = trans.transform_path(path)
    pathpatch.__class__ = PathPatch3D
    pathpatch.set_3d_properties(mpath3d, z, zdir)
class Patch3DCollection(PatchCollection):
    '''
    A collection of 3D patches.
    '''

    def __init__(self, *args, **kwargs):
        """
        Create a collection of flat 3D patches with its normal vector
        pointed in *zdir* direction, and located at *zs* on the *zdir*
        axis. 'zs' can be a scalar or an array-like of the same length as
        the number of patches in the collection.

        Constructor arguments are the same as for
        :class:`~matplotlib.collections.PatchCollection`. In addition,
        keywords *zs=0* and *zdir='z'* are available.

        Also, the keyword argument "depthshade" is available to
        indicate whether or not to shade the patches in order to
        give the appearance of depth (default is *True*).
        This is typically desired in scatter plots.
        """
        zs = kwargs.pop('zs', 0)
        zdir = kwargs.pop('zdir', 'z')
        self._depthshade = kwargs.pop('depthshade', True)
        # BUG FIX: was super(self.__class__, self), which recurses
        # infinitely as soon as this class is subclassed.
        super(Patch3DCollection, self).__init__(*args, **kwargs)
        self.set_3d_properties(zs, zdir)

    def set_sort_zpos(self, val):
        '''Set the position to use for z-sorting.'''
        self._sort_zpos = val

    def set_3d_properties(self, zs, zdir):
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        offsets = self.get_offsets()
        if len(offsets) > 0:
            xs, ys = list(zip(*offsets))
        else:
            xs = []
            ys = []
        self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
        self._facecolor3d = self.get_facecolor()
        self._edgecolor3d = self.get_edgecolor()

    def do_3d_projection(self, renderer):
        xs, ys, zs = self._offsets3d
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)

        # Optionally fade the colors with projected depth.
        fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
               self._facecolor3d)
        fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
        self.set_facecolors(fcs)

        ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
               self._edgecolor3d)
        ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
        self.set_edgecolors(ecs)

        # BUG FIX: explicit class in super() (see __init__).
        super(Patch3DCollection, self).set_offsets(list(zip(vxs, vys)))

        if vzs.size > 0:
            return min(vzs)
        else:
            return np.nan
class Path3DCollection(PathCollection):
    '''
    A collection of 3D paths.
    '''

    def __init__(self, *args, **kwargs):
        """
        Create a collection of flat 3D paths with its normal vector
        pointed in *zdir* direction, and located at *zs* on the *zdir*
        axis. 'zs' can be a scalar or an array-like of the same length as
        the number of paths in the collection.

        Constructor arguments are the same as for
        :class:`~matplotlib.collections.PathCollection`. In addition,
        keywords *zs=0* and *zdir='z'* are available.

        Also, the keyword argument "depthshade" is available to
        indicate whether or not to shade the patches in order to
        give the appearance of depth (default is *True*).
        This is typically desired in scatter plots.
        """
        zs = kwargs.pop('zs', 0)
        zdir = kwargs.pop('zdir', 'z')
        self._depthshade = kwargs.pop('depthshade', True)
        # BUG FIX: was super(self.__class__, self), which recurses
        # infinitely as soon as this class is subclassed.
        super(Path3DCollection, self).__init__(*args, **kwargs)
        self.set_3d_properties(zs, zdir)

    def set_sort_zpos(self, val):
        '''Set the position to use for z-sorting.'''
        self._sort_zpos = val

    def set_3d_properties(self, zs, zdir):
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        offsets = self.get_offsets()
        if len(offsets) > 0:
            xs, ys = list(zip(*offsets))
        else:
            xs = []
            ys = []
        self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
        self._facecolor3d = self.get_facecolor()
        self._edgecolor3d = self.get_edgecolor()

    def do_3d_projection(self, renderer):
        xs, ys, zs = self._offsets3d
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)

        # Optionally fade the colors with projected depth.
        fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
               self._facecolor3d)
        fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
        self.set_facecolors(fcs)

        ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
               self._edgecolor3d)
        ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
        self.set_edgecolors(ecs)

        # BUG FIX: explicit class in super() (see __init__).
        super(Path3DCollection, self).set_offsets(list(zip(vxs, vys)))

        if vzs.size > 0:
            return min(vzs)
        else:
            return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
    """
    Convert a :class:`~matplotlib.collections.PatchCollection` into a
    :class:`Patch3DCollection` object
    (or a :class:`~matplotlib.collections.PathCollection` into a
    :class:`Path3DCollection` object).

    Keywords:

    *zs*          The location or locations to place the patches in the
                  collection along the *zdir* axis. Defaults to 0.

    *zdir*        The axis in which to place the patches. Default is "z".

    *depthshade*  Whether to shade the patches to give a sense of depth.
                  Defaults to *True*.
    """
    # NOTE: if *col* is neither collection type, the class is left alone
    # and only the 3D attributes below are installed.
    if isinstance(col, PathCollection):
        col.__class__ = Path3DCollection
    elif isinstance(col, PatchCollection):
        col.__class__ = Patch3DCollection
    col._depthshade = depthshade
    col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
    '''
    A collection of 3D polygons.
    '''

    def __init__(self, verts, *args, **kwargs):
        '''
        Create a Poly3DCollection.

        *verts* should contain 3D coordinates.

        Keyword arguments:
        zsort, see set_zsort for options.

        Note that this class does a bit of magic with the _facecolors
        and _edgecolors properties.
        '''
        # Pop 'zsort' before PolyCollection sees the remaining kwargs.
        self.set_zsort(kwargs.pop('zsort', True))
        PolyCollection.__init__(self, verts, *args, **kwargs)

    # Reductions applied to each polygon's z values when depth-sorting.
    _zsort_functions = {
        'average': np.average,
        'min': np.min,
        'max': np.max,
    }

    def set_zsort(self, zsort):
        '''
        Set z-sorting behaviour:
        boolean: if True use default 'average'
        string: 'average', 'min' or 'max'
        '''
        if zsort is True:
            zsort = 'average'

        if zsort is not False:
            if zsort in self._zsort_functions:
                zsortfunc = self._zsort_functions[zsort]
            else:
                # NOTE(review): an unknown string returns False and leaves
                # the previous z-sort settings untouched instead of raising.
                return False
        else:
            zsortfunc = None

        self._zsort = zsort
        self._sort_zpos = None
        self._zsortfunc = zsortfunc

    def get_vector(self, segments3d):
        """Optimize points for projection"""
        # Flatten all polygons into one homogeneous-coordinate array and
        # remember each polygon's (start, end) slice indices in _segis.
        si = 0
        ei = 0
        segis = []
        points = []
        for p in segments3d:
            points.extend(p)
            ei = si + len(p)
            segis.append((si, ei))
            si = ei

        if len(segments3d) > 0:
            xs, ys, zs = list(zip(*points))
        else:
            # We need this so that we can skip the bad unpacking from zip()
            xs, ys, zs = [], [], []

        ones = np.ones(len(xs))
        self._vec = np.array([xs, ys, zs, ones])
        self._segis = segis

    def set_verts(self, verts, closed=True):
        '''Set 3D vertices.'''
        self.get_vector(verts)
        # 2D verts will be updated at draw time
        PolyCollection.set_verts(self, [], closed)

    def set_3d_properties(self):
        # Force the collection to initialize the face and edgecolors
        # just in case it is a scalarmappable with a colormap.
        self.update_scalarmappable()
        self._sort_zpos = None
        self.set_zsort(True)
        self._facecolors3d = PolyCollection.get_facecolors(self)
        self._edgecolors3d = PolyCollection.get_edgecolors(self)
        self._alpha3d = PolyCollection.get_alpha(self)

    def set_sort_zpos(self, val):
        '''Set the position to use for z-sorting.'''
        self._sort_zpos = val

    def do_3d_projection(self, renderer):
        '''
        Perform the 3D projection for this object.
        '''
        # FIXME: This may no longer be needed?
        if self._A is not None:
            self.update_scalarmappable()
            self._facecolors3d = self._facecolors

        txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
        xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei]) \
                   for si, ei in self._segis]

        # This extra fuss is to re-order face / edge colors
        cface = self._facecolors3d
        cedge = self._edgecolors3d
        if len(cface) != len(xyzlist):
            cface = cface.repeat(len(xyzlist), axis=0)
        if len(cedge) != len(xyzlist):
            if len(cedge) == 0:
                cedge = cface
            cedge = cedge.repeat(len(xyzlist), axis=0)

        # if required sort by depth (furthest drawn first)
        if self._zsort:
            z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec) for
                             (xs, ys, zs), fc, ec in zip(xyzlist, cface, cedge)]
            z_segments_2d.sort(key=lambda x: x[0], reverse=True)
        else:
            # NOTE(review): _zsort is truthy after every successful
            # set_zsort() call, so this branch appears unreachable.
            raise ValueError("whoops")

        segments_2d = [s for z, s, fc, ec in z_segments_2d]
        PolyCollection.set_verts(self, segments_2d)

        self._facecolors2d = [fc for z, s, fc, ec in z_segments_2d]
        if len(self._edgecolors3d) == len(cface):
            self._edgecolors2d = [ec for z, s, fc, ec in z_segments_2d]
        else:
            self._edgecolors2d = self._edgecolors3d

        # Return zorder value
        if self._sort_zpos is not None:
            zvec = np.array([[0], [0], [self._sort_zpos], [1]])
            ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
            return ztrans[2][0]
        elif tzs.size > 0:
            # FIXME: Some results still don't look quite right.
            #        In particular, examine contourf3d_demo2.py
            #        with az = -54 and elev = -45.
            return np.min(tzs)
        else:
            return np.nan

    def set_facecolor(self, colors):
        # Keep the cached 3D colors in sync with the base class.
        PolyCollection.set_facecolor(self, colors)
        self._facecolors3d = PolyCollection.get_facecolor(self)
    set_facecolors = set_facecolor

    def set_edgecolor(self, colors):
        PolyCollection.set_edgecolor(self, colors)
        self._edgecolors3d = PolyCollection.get_edgecolor(self)
    set_edgecolors = set_edgecolor

    def set_alpha(self, alpha):
        """
        Set the alpha transparencies of the collection.  *alpha* must be
        a float or *None*.

        ACCEPTS: float or None
        """
        if alpha is not None:
            try:
                float(alpha)
            except TypeError:
                raise TypeError('alpha must be a float or None')
        artist.Artist.set_alpha(self, alpha)
        # Recompute the cached RGBA arrays; silently skip if the color
        # caches are not set up yet or cannot be converted.
        try:
            self._facecolors = mcolors.colorConverter.to_rgba_array(
                self._facecolors3d, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
        try:
            self._edgecolors = mcolors.colorConverter.to_rgba_array(
                self._edgecolors3d, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass

    def get_facecolors(self):
        # 2D colors computed by the last do_3d_projection() call.
        return self._facecolors2d
    get_facecolor = get_facecolors

    def get_edgecolors(self):
        return self._edgecolors2d
    get_edgecolor = get_edgecolors

    def draw(self, renderer):
        return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
    """Convert a PolyCollection to a Poly3DCollection object."""
    # Compute the 3D segments from the 2D paths before mutating the class.
    segments_3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
    col.__class__ = Poly3DCollection
    col.set_verts(segments_3d)
    col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
    """
    Reorder coordinates so that 2D xs, ys can be plotted in the plane
    orthogonal to zdir. zdir is normally x, y or z. However, if zdir
    starts with a '-' it is interpreted as a compensation for rotate_axes.
    """
    if zdir == 'x':
        return zs, xs, ys
    if zdir == 'y':
        return xs, zs, ys
    if zdir[0] == '-':
        # Negative direction: delegate to the inverse rotation.
        return rotate_axes(xs, ys, zs, zdir)
    return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
    """
    Reorder coordinates so that the axes are rotated with zdir along
    the original z axis. Prepending the axis with a '-' does the
    inverse transform, so zdir can be x, -x, y, -y, z or -z
    """
    # 'x' and '-y' share one cyclic permutation, '-x' and 'y' the other;
    # anything else (including 'z'/'-z') is the identity.
    if zdir in ('x', '-y'):
        return ys, zs, xs
    if zdir in ('-x', 'y'):
        return zs, xs, ys
    return xs, ys, zs
def iscolor(c):
    """Return True if *c* looks like a single flat RGB(A) color tuple.

    A color is a length-3 or length-4 sequence whose first element is a
    non-iterable number; anything else (including nested sequences and
    objects without a length) yields False.
    """
    try:
        if len(c) == 4 or len(c) == 3:
            if iterable(c[0]):
                return False
            if hasattr(c[0], '__float__'):
                return True
    except Exception:
        # BUG FIX: was a bare ``except:``. len()/indexing failing means
        # *c* is not a color sequence.
        return False
    return False
def get_colors(c, num):
    """Stretch the color argument to provide the required number num"""
    # Named colors are converted to RGBA first.
    if type(c) == type("string"):
        c = mcolors.colorConverter.to_rgba(c)
    # A single flat color is repeated num times.
    if iscolor(c):
        return [c] * num
    if len(c) == num:
        return c
    # BUG FIX: removed an unreachable duplicate ``elif iscolor(c)`` branch
    # (the identical test already returned above).
    elif len(c) == 0:  # if edgecolor or facecolor is specified as 'none'
        return [[0, 0, 0, 0]] * num
    elif iscolor(c[0]):
        return [c[0]] * num
    else:
        raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
    """Modify the alphas of the color list according to depth"""
    # FIXME: This only works well if the points for *zs* are well-spaced
    #        in all three dimensions. Otherwise, at certain orientations,
    #        the min and max zs are very close together.
    #        Should really normalize against the viewing depth.
    # NOTE(review): *zs* is expected to be a numpy array (``.size`` is
    # read below) -- confirm at callers.
    colors = get_colors(colors, len(zs))
    if zs.size > 0:
        # Scale each color's alpha by a factor in [0.3, 1.0] derived from
        # its normalized z value.
        norm = Normalize(min(zs), max(zs))
        sats = 1 - norm(zs) * 0.7
        colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
    return colors
| |
################################################################################
#
# Copyright (c) 2013-2014, Alexander Todorov <atodorov@nospam.dif.io>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import string
from django.contrib import messages
from models import PHP_PEAR_PKG, PHP_PEAR2_PKG
def parse_pip_freeze(request, package_text): # FALSE NEGATIVE
    """
    Parse the output of `pip freeze'.

    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command

    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0

    for line in package_text.split("\n"):
        # this will cause comments and blanks to be skipped
        # also fix the case where comment is after the package version
        # e.g. six==1.3 # Fix conflict
        pkg_ver = line.split('#')[0].strip()

        # skip empty lines
        if not pkg_ver:
            continue

        # try to find warnings
        if pkg_ver.find('Warning:') > -1:
            messages.warning(request, pkg_ver)
            errors += 1

        try:
            (name, version) = pkg_ver.split('==')
            # a fix for package list that came out from some Linux distro
            # not a virtual env
            if name.startswith('python-'):
                name = name.replace('python-', '')
        except ValueError:
            # BUG FIX: narrowed a bare ``except:`` -- only the tuple
            # unpacking above can raise, and it raises ValueError.
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue

        if (not name) or (not version):
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            # BUG FIX: previously fell through and appended the invalid
            # entry anyway.
            continue

        result['packages'].append({'n' : name, 'v' : version})

    result['errors'] = errors
    return result
def parse_bundle_list(request, package_text): # FALSE NEGATIVE
    """
    Parse the output of `bundle list` or `gem list`.

    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command

    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0
    # headers emitted by `bundle list` and `gem list` respectively
    header_prefixes = ('Gems included by the bundle:', '*** LOCAL GEMS ***')

    for raw_line in package_text.split("\n"):
        pkg_ver = raw_line.strip()

        # skip empty lines and tool headers
        if not pkg_ver:
            continue
        if pkg_ver.startswith(header_prefixes):
            continue

        # * rack-protection (1.2.0) - bundle list prefixes with space and a star
        #   journey (1.0.4)         - gem list doesn't add any prefix
        if pkg_ver.startswith('* '):
            pkg_ver = pkg_ver[2:]

        try:
            (name, version) = pkg_ver.split('(')
            name = name.strip()      # drop trailing spaces
            version = version[:-1]   # drop trailing )

            # gem list may show multiple versions; add all of them, e.g.
            # sass (3.1.20, 3.1.18, 3.1.5)
            for single_ver in version.split(','):
                single_ver = single_ver.strip()
                if name and single_ver:
                    result['packages'].append({'n' : name, 'v' : single_ver})
                else:
                    errors += 1
                    messages.error(request, "'%s' is not a valid input!" % pkg_ver)
        except:
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue

    result['errors'] = errors
    return result
def parse_npm_ls(request, package_text): # FALSE NEGATIVE
    """
    Parse the output of `npm ls'.

    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command

    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0

    for line in package_text.split("\n"):
        pkg_ver = line.strip()

        # skip empty lines
        if not pkg_ver:
            continue

        # skip comments
        if pkg_ver.startswith('#'):
            continue

        # try to find warnings
        if pkg_ver.find('WARN') > -1:
            messages.warning(request, pkg_ver)
            errors += 1

        if pkg_ver.find('UNMET DEPENDENCY') > -1:
            messages.warning(request, pkg_ver)
            errors += 1

        # skip lines which are not packages
        if pkg_ver.find('@') == -1:
            continue

        # remove non ASCII characters since `npm ls` will give us
        # a tree like structure.
        # BUG FIX: was filter(lambda ...) which returns an iterator (not a
        # string) on Python 3, so the .strip() below would crash there.
        pkg_ver = ''.join(ch for ch in pkg_ver if ch in string.printable)
        # remove any spaces left
        pkg_ver = pkg_ver.strip()

        try:
            (name, version) = pkg_ver.split('@')
            # For example:
            # ep_etherpad-lite@1.0.0 -> /home/repos/git/etherpad-lite/src
            # this is the application itself, so skip it.
            if version.find(' ') > -1:
                continue
        except ValueError:
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue

        if (not name) or (not version):
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            # BUG FIX: previously fell through and appended the invalid
            # entry anyway.
            continue

        result['packages'].append({'n' : name, 'v' : version})

    result['errors'] = errors
    return result
def parse_perllocal(request, package_text): # FALSE NEGATIVE
    """
    Parse perllocal.pod.

    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command

    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0
    name = version = None

    # NB: this will only handle \n line endings. We may have problems with Perl on Windows/Mac
    # see https://docs.djangoproject.com/en/dev/topics/http/file-uploads/#uploadedfile-objects
    for raw_line in package_text.split("\n"):
        pkg_ver = raw_line.strip()

        # skip empty lines
        if not pkg_ver:
            continue

        # try parse name first, then version
        try:
            if pkg_ver.find('L<') > -1:
                # =head2 Tue Jul 3 17:41:12 2012: C<Module> L<CPAN::DistnameInfo|CPAN::DistnameInfo>
                name = pkg_ver.split('L<')[1].replace('>', '').split('|')[0]
                continue

            if pkg_ver.find('C<VERSION:') > -1:
                # C<VERSION: 0.23>
                version = pkg_ver.split(':')[1].replace('>', '').strip()
        except:
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue

        if name and version:
            result['packages'].append({'n' : name, 'v' : version})
            # perllocal.pod is parsed in a stream fashion; reset the pair
            # for the next entry
            name = version = None

    result['errors'] = errors
    return result
def parse_mvn_dependency_list(request, package_text): # FALSE NEGATIVE
    """
    Parse the output of `mvn dependency:list`.

    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command

    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0

    for line in package_text.split("\n"):
        pkg_ver = line.strip()

        # skip empty lines
        if not pkg_ver:
            continue

        # skip lines which don't follow the format
        # [INFO] The following files have been resolved:
        # [INFO]    antlr:antlr:jar:2.7.7:provided
        # [INFO]    aopalliance:aopalliance:jar:1.0:compile
        if pkg_ver.find('[INFO] ') == -1:
            continue

        try:
            # strip the "[INFO]    " prefix
            pkg_ver = pkg_ver[10:]
            (gid, aid, atype, version, scope) = pkg_ver.split(':')
            name = "%s:%s" % (gid, aid)
        except ValueError:
            # non-dependency [INFO] lines don't split into five fields
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue

        if (not name) or (not version):
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            # BUG FIX: previously fell through and appended the invalid
            # entry anyway.
            continue

        result['packages'].append({'n' : name, 'v' : version})

    result['errors'] = errors
    return result
def parse_composer_show(request, package_text): # FALSE NEGATIVE
    """
    Parse the output of `composer.phar show --installed`.
    @request - HttpRequest object - used for showing error messages
    @package_text - string - the output of the command
    @return - dict - { 'errors' : int, 'packages' : [{ 'n' : 'v' }]}
    """
    result = {'packages' : []}
    errors = 0
    for line in package_text.split("\n"):
        name = None
        version = None
        pkg_ver = line.strip()
        # skip empty lines
        if not pkg_ver:
            continue
        # skip lines w/o / - names are :vendor/:package
        if pkg_ver.find('/') == -1:
            continue
        try:
            splt = pkg_ver.split(' ')
            name = splt[0]
            # handle external PEAR repos
            # (renamed local from `type` to avoid shadowing the builtin)
            if name.startswith('pear-pear.php.net'):
                name = name.split('/')[1]
                pkg_type = PHP_PEAR_PKG
            elif name.startswith('pear-pear2.php.net'):
                name = name.split('/')[1]
                pkg_type = PHP_PEAR2_PKG
            else:
                pkg_type = None
            # take the rest, strip leading space, split again
            # version is now the first component
            version = " ".join(splt[1:]).strip().split(' ')[0]
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            continue
        if (not name) or (not version):
            errors += 1
            messages.error(request, "'%s' is not a valid input!" % pkg_ver)
            # BUG FIX: do not append an invalid entry after flagging it
            continue
        if pkg_type:
            result['packages'].append({'n' : name, 'v' : version, 't' : pkg_type})
        else:
            result['packages'].append({'n' : name, 'v' : version})
    result['errors'] = errors
    return result
| |
from __future__ import unicode_literals

import base64
import hashlib
import json
import re
import struct
import time
from xml.sax.saxutils import escape

import boto.sqs
import six

from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import RESTError
from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis
from .utils import generate_receipt_handle
from .exceptions import (
    MessageAttributesInvalid,
    MessageNotInflight,
    QueueDoesNotExist,
    QueueAlreadyExists,
    ReceiptHandleIsInvalid,
)
# Hard-coded fake account / sender identity used by this in-memory backend.
DEFAULT_ACCOUNT_ID = 123456789012
DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU"
# Type byte appended after each attribute's datatype when computing
# Message.attribute_md5; String and Number share the same encoding byte.
TRANSPORT_TYPE_ENCODINGS = {'String': b'\x01', 'Binary': b'\x02', 'Number': b'\x01'}
class Message(BaseModel):
    """A single SQS message plus the bookkeeping needed for visibility
    timeouts, delivery delays and receive counts."""

    def __init__(self, message_id, body):
        self.id = message_id
        self._body = body
        self.message_attributes = {}
        self.receipt_handle = None
        self.sender_id = DEFAULT_SENDER_ID
        self.sent_timestamp = None
        self.approximate_first_receive_timestamp = None
        self.approximate_receive_count = 0
        self.deduplication_id = None
        self.group_id = None
        # Millisecond epoch timestamps; zero means "visible now" and
        # "not delayed" respectively.
        self.visible_at = 0
        self.delayed_until = 0

    @property
    def body_md5(self):
        """MD5 hex digest of the raw (unescaped) message body."""
        md5 = hashlib.md5()
        md5.update(self._body.encode('utf-8'))
        return md5.hexdigest()

    @property
    def attribute_md5(self):
        """
        The MD5 of all attributes is calculated by first generating a
        utf-8 string from each attribute and MD5-ing the concatenation
        of them all. Each attribute is encoded with some bytes that
        describe the length of each part and the type of attribute.
        Not yet implemented:
            List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k)
        """
        def utf8(value):
            # Renamed from `str` which shadowed the builtin.
            # Accept text or bytes; the hash input must be bytes.
            if isinstance(value, six.string_types):
                return value.encode('utf-8')
            return value
        md5 = hashlib.md5()
        struct_format = "!I".encode('ascii')  # ensure it's a bytestring
        for name in sorted(self.message_attributes.keys()):
            attr = self.message_attributes[name]
            data_type = attr['data_type']
            encoded = utf8('')
            # Each part of each attribute is encoded right after it's
            # own length is packed into a 4-byte integer
            # 'timestamp' -> b'\x00\x00\x00\t'
            encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name)
            # The datatype is additionally given a final byte
            # representing which type it is
            encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type)
            encoded += TRANSPORT_TYPE_ENCODINGS[data_type]
            if data_type == 'String' or data_type == 'Number':
                value = attr['string_value']
            elif data_type == 'Binary':
                # BUG FIX: removed a stray debug print() of the binary payload.
                value = base64.b64decode(attr['binary_value'])
            else:
                print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type))
                # The following should be enough of a clue to users that
                # they are not, in fact, looking at a correct MD5 while
                # also following the character and length constraints of
                # MD5 so as not to break client softwre
                return('deadbeefdeadbeefdeadbeefdeadbeef')
            encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value)
            md5.update(encoded)
        return md5.hexdigest()

    @property
    def body(self):
        # XML-escaped for embedding in SQS response documents.
        return escape(self._body)

    def mark_sent(self, delay_seconds=None):
        """Stamp the send time and apply an optional delivery delay."""
        self.sent_timestamp = int(unix_time_millis())
        if delay_seconds:
            self.delay(delay_seconds=delay_seconds)

    def mark_received(self, visibility_timeout=None):
        """
        When a message is received we will set the first receive timestamp,
        tap the ``approximate_receive_count`` and the ``visible_at`` time.
        """
        if visibility_timeout:
            visibility_timeout = int(visibility_timeout)
        else:
            visibility_timeout = 0
        if not self.approximate_first_receive_timestamp:
            self.approximate_first_receive_timestamp = int(unix_time_millis())
        self.approximate_receive_count += 1
        # Make message visible again in the future unless its
        # destroyed.
        if visibility_timeout:
            self.change_visibility(visibility_timeout)
        self.receipt_handle = generate_receipt_handle()

    def change_visibility(self, visibility_timeout):
        # We're dealing with milliseconds internally
        visibility_timeout_msec = int(visibility_timeout) * 1000
        self.visible_at = unix_time_millis() + visibility_timeout_msec

    def delay(self, delay_seconds):
        """Hide the message from receivers for ``delay_seconds``."""
        delay_msec = int(delay_seconds) * 1000
        self.delayed_until = unix_time_millis() + delay_msec

    @property
    def visible(self):
        # Visible once the in-flight window has elapsed.
        return unix_time_millis() > self.visible_at

    @property
    def delayed(self):
        # Still delayed while the delivery-delay deadline is in the future.
        return unix_time_millis() < self.delayed_until
class Queue(BaseModel):
    """In-memory model of one SQS queue: attributes, tags, permissions
    and the message store."""

    base_attributes = ['ApproximateNumberOfMessages',
                       'ApproximateNumberOfMessagesDelayed',
                       'ApproximateNumberOfMessagesNotVisible',
                       'CreatedTimestamp',
                       'DelaySeconds',
                       'LastModifiedTimestamp',
                       'MaximumMessageSize',
                       'MessageRetentionPeriod',
                       'QueueArn',
                       'ReceiveMessageWaitTimeSeconds',
                       'VisibilityTimeout']
    fifo_attributes = ['FifoQueue',
                       'ContentBasedDeduplication']
    kms_attributes = ['KmsDataKeyReusePeriodSeconds',
                      'KmsMasterKeyId']
    ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage',
                           'GetQueueAttributes', 'GetQueueUrl',
                           'ReceiveMessage', 'SendMessage')

    def __init__(self, name, region, **kwargs):
        self.name = name
        self.region = region
        self.tags = {}
        # label -> (account_ids, actions); managed by add/remove_permission
        self.permissions = {}
        self._messages = []
        # Messages handed to a consumer and not yet deleted ("in flight").
        self._pending_messages = set()
        now = unix_time()
        self.created_timestamp = now
        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region,
                                                                   self.name)
        self.dead_letter_queue = None
        # default settings for a non fifo queue
        defaults = {
            'ContentBasedDeduplication': 'false',
            'DelaySeconds': 0,
            'FifoQueue': 'false',
            'KmsDataKeyReusePeriodSeconds': 300,  # five minutes
            'KmsMasterKeyId': None,
            'MaximumMessageSize': int(64 << 10),
            'MessageRetentionPeriod': 86400 * 4,  # four days
            'Policy': None,
            'ReceiveMessageWaitTimeSeconds': 0,
            'RedrivePolicy': None,
            'VisibilityTimeout': 30,
        }
        defaults.update(kwargs)
        self._set_attributes(defaults, now)
        # Check some conditions
        if self.fifo_queue and not self.name.endswith('.fifo'):
            raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')

    @property
    def pending_messages(self):
        return self._pending_messages

    @property
    def pending_message_groups(self):
        # FIFO: group ids that currently have an in-flight message.
        return set(message.group_id
                   for message in self._pending_messages
                   if message.group_id is not None)

    def _set_attributes(self, attributes, now=None):
        """Apply CamelCase attribute values onto snake_case instance
        fields, coercing integers and booleans on the way."""
        if not now:
            now = unix_time()
        # BUG FIX: 'KmsDataKeyReusePeriodSeconds' and
        # 'ReceiveMessageWaitTimeSeconds' were misspelled
        # ('KmsDataKeyreusePeriodSeconds' / 'ReceiveMessageWaitTime'),
        # so those attributes were never coerced to int.
        integer_fields = ('DelaySeconds', 'KmsDataKeyReusePeriodSeconds',
                          'MaximumMessageSize', 'MessageRetentionPeriod',
                          'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout')
        bool_fields = ('ContentBasedDeduplication', 'FifoQueue')
        for key, value in six.iteritems(attributes):
            if key in integer_fields:
                value = int(value)
            if key in bool_fields:
                value = value == "true"
            if key == 'RedrivePolicy' and value is not None:
                # Handled separately below via _setup_dlq.
                continue
            setattr(self, camelcase_to_underscores(key), value)
        if attributes.get('RedrivePolicy', None):
            self._setup_dlq(attributes['RedrivePolicy'])
        self.last_modified_timestamp = now

    def _setup_dlq(self, policy):
        """Validate a redrive policy and wire up the dead-letter queue."""
        if isinstance(policy, six.text_type):
            try:
                self.redrive_policy = json.loads(policy)
            except ValueError:
                raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json')
        elif isinstance(policy, dict):
            self.redrive_policy = policy
        else:
            raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json')
        if 'deadLetterTargetArn' not in self.redrive_policy:
            raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn')
        if 'maxReceiveCount' not in self.redrive_policy:
            raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount')
        # The target queue must already exist in this region.
        for queue in sqs_backends[self.region].queues.values():
            if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']:
                self.dead_letter_queue = queue
                if self.fifo_queue and not queue.fifo_queue:
                    raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues')
                break
        else:
            raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn']))

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        sqs_backend = sqs_backends[region_name]
        return sqs_backend.create_queue(
            name=properties['QueueName'],
            region=region_name,
            **properties
        )

    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        queue_name = properties['QueueName']
        sqs_backend = sqs_backends[region_name]
        queue = sqs_backend.get_queue(queue_name)
        if 'VisibilityTimeout' in properties:
            queue.visibility_timeout = int(properties['VisibilityTimeout'])
        if 'ReceiveMessageWaitTimeSeconds' in properties:
            queue.receive_message_wait_time_seconds = int(properties['ReceiveMessageWaitTimeSeconds'])
        return queue

    @classmethod
    def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']
        queue_name = properties['QueueName']
        sqs_backend = sqs_backends[region_name]
        sqs_backend.delete_queue(queue_name)

    @property
    def approximate_number_of_messages_delayed(self):
        return len([m for m in self._messages if m.delayed])

    @property
    def approximate_number_of_messages_not_visible(self):
        return len([m for m in self._messages if not m.visible])

    @property
    def approximate_number_of_messages(self):
        return len(self.messages)

    @property
    def physical_resource_id(self):
        return self.name

    @property
    def attributes(self):
        """Render the queue state as the CamelCase attribute dict SQS
        clients see; booleans are serialized as 'true'/'false'."""
        result = {}
        for attribute in self.base_attributes:
            attr = getattr(self, camelcase_to_underscores(attribute))
            result[attribute] = attr
        if self.fifo_queue:
            for attribute in self.fifo_attributes:
                attr = getattr(self, camelcase_to_underscores(attribute))
                result[attribute] = attr
        if self.kms_master_key_id:
            for attribute in self.kms_attributes:
                attr = getattr(self, camelcase_to_underscores(attribute))
                result[attribute] = attr
        if self.policy:
            result['Policy'] = self.policy
        if self.redrive_policy:
            result['RedrivePolicy'] = json.dumps(self.redrive_policy)
        for key in result:
            if isinstance(result[key], bool):
                result[key] = str(result[key]).lower()
        return result

    def url(self, request_url):
        return "{0}://{1}/123456789012/{2}".format(request_url.scheme, request_url.netloc, self.name)

    @property
    def messages(self):
        # Only messages that are currently deliverable.
        return [message for message in self._messages if message.visible and not message.delayed]

    def add_message(self, message):
        self._messages.append(message)

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            return self.queue_arn
        elif attribute_name == 'QueueName':
            return self.name
        raise UnformattedGetAttTemplateException()
class SQSBackend(BaseBackend):
    """In-memory SQS backend scoped to a single region."""

    def __init__(self, region_name):
        self.region_name = region_name
        self.queues = {}
        super(SQSBackend, self).__init__()

    def reset(self):
        # Recreate the backend in place, keeping only the region binding.
        region_name = self.region_name
        self._reset_model_refs()
        self.__dict__ = {}
        self.__init__(region_name)

    def create_queue(self, name, **kwargs):
        """Create a queue; re-creating with identical attributes is a
        no-op, while differing attributes raise QueueAlreadyExists."""
        queue = self.queues.get(name)
        if queue:
            try:
                kwargs.pop('region')
            except KeyError:
                pass
            # Build a throwaway queue purely to compare effective attributes.
            new_queue = Queue(name, region=self.region_name, **kwargs)
            queue_attributes = queue.attributes
            new_queue_attributes = new_queue.attributes
            # Timestamps always differ between the two; exclude them.
            for key in ['CreatedTimestamp', 'LastModifiedTimestamp']:
                queue_attributes.pop(key)
                new_queue_attributes.pop(key)
            if queue_attributes != new_queue_attributes:
                raise QueueAlreadyExists("The specified queue already exists.")
        else:
            try:
                kwargs.pop('region')
            except KeyError:
                pass
            queue = Queue(name, region=self.region_name, **kwargs)
            self.queues[name] = queue
        return queue

    def list_queues(self, queue_name_prefix):
        """Return queues whose name starts with the prefix (all queues
        when the prefix is falsy)."""
        re_str = '.*'
        if queue_name_prefix:
            re_str = '^{0}.*'.format(queue_name_prefix)
        prefix_re = re.compile(re_str)
        qs = []
        for name, q in self.queues.items():
            if prefix_re.search(name):
                qs.append(q)
        return qs

    def get_queue(self, queue_name):
        """Look up a queue or raise QueueDoesNotExist."""
        queue = self.queues.get(queue_name)
        if queue is None:
            raise QueueDoesNotExist()
        return queue

    def delete_queue(self, queue_name):
        # Returns the removed queue, or False when it did not exist.
        if queue_name in self.queues:
            return self.queues.pop(queue_name)
        return False

    def set_queue_attributes(self, queue_name, attributes):
        queue = self.get_queue(queue_name)
        queue._set_attributes(attributes)
        return queue

    def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None, deduplication_id=None, group_id=None):
        """Enqueue a new message, honouring the per-message delay when
        given, else the queue's default delay."""
        queue = self.get_queue(queue_name)
        if delay_seconds:
            delay_seconds = int(delay_seconds)
        else:
            delay_seconds = queue.delay_seconds
        message_id = get_random_message_id()
        message = Message(message_id, message_body)
        # Attributes, but not *message* attributes
        if deduplication_id is not None:
            message.deduplication_id = deduplication_id
        if group_id is not None:
            message.group_id = group_id
        if message_attributes:
            message.message_attributes = message_attributes
        message.mark_sent(
            delay_seconds=delay_seconds
        )
        queue.add_message(message)
        return message

    def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.
        If a message was read by client and not deleted it is considered to be
        "inflight" and cannot be read. We make attempts to obtain ``count``
        messages but we may return less if messages are in-flight or there
        are simple not enough messages in the queue.
        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum amount of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in
            the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds
        """
        queue = self.get_queue(queue_name)
        result = []
        previous_result_count = len(result)
        polling_end = unix_time() + wait_seconds_timeout
        # queue.messages only contains visible messages
        while True:
            if result or (wait_seconds_timeout and unix_time() > polling_end):
                break
            messages_to_dlq = []
            for message in queue.messages:
                if not message.visible:
                    continue
                if message in queue.pending_messages:
                    # The message is pending but is visible again, so the
                    # consumer must have timed out.
                    queue.pending_messages.remove(message)
                if message.group_id and queue.fifo_queue:
                    if message.group_id in queue.pending_message_groups:
                        # There is already one active message with the same
                        # group, so we cannot deliver this one.
                        continue
                queue.pending_messages.add(message)
                if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
                    messages_to_dlq.append(message)
                    continue
                message.mark_received(
                    visibility_timeout=visibility_timeout
                )
                result.append(message)
                if len(result) >= count:
                    break
            for message in messages_to_dlq:
                queue._messages.remove(message)
                queue.dead_letter_queue.add_message(message)
            if previous_result_count == len(result):
                if wait_seconds_timeout == 0:
                    # There is timeout and we have added no additional results,
                    # so break to avoid an infinite loop.
                    break
                # IMPROVED: `import time` was previously done inside this
                # loop body; it is now a module-level import.
                time.sleep(0.01)
                continue
            previous_result_count = len(result)
        return result

    def delete_message(self, queue_name, receipt_handle):
        """Remove every message whose receipt handle matches."""
        queue = self.get_queue(queue_name)
        new_messages = []
        for message in queue._messages:
            # Keep everything except matching receipt handles.
            if message.receipt_handle == receipt_handle:
                # BUG FIX: discard instead of remove - the message may no
                # longer be in the pending set, which used to raise KeyError.
                queue.pending_messages.discard(message)
                continue
            new_messages.append(message)
        queue._messages = new_messages

    def change_message_visibility(self, queue_name, receipt_handle, visibility_timeout):
        """Adjust the visibility timeout of an in-flight message."""
        queue = self.get_queue(queue_name)
        for message in queue._messages:
            if message.receipt_handle == receipt_handle:
                if message.visible:
                    raise MessageNotInflight
                message.change_visibility(visibility_timeout)
                if message.visible:
                    # If the message is visible again, remove it from pending
                    # messages (discard tolerates it already being gone).
                    queue.pending_messages.discard(message)
                return
        raise ReceiptHandleIsInvalid

    def purge_queue(self, queue_name):
        queue = self.get_queue(queue_name)
        queue._messages = []

    def list_dead_letter_source_queues(self, queue_name):
        """Queues whose redrive policy targets the given queue as a DLQ."""
        dlq = self.get_queue(queue_name)
        queues = []
        for queue in self.queues.values():
            if queue.dead_letter_queue is dlq:
                queues.append(queue)
        return queues

    def add_permission(self, queue_name, actions, account_ids, label):
        """Attach a permission statement; validates actions and accounts."""
        queue = self.get_queue(queue_name)
        if actions is None or len(actions) == 0:
            raise RESTError('InvalidParameterValue', 'Need at least one Action')
        if account_ids is None or len(account_ids) == 0:
            raise RESTError('InvalidParameterValue', 'Need at least one Account ID')
        if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]):
            raise RESTError('InvalidParameterValue', 'Invalid permissions')
        queue.permissions[label] = (account_ids, actions)

    def remove_permission(self, queue_name, label):
        queue = self.get_queue(queue_name)
        if label not in queue.permissions:
            raise RESTError('InvalidParameterValue', 'Permission doesnt exist for the given label')
        del queue.permissions[label]

    def tag_queue(self, queue_name, tags):
        queue = self.get_queue(queue_name)
        queue.tags.update(tags)

    def untag_queue(self, queue_name, tag_keys):
        queue = self.get_queue(queue_name)
        for key in tag_keys:
            try:
                del queue.tags[key]
            except KeyError:
                pass
# One backend instance per boto-known SQS region.
sqs_backends = {region.name: SQSBackend(region.name)
                for region in boto.sqs.regions()}
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import backend
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import tf_export
class Pooling1D(Layer):
  """Shared implementation behind the 1D max/average pooling layers.

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
      pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
      pool_size: An integer or tuple/list of a single integer,
        representing the size of the pooling window.
      strides: An integer or tuple/list of a single integer, specifying the
        strides of the pooling operation.
      padding: A string. The padding method, either 'valid' or 'same'.
        Case-insensitive.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, length, channels)` while `channels_first` corresponds to
        inputs with shape `(batch, channels, length)`.
      name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format=None,
               name=None, **kwargs):
    super(Pooling1D, self).__init__(name=name, **kwargs)
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      strides = pool_size
    self.pool_function = pool_function
    self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=3)

  def call(self, inputs):
    # There is no dedicated TF op for 1D pooling: insert a singleton
    # spatial axis, run the 4D op, then squeeze the axis back out.
    if self.data_format == 'channels_last':
      expanded = array_ops.expand_dims(inputs, 1)  # NWC -> NHWC
      window = (1, 1) + self.pool_size + (1,)
      steps = (1, 1) + self.strides + (1,)
      tf_format = 'NHWC'
      dummy_axis = 1
    else:
      expanded = array_ops.expand_dims(inputs, 2)  # NCW -> NCHW
      window = (1, 1, 1) + self.pool_size
      steps = (1, 1, 1) + self.strides
      tf_format = 'NCHW'
      dummy_axis = 2
    pooled = self.pool_function(
        expanded,
        ksize=window,
        strides=steps,
        padding=self.padding.upper(),
        data_format=tf_format)
    return array_ops.squeeze(pooled, dummy_axis)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    pooled_length = conv_utils.conv_output_length(
        dims[1], self.pool_size[0], self.padding, self.strides[0])
    return tensor_shape.TensorShape([dims[0], pooled_length, dims[2]])

  def get_config(self):
    config = super(Pooling1D, self).get_config()
    config.update({
        'strides': self.strides,
        'pool_size': self.pool_size,
        'padding': self.padding,
    })
    return config
@tf_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')
class MaxPooling1D(Pooling1D):
  """Max pooling operation for temporal data.

  Arguments:
      pool_size: Integer, size of the max pooling windows.
      strides: Integer, or None. Factor by which to downscale.
        E.g. 2 will halve the input. If None, defaults to `pool_size`.
      padding: One of `"valid"` or `"same"` (case-insensitive).

  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.

  Output shape:
      3D tensor with shape: `(batch_size, downsampled_steps, features)`.
  """

  def __init__(self, pool_size=2, strides=None,
               padding='valid', data_format=None, **kwargs):
    # Delegate everything to Pooling1D with the max-pool op bound in.
    super(MaxPooling1D, self).__init__(
        nn.max_pool, pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
@tf_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D')
class AveragePooling1D(Pooling1D):
  """Average pooling operation for temporal data.

  Arguments:
      pool_size: Integer, size of the pooling windows.
      strides: Integer, or None. Factor by which to downscale.
        E.g. 2 will halve the input. If None, defaults to `pool_size`.
      padding: One of `"valid"` or `"same"` (case-insensitive).

  Input shape:
      3D tensor with shape: `(batch_size, steps, features)`.

  Output shape:
      3D tensor with shape: `(batch_size, downsampled_steps, features)`.
  """

  def __init__(self, pool_size=2, strides=None,
               padding='valid', data_format=None, **kwargs):
    # Delegate everything to Pooling1D with the average-pool op bound in.
    super(AveragePooling1D, self).__init__(
        nn.avg_pool, pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
class Pooling2D(Layer):
  """Shared implementation behind the 2D max/average pooling layers.

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
      pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
      pool_size: An integer or tuple/list of 2 integers:
        (pool_height, pool_width) specifying the size of the pooling window.
        Can be a single integer for the same value in both dimensions.
      strides: An integer or tuple/list of 2 integers, specifying the
        strides of the pooling operation. Can be a single integer for the
        same value in both dimensions.
      padding: A string. The padding method, either 'valid' or 'same'.
        Case-insensitive.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`.
      name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format=None,
               name=None, **kwargs):
    super(Pooling2D, self).__init__(name=name, **kwargs)
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      strides = pool_size
    self.pool_function = pool_function
    self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)

  def call(self, inputs):
    # Batch and channel dimensions are never pooled over.
    if self.data_format == 'channels_last':
      window = (1,) + self.pool_size + (1,)
      steps = (1,) + self.strides + (1,)
    else:
      window = (1, 1) + self.pool_size
      steps = (1, 1) + self.strides
    return self.pool_function(
        inputs,
        ksize=window,
        strides=steps,
        padding=self.padding.upper(),
        data_format=conv_utils.convert_data_format(self.data_format, 4))

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    channels_first = self.data_format == 'channels_first'
    row_axis, col_axis = (2, 3) if channels_first else (1, 2)
    rows = conv_utils.conv_output_length(
        dims[row_axis], self.pool_size[0], self.padding, self.strides[0])
    cols = conv_utils.conv_output_length(
        dims[col_axis], self.pool_size[1], self.padding, self.strides[1])
    if channels_first:
      return tensor_shape.TensorShape([dims[0], dims[1], rows, cols])
    return tensor_shape.TensorShape([dims[0], rows, cols, dims[3]])

  def get_config(self):
    config = super(Pooling2D, self).get_config()
    config.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format,
    })
    return config
@tf_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
  """Max pooling operation for spatial data.

  Arguments:
      pool_size: integer or tuple of 2 integers, factors by which to
        downscale (vertical, horizontal). (2, 2) halves the input in both
        spatial dimensions. A single integer applies to both dimensions.
      strides: Integer, tuple of 2 integers, or None. Strides values.
        If None, defaults to `pool_size`.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`.
        Defaults to the `image_data_format` value in your Keras config at
        `~/.keras/keras.json`, and to "channels_last" if never set.

  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, rows, cols)`

  Output shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, pooled_rows, pooled_cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, pooled_rows, pooled_cols)`
  """

  def __init__(self, pool_size=(2, 2), strides=None,
               padding='valid', data_format=None, **kwargs):
    # Delegate everything to Pooling2D with the max-pool op bound in.
    super(MaxPooling2D, self).__init__(
        nn.max_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
@tf_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D')
class AveragePooling2D(Pooling2D):
  """Average pooling operation for spatial data.

  Arguments:
      pool_size: integer or tuple of 2 integers, factors by which to
        downscale (vertical, horizontal). (2, 2) halves the input in both
        spatial dimensions. A single integer applies to both dimensions.
      strides: Integer, tuple of 2 integers, or None. Strides values.
        If None, defaults to `pool_size`.
      padding: One of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`.
        Defaults to the `image_data_format` value in your Keras config at
        `~/.keras/keras.json`, and to "channels_last" if never set.

  Input shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, rows, cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, rows, cols)`

  Output shape:
      - If `data_format='channels_last'`:
          4D tensor with shape: `(batch_size, pooled_rows, pooled_cols, channels)`
      - If `data_format='channels_first'`:
          4D tensor with shape: `(batch_size, channels, pooled_rows, pooled_cols)`
  """

  def __init__(self, pool_size=(2, 2), strides=None,
               padding='valid', data_format=None, **kwargs):
    # Delegate everything to Pooling2D with the average-pool op bound in.
    super(AveragePooling2D, self).__init__(
        nn.avg_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
class Pooling3D(Layer):
  """Pooling layer base class for arbitrary pooling functions on 3D inputs.

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
    pool_function: the pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: integer or tuple/list of 3 integers
      (pool_depth, pool_height, pool_width) giving the pooling window
      size; a single integer is used for all spatial dimensions.
    strides: integer or tuple/list of 3 integers giving the pooling
      strides; a single integer is used for all spatial dimensions.
    padding: string, the padding method, either 'valid' or 'same'
      (case-insensitive).
    data_format: `'channels_last'` (default) or `'channels_first'`,
      i.e. inputs shaped `(batch, depth, height, width, channels)` or
      `(batch, channels, depth, height, width)` respectively.
    name: string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(Pooling3D, self).__init__(name=name, **kwargs)
    # Subclasses pass data_format=None through; resolve it from the
    # global Keras image_data_format setting in that case.
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      strides = pool_size
    self.pool_function = pool_function
    self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=5)

  def call(self, inputs):
    ksize = (1,) + self.pool_size + (1,)
    strides = (1,) + self.strides + (1,)
    needs_transpose = self.data_format == 'channels_first'
    if needs_transpose:
      # TF does not support `channels_first` with 3D pooling operations,
      # so move channels to the last axis and restore them afterwards.
      # TODO(fchollet): remove this when TF pooling is feature-complete.
      inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
    outputs = self.pool_function(
        inputs,
        ksize=ksize,
        strides=strides,
        padding=self.padding.upper())
    if needs_transpose:
      outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
    return outputs

  def compute_output_shape(self, input_shape):
    """Return the 5D output shape for the given 5D input shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_first':
      spatial = input_shape[2:5]
    else:
      spatial = input_shape[1:4]
    pooled = [
        conv_utils.conv_output_length(spatial[i], self.pool_size[i],
                                      self.padding, self.strides[i])
        for i in range(3)
    ]
    if self.data_format == 'channels_first':
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1]] + pooled)
    else:
      return tensor_shape.TensorShape(
          [input_shape[0]] + pooled + [input_shape[4]])

  def get_config(self):
    # Start from the base layer config, then overlay this layer's fields.
    config = dict(super(Pooling3D, self).get_config())
    config.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return config
@tf_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D')
class MaxPooling3D(Pooling3D):
  """Max pooling layer for 3D (spatial or spatio-temporal) data.

  Arguments:
    pool_size: tuple of 3 integers, the downscaling factors for
      (dim1, dim2, dim3); `(2, 2, 2)` halves each spatial dimension.
    strides: tuple of 3 integers, or None. Strides values.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    5D tensor `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3,
    channels)` for `channels_last`, or `(batch_size, channels,
    spatial_dim1, spatial_dim2, spatial_dim3)` for `channels_first`.

  Output shape:
    Same layout as the input, with each spatial dimension replaced by
    its pooled size.
  """

  def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid',
               data_format=None, **kwargs):
    # Thin wrapper: Pooling3D does the real work, with tf.nn.max_pool3d
    # fixed as the pooling function.
    super(MaxPooling3D, self).__init__(
        nn.max_pool3d, pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
@tf_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D')
class AveragePooling3D(Pooling3D):
  """Average pooling layer for 3D (spatial or spatio-temporal) data.

  Arguments:
    pool_size: tuple of 3 integers, the downscaling factors for
      (dim1, dim2, dim3); `(2, 2, 2)` halves each spatial dimension.
    strides: tuple of 3 integers, or None. Strides values.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    5D tensor `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3,
    channels)` for `channels_last`, or `(batch_size, channels,
    spatial_dim1, spatial_dim2, spatial_dim3)` for `channels_first`.

  Output shape:
    Same layout as the input, with each spatial dimension replaced by
    its pooled size.
  """

  def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid',
               data_format=None, **kwargs):
    # Thin wrapper: Pooling3D does the real work, with tf.nn.avg_pool3d
    # fixed as the pooling function.
    super(AveragePooling3D, self).__init__(
        nn.avg_pool3d, pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
class GlobalPooling1D(Layer):
  """Abstract base class for the global 1D pooling layers."""

  def __init__(self, **kwargs):
    super(GlobalPooling1D, self).__init__(**kwargs)
    # Temporal inputs: (batch, steps, features).
    self.input_spec = InputSpec(ndim=3)

  def compute_output_shape(self, input_shape):
    # The steps axis is reduced away; only (batch, features) remains.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    return tensor_shape.TensorShape([shape[0], shape[2]])

  def call(self, inputs):
    raise NotImplementedError
@tf_export('keras.layers.GlobalAveragePooling1D',
           'keras.layers.GlobalAvgPool1D')
class GlobalAveragePooling1D(GlobalPooling1D):
  """Global average pooling layer for temporal data.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)`.

  Output shape:
    2D tensor with shape `(batch_size, features)`.
  """

  def call(self, inputs):
    # Average over the steps axis.
    return backend.mean(inputs, axis=1)
@tf_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D')
class GlobalMaxPooling1D(GlobalPooling1D):
  """Global max pooling layer for temporal data.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)`.

  Output shape:
    2D tensor with shape `(batch_size, features)`.
  """

  def call(self, inputs):
    # Maximum over the steps axis.
    return backend.max(inputs, axis=1)
class GlobalPooling2D(Layer):
  """Abstract base class for the global 2D pooling layers."""

  def __init__(self, data_format=None, **kwargs):
    super(GlobalPooling2D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # Spatial inputs: 4D (batch + 2 spatial axes + channels).
    self.input_spec = InputSpec(ndim=4)

  def compute_output_shape(self, input_shape):
    # Both spatial axes are reduced away; only (batch, channels) remains.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    channel_axis = 3 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape([shape[0], shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    base_config = dict(super(GlobalPooling2D, self).get_config())
    base_config['data_format'] = self.data_format
    return base_config
@tf_export('keras.layers.GlobalAveragePooling2D',
           'keras.layers.GlobalAvgPool2D')
class GlobalAveragePooling2D(GlobalPooling2D):
  """Global average pooling layer for spatial (2D) data.

  Arguments:
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    4D tensor `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    # Average over both spatial axes; their positions depend on the
    # data format.
    spatial_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return backend.mean(inputs, axis=spatial_axes)
@tf_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D')
class GlobalMaxPooling2D(GlobalPooling2D):
  """Global max pooling layer for spatial (2D) data.

  Arguments:
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    4D tensor `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    # Maximum over both spatial axes; their positions depend on the
    # data format.
    spatial_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return backend.max(inputs, axis=spatial_axes)
class GlobalPooling3D(Layer):
  """Abstract base class for the global 3D pooling layers."""

  def __init__(self, data_format=None, **kwargs):
    super(GlobalPooling3D, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    # 3D spatial inputs: 5D (batch + 3 spatial axes + channels).
    self.input_spec = InputSpec(ndim=5)

  def compute_output_shape(self, input_shape):
    # All three spatial axes are reduced away; only (batch, channels)
    # remains.
    shape = tensor_shape.TensorShape(input_shape).as_list()
    channel_axis = 4 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape([shape[0], shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    base_config = dict(super(GlobalPooling3D, self).get_config())
    base_config['data_format'] = self.data_format
    return base_config
@tf_export('keras.layers.GlobalAveragePooling3D',
           'keras.layers.GlobalAvgPool3D')
class GlobalAveragePooling3D(GlobalPooling3D):
  """Global average pooling layer for 3D data.

  Arguments:
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    5D tensor `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3,
    channels)` for `channels_last`, or `(batch_size, channels,
    spatial_dim1, spatial_dim2, spatial_dim3)` for `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    # Average over the three spatial axes; their positions depend on
    # the data format.
    spatial_axes = ([1, 2, 3] if self.data_format == 'channels_last'
                    else [2, 3, 4])
    return backend.mean(inputs, axis=spatial_axes)
@tf_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
  """Global max pooling layer for 3D data.

  Arguments:
    data_format: `'channels_last'` or `'channels_first'`. When omitted
      it falls back to the `image_data_format` value in the Keras config
      file (`~/.keras/keras.json`), which defaults to `'channels_last'`.

  Input shape:
    5D tensor `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3,
    channels)` for `channels_last`, or `(batch_size, channels,
    spatial_dim1, spatial_dim2, spatial_dim3)` for `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    # Maximum over the three spatial axes; their positions depend on
    # the data format.
    spatial_axes = ([1, 2, 3] if self.data_format == 'channels_last'
                    else [2, 3, 4])
    return backend.max(inputs, axis=spatial_axes)
# Aliases
# Short-form names exported alongside the long-form classes so both
# spellings (e.g. `MaxPool2D` and `MaxPooling2D`) resolve to the same
# layer class.
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
| |
from __future__ import absolute_import
from django.utils import timezone
from sentry.models import (
OnboardingTask, OnboardingTaskStatus, OrganizationOnboardingTask, OrganizationOption
)
from sentry.signals import (
event_processed,
project_created,
first_event_pending,
first_event_received,
member_invited,
member_joined,
plugin_enabled,
issue_tracker_used,
)
from sentry.plugins import IssueTrackingPlugin, NotificationPlugin
from sentry.testutils import TestCase
class OrganizationOnboardingTaskTest(TestCase):
    """Tests that Sentry signals create/advance OrganizationOnboardingTask
    rows with the expected task type, status, and payload data."""

    def test_no_existing_task(self):
        # first_event_received on a fresh org should create a COMPLETE
        # FIRST_EVENT task stamped with the project's first_event time.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        first_event_received.send(project=project, group=self.group, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization, task=OnboardingTask.FIRST_EVENT
        )
        assert task.status == OnboardingTaskStatus.COMPLETE
        assert task.project_id == project.id
        assert task.date_completed == project.first_event

    def test_existing_pending_task(self):
        # A PENDING FIRST_EVENT task (from first_event_pending) should be
        # promoted to COMPLETE once the first event is actually received.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        first_event_pending.send(project=project, user=self.user, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.FIRST_EVENT,
        )
        assert task.status == OnboardingTaskStatus.PENDING
        assert task.project_id == project.id
        first_event_received.send(project=project, group=self.group, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.FIRST_EVENT,
        )
        assert task.status == OnboardingTaskStatus.COMPLETE
        assert task.project_id == project.id
        assert task.date_completed == project.first_event

    def test_existing_complete_task(self):
        # An already-COMPLETE task must not be modified by later signals.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        task = OrganizationOnboardingTask.objects.create(
            organization=project.organization,
            task=OnboardingTask.FIRST_PROJECT,
            status=OnboardingTaskStatus.COMPLETE,
        )
        first_event_received.send(project=project, group=self.group, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(id=task.id)
        assert task.status == OnboardingTaskStatus.COMPLETE
        assert not task.project_id

    # Tests on the receivers
    def test_event_processed(self):
        # A fully-populated event should complete the RELEASE_TRACKING,
        # USER_CONTEXT and SOURCEMAPS tasks in one pass.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        event = self.create_full_event()
        event_processed.send(project=project, group=self.group, event=event, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.RELEASE_TRACKING,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.USER_CONTEXT,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.SOURCEMAPS,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None

    def test_project_created(self):
        # Creating a project completes the FIRST_PROJECT task.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        project_created.send(project=project, user=self.user, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.FIRST_PROJECT,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None

    def test_first_event_pending(self):
        # first_event_pending records FIRST_EVENT in PENDING state only.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        first_event_pending.send(project=project, user=self.user, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.FIRST_EVENT,
            status=OnboardingTaskStatus.PENDING,
        )
        assert task is not None

    def test_first_event_received(self):
        # The first event completes FIRST_EVENT (recording the platform);
        # an event from a second project with a different platform then
        # advances SECOND_PLATFORM from PENDING to COMPLETE.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        project = self.create_project(first_event=now)
        project_created.send(project=project, user=self.user, sender=type(project))
        group = self.create_group(
            project=project, platform='javascript', message='javascript error message'
        )
        first_event_received.send(project=project, group=group, sender=type(project))
        task = OrganizationOnboardingTask.objects.get(
            organization=project.organization,
            task=OnboardingTask.FIRST_EVENT,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None
        assert 'platform' in task.data
        assert task.data['platform'] == 'javascript'
        second_project = self.create_project(first_event=now)
        project_created.send(project=second_project, user=self.user, sender=type(second_project))
        second_task = OrganizationOnboardingTask.objects.get(
            organization=second_project.organization,
            task=OnboardingTask.SECOND_PLATFORM,
            status=OnboardingTaskStatus.PENDING,
        )
        assert second_task is not None
        second_group = self.create_group(
            project=second_project, platform='python', message='python error message'
        )
        first_event_received.send(
            project=second_project, group=second_group, sender=type(second_project)
        )
        second_task = OrganizationOnboardingTask.objects.get(
            organization=second_project.organization,
            task=OnboardingTask.SECOND_PLATFORM,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert second_task is not None
        assert 'platform' in second_task.data
        assert second_task.data['platform'] == 'python'
        assert task.data['platform'] != second_task.data['platform']

    def test_member_invited(self):
        # Inviting a member records INVITE_MEMBER as PENDING.
        user = self.create_user(email='test@example.org')
        member = self.create_member(organization=self.organization, teams=[self.team], user=user)
        member_invited.send(member=member, user=user, sender=type(member))
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.INVITE_MEMBER,
            status=OnboardingTaskStatus.PENDING,
        )
        assert task is not None

    def test_member_joined(self):
        # The first joining member completes INVITE_MEMBER; a second join
        # must not overwrite the recorded invited_member_id.
        user = self.create_user(email='test@example.org')
        member = self.create_member(organization=self.organization, teams=[self.team], user=user)
        member_joined.send(member=member, sender=type(member))
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.INVITE_MEMBER,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None
        user2 = self.create_user(email='test@example.com')
        member2 = self.create_member(organization=self.organization, teams=[self.team], user=user2)
        member_joined.send(member=member2, sender=type(member2))
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.INVITE_MEMBER,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task.data['invited_member_id'] == member.id

    def test_issue_tracker_onboarding(self):
        # Enabling an issue-tracking plugin marks ISSUE_TRACKER PENDING;
        # actually using the tracker completes it.
        plugin_enabled.send(
            plugin=IssueTrackingPlugin(),
            project=self.project,
            user=self.user,
            sender=type(IssueTrackingPlugin)
        )
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.ISSUE_TRACKER,
            status=OnboardingTaskStatus.PENDING,
        )
        assert task is not None
        issue_tracker_used.send(
            plugin=IssueTrackingPlugin(),
            project=self.project,
            user=self.user,
            sender=type(IssueTrackingPlugin)
        )
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.ISSUE_TRACKER,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None

    def test_notification_added(self):
        # Enabling a notification plugin immediately completes the
        # NOTIFICATION_SERVICE task (no PENDING phase).
        plugin_enabled.send(
            plugin=NotificationPlugin(),
            project=self.project,
            user=self.user,
            sender=type(NotificationPlugin)
        )
        task = OrganizationOnboardingTask.objects.get(
            organization=self.organization,
            task=OnboardingTask.NOTIFICATION_SERVICE,
            status=OnboardingTaskStatus.COMPLETE,
        )
        assert task is not None

    def test_onboarding_complete(self):
        # Firing every onboarding signal should flip the organization's
        # "onboarding:complete" option exactly once.
        # Drop microsecond value for MySQL
        now = timezone.now().replace(microsecond=0)
        user = self.create_user(email='test@example.org')
        project = self.create_project(first_event=now)
        second_project = self.create_project(first_event=now)
        second_group = self.create_group(
            project=second_project, platform='python', message='python error message'
        )
        event = self.create_full_event()
        member = self.create_member(organization=self.organization, teams=[self.team], user=user)
        event_processed.send(project=project, group=self.group, event=event, sender=type(project))
        project_created.send(project=project, user=user, sender=type(project))
        project_created.send(project=second_project, user=user, sender=type(second_project))
        first_event_received.send(project=project, group=self.group, sender=type(project))
        first_event_received.send(
            project=second_project, group=second_group, sender=type(second_project)
        )
        member_joined.send(member=member, sender=type(member))
        plugin_enabled.send(
            plugin=IssueTrackingPlugin(),
            project=project,
            user=user,
            sender=type(IssueTrackingPlugin)
        )
        issue_tracker_used.send(
            plugin=IssueTrackingPlugin(),
            project=project,
            user=user,
            sender=type(IssueTrackingPlugin)
        )
        plugin_enabled.send(
            plugin=NotificationPlugin(),
            project=project,
            user=user,
            sender=type(NotificationPlugin)
        )
        assert OrganizationOption.objects.filter(
            organization=self.organization, key="onboarding:complete"
        ).count() == 1
| |
# Send print to logger
import scriptoutputwriter
'''
Type testing
https://docs.python.org/2/library/types.html
Types not tested yet:
types.CodeType
types.MethodType
types.UnboundMethodType
types.BuiltinFunctionType
types.BuiltinMethodType
types.ModuleType
types.FileType
types.XRangeType
types.SliceType
types.EllipsisType
types.TracebackType
types.FrameType
types.BufferType
types.DictProxyType
types.NotImplementedType
types.GetSetDescriptorType
types.MemberDescriptorType
types.StringTypes
'''
class OldCallableClassTest:
    # Old-style (Python 2) class whose instances are callable; used to
    # test reflection of callable objects.
    def __call__( self, value ):
        return "Callable class test " + value
class NewCallableClassTest( object ):
    # New-style class counterpart of OldCallableClassTest; instances are
    # callable via __call__.
    def __call__( self, value ):
        return "Callable class test " + value
class DescriptorTest( object ):
    # Data descriptor used to test descriptor reflection.
    # NOTE(review): the value is stored on the descriptor object itself,
    # not on the owning instance, so all instances of an owner class
    # share one value — appears intentional for this test.
    def __init__( self, value ):
        self.value = value
    def __get__( self, obj, objtype ):
        return self.value
    def __set__( self, obj, value ):
        self.value = value
def firstn(n, start=0):
    '''Generator test.

    Yield `n` consecutive integers beginning at `start` (default 0),
    i.e. `start, start + 1, ..., start + n - 1`. With the default
    `start` this matches the original behavior of yielding `0..n-1`.
    '''
    # Generalized: the original hard-coded a starting value of 0; the
    # `start` parameter (default 0) keeps the call firstn(n) unchanged.
    num = start
    while num < start + n:
        yield num
        num += 1
class ValueObjectTest( object ):
    '''
    Wrapper object for reflected property paths.

    The reflection system can only produce a path such as
    "childTest.tupleTest[0]" when the contained value is a Python
    object; basic types like int and string carry no path info,
    so tests wrap them in this class.
    '''

    def __init__( self, value ):
        # Store the wrapped value as a plain attribute.
        self.value = value
class ChildObjectTest( object ):
    '''Nested container of ValueObjectTest instances, used to exercise
    reflected property paths on child attributes.'''

    def __init__( self ):
        self.stringTest = "Child"
        # Same four wrapped values 0..3 in tuple and list form.
        self.tupleTest = tuple(ValueObjectTest(i) for i in range(4))
        self.listTest = [ValueObjectTest(i) for i in range(4)]
        self.dictTest = {ValueObjectTest('Bacon'): ValueObjectTest(0)}
class BadComparison( object ):
    # Object whose comparison always raises, for testing that GUI code
    # tolerates misbehaving objects. (__cmp__ is Python 2 only; it is
    # ignored by Python 3 comparisons.)
    def __cmp__( self, other ):
        raise Exception( "Bad comparison" )
class OldClassTest:
    '''Test of old-style classes'''
    '''
    Properties exposed to GUI.
    In the format "attribute name" : "meta data name"
    '''
    _metaData = {
        "floatTest" : "MetaSlider",
    }

    # Enable for testing
    #def __setattr__( self, name, value ):
    #    '''
    #    Hook for notifying the GUI
    #    '''
    #    print "setattr", self, name
    #    self.__dict__[ name ] = value

    # Enable for testing
    #def __delattr__( self, name ):
    #    '''
    #    Hook for notifying the GUI
    #    '''
    #    print "delattr", self, name
    #    del object.name

    # Class attribute shared by all instances; incremented by updateValues().
    classIntTest = 1

    def __init__( self ):
        # One attribute per basic type so the reflection GUI can display
        # and round-trip each kind of value. (1L is a Python 2 long.)
        self.noneTest = None
        self.boolTest = True
        self.intTest = 1
        self.longTest = 1L
        self.floatTest = 1.0
        #self.complexTest = 1.0j
        self.stringTest = "Spam"
        self.unicodeTest = u"Spam"
        self.childTest = ChildObjectTest()
        self.tupleTest = (1, 2, 3, "Spam")
        self.listTest = [0, 1, 2, 3]
        self.dictTest = {'Bacon': 1, 'Ham': 0}
        # Three flavors of callable: lambda, old-style and new-style
        # callable class instances.
        self.functionTest1 = \
            lambda testString: "Function test " + testString
        self.functionTest2 = OldCallableClassTest()
        self.functionTest3 = NewCallableClassTest()
        #self.generatorTest = firstn
        self.badComparison = BadComparison()
        # Old-style classes only
        self.typeTest1 = type( OldClassTest )
        self.typeTest2 = type( self.typeTest1 )
        self.classTest1 = OldClassTest
        self.classTest2 = self.__class__
        self.instanceTest = type( self )

    def methodTest( self, testString ):
        return "Method test " + testString

    @classmethod
    def classMethodTest( cls, testString ):
        return "Class method test " + testString

    @staticmethod
    def staticMethodTest( testString ):
        return "Static method test " + testString

    # Nested classes used to test constructing objects through reflection.
    class ConstructorTest1:
        def __init__( self, value ):
            self.constructorTest = "Constructor class test " + value

    class ConstructorTest2:
        pass

    def updateValues( self ):
        # Mutate every mutable attribute so the GUI can observe changes.
        OldClassTest.classIntTest = OldClassTest.classIntTest + 1
        self.noneTest = None
        self.boolTest = not self.boolTest
        self.intTest = self.intTest + 1
        self.longTest = self.longTest + 1
        self.floatTest = self.floatTest + 1.0
        self.stringTest = "Spam" + repr( self.intTest )
        self.unicodeTest = u"Spam" + repr( self.intTest )
class NewClassTest( object ):
    '''Test of new-style classes'''
    '''
    Properties exposed to GUI.
    In the format "attribute name" : "meta data name"
    '''
    _metaData = {
        "floatTest" : "MetaSlider",
        "readOnlyPropertyTest1" : "MetaReadOnly",
        "readOnlyPropertyTest2" : "MetaReadOnly",
    }

    # Enable for testing
    #def __setattr__( self, name, value ):
    #    '''
    #    Hook for notifying the GUI
    #    Note: descriptors will not be caught by this hook.
    #    '''
    #    print "setattr", self, name
    #    super( NewClassTest, self ).__setattr__( name, value )

    # Enable for testing
    #def __delattr__( self, name ):
    #    '''
    #    Hook for notifying the GUI
    #    Note: descriptors will not be caught by this hook.
    #    '''
    #    print "delattr", self, name
    #    del object.name

    # Class attribute shared by all instances; incremented by updateValues().
    classIntTest = 1

    def __init__( self ):
        # One attribute per basic type so the reflection GUI can display
        # and round-trip each kind of value. (1L is a Python 2 long.)
        self.noneTest = None
        self.boolTest = True
        self.intTest = 1
        self.longTest = 1L
        self.floatTest = 1.0
        #self.complexTest = 1.0j
        self.stringTest = "Spam"
        self.unicodeTest = u"Spam"
        self.childTest = ChildObjectTest()
        self.tupleTest = (1, 2, 3, "Spam")
        self.listTest = [0, 1, 2, 3]
        self.dictTest = {'Bacon': 1, 'Ham': 0}
        # Three flavors of callable: lambda, old-style and new-style
        # callable class instances.
        self.functionTest1 = \
            lambda testString: "Function test " + testString
        self.functionTest2 = OldCallableClassTest()
        self.functionTest3 = NewCallableClassTest()
        #self.generatorTest = firstn
        self.badComparison = BadComparison()
        # New-style classes only
        self.typeTest1 = type( NewClassTest )
        self.typeTest2 = type( self.typeTest1 )
        self.classTest1 = NewClassTest
        self.classTest2 = self.__class__
        self.instanceTest = type( self )
        # Backing fields for the read-only properties below, plus a
        # descriptor-based attribute.
        self.propertyTest1_ = "Read-only Property"
        self.propertyTest2_ = "Read-only Property"
        self.descriptorTest = DescriptorTest( "Descriptor property" )

    def methodTest( self, testString ):
        return "Method test " + testString

    def getReadOnlyPropertyTest1( self ):
        '''Only works for new-style classes'''
        return self.propertyTest1_
    # property() form of a read-only property.
    readOnlyPropertyTest1 = property( getReadOnlyPropertyTest1 )

    # Decorator form of the same kind of read-only property.
    @property
    def readOnlyPropertyTest2( self ):
        '''Only works for new-style classes'''
        return self.propertyTest2_

    @classmethod
    def classMethodTest( cls, testString ):
        return "Class method test " + testString

    @staticmethod
    def staticMethodTest( testString ):
        return "Static method test " + testString

    # Nested classes used to test constructing objects through reflection.
    class ConstructorTest1( object ):
        def __init__( self, value ):
            self.constructorTest = "Constructor class test " + value

    class ConstructorTest2( object ):
        pass

    def updateValues( self ):
        # Mutate every mutable attribute so the GUI can observe changes.
        NewClassTest.classIntTest = NewClassTest.classIntTest + 1
        self.noneTest = None
        self.boolTest = not self.boolTest
        self.intTest = self.intTest + 1
        self.longTest = self.longTest + 1
        self.floatTest = self.floatTest + 1.0
        self.stringTest = "Spam" + repr( self.intTest )
        self.unicodeTest = u"Spam" + repr( self.intTest )
# Module-level instances exposed to the host application so the
# reflection GUI has one object of each class style to inspect.
oldStyleObject = OldClassTest()
newStyleObject = NewClassTest()
| |
"""
Support for interfacing to the Logitech SqueezeBox API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.squeezebox/
"""
import logging
import asyncio
import urllib.parse
import json
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, SUPPORT_PLAY_MEDIA,
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_PLAY, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, STATE_IDLE, STATE_OFF,
STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, CONF_PORT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 9000
TIMEOUT = 10
SUPPORT_SQUEEZEBOX = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_SEEK | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | \
SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the squeezebox platform.

    Host/port come from discovery when available, otherwise from the
    user configuration. Returns False when the host cannot be resolved,
    True once the player entities have been added.
    """
    import socket
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    # Discovery data takes precedence over static configuration.
    if discovery_info is not None:
        host = discovery_info.get("host")
        port = discovery_info.get("port")
    else:
        host = config.get(CONF_HOST)
        port = config.get(CONF_PORT)
    # In case the port is not discovered
    if port is None:
        port = DEFAULT_PORT
    # Get IP of host, to prevent duplication of same host (different DNS names)
    # NOTE(review): ipaddr is only used for logging below; connections
    # still use the original host name.
    try:
        ipaddr = socket.gethostbyname(host)
    except (OSError) as error:
        _LOGGER.error(
            "Could not communicate with %s:%d: %s", host, port, error)
        return False
    _LOGGER.debug("Creating LMS object for %s", ipaddr)
    lms = LogitechMediaServer(hass, host, port, username, password)
    # Query the server for its players and register each as an entity.
    players = yield from lms.create_players()
    async_add_devices(players)
    return True
class LogitechMediaServer(object):
    """Representation of a Logitech media server.

    Thin wrapper around the LMS JSON-RPC endpoint (``/jsonrpc.js``);
    creates one SqueezeBoxDevice per player reported by the server.
    """

    def __init__(self, hass, host, port, username, password):
        """Initialize the Logitech device."""
        self.hass = hass
        self.host = host
        self.port = port
        self._username = username
        self._password = password

    @asyncio.coroutine
    def create_players(self):
        """Create a list of devices connected to LMS."""
        result = []
        data = yield from self.async_query('players', 'status')
        # BUG FIX: async_query returns False on failure; the old code then
        # crashed on False.get(...). Treat a failed query as "no players".
        if data is False:
            return result
        for players in data.get('players_loop', []):
            player = SqueezeBoxDevice(
                self, players['playerid'], players['name'])
            yield from player.async_update()
            result.append(player)
        return result

    @asyncio.coroutine
    def async_query(self, *command, player=""):
        """Abstract out the JSON-RPC connection.

        Returns the decoded 'result' member of the server reply, or
        False on any communication or protocol error.
        """
        auth = None if self._username is None else aiohttp.BasicAuth(
            self._username, self._password)
        url = "http://{}:{}/jsonrpc.js".format(
            self.host, self.port)
        data = json.dumps({
            "id": "1",
            "method": "slim.request",
            "params": [player, command]
        })

        _LOGGER.debug("URL: %s Data: %s", url, data)

        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
                response = yield from websession.post(
                    url,
                    data=data,
                    auth=auth)

                if response.status != 200:
                    _LOGGER.error(
                        "Query failed, response code: %s Full message: %s",
                        response.status, response)
                    return False

                data = yield from response.json()
        except (asyncio.TimeoutError, aiohttp.ClientError) as error:
            _LOGGER.error("Failed communicating with LMS: %s", type(error))
            return False

        try:
            return data['result']
        # BUG FIX: subscripting the decoded JSON raises KeyError (dict
        # missing 'result') or TypeError (non-dict JSON value) -- never
        # AttributeError, so the old handler could not catch anything.
        except (KeyError, TypeError):
            _LOGGER.error("Received invalid response: %s", data)
            return False
class SqueezeBoxDevice(MediaPlayerDevice):
    """Representation of a SqueezeBox device.

    All state is read from ``self._status``, the most recent status reply
    cached by async_update(); properties return None when a key is absent.
    """

    def __init__(self, lms, player_id, name):
        """Initialize the SqueezeBox device."""
        super(SqueezeBoxDevice, self).__init__()
        self._lms = lms
        self._id = player_id
        # Last raw status reply from the server; refreshed by async_update().
        self._status = {}
        self._name = name
        _LOGGER.debug("Creating SqueezeBox object: %s, %s", name, player_id)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._id

    @property
    def state(self):
        """Return the state of the device."""
        if 'power' in self._status and self._status['power'] == 0:
            return STATE_OFF
        if 'mode' in self._status:
            if self._status['mode'] == 'pause':
                return STATE_PAUSED
            if self._status['mode'] == 'play':
                return STATE_PLAYING
            if self._status['mode'] == 'stop':
                return STATE_IDLE
        return STATE_UNKNOWN

    def async_query(self, *parameters):
        """Send a command to the LMS, scoped to this player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self._lms.async_query(
            *parameters, player=self._id)

    @asyncio.coroutine
    def async_update(self):
        """Retrieve the current state of the player."""
        # 'a'rtist, 'd'uration, artwor'K' url, a'l'bum tags -- presumably;
        # TODO confirm against the LMS CLI documentation.
        tags = 'adKl'
        response = yield from self.async_query(
            "status", "-", "1", "tags:{tags}"
            .format(tags=tags))

        # async_query returns False on any communication/protocol error;
        # keep the previous status in that case.
        if response is False:
            return

        self._status = response.copy()

        # Fold per-track details into the flat status dict; either key may
        # legitimately be absent from the reply.
        try:
            self._status.update(response["playlist_loop"][0])
        except KeyError:
            pass
        try:
            self._status.update(response["remoteMeta"])
        except KeyError:
            pass

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        if 'mixer volume' in self._status:
            return int(float(self._status['mixer volume'])) / 100.0

    @property
    def is_volume_muted(self):
        """Return true if volume is muted."""
        # LMS reports a muted player as a negative mixer volume.
        if 'mixer volume' in self._status:
            return str(self._status['mixer volume']).startswith('-')

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        if 'current_title' in self._status:
            return self._status['current_title']

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        if 'duration' in self._status:
            return int(float(self._status['duration']))

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        if 'artwork_url' in self._status:
            media_url = self._status['artwork_url']
        elif 'id' in self._status:
            media_url = ('/music/{track_id}/cover.jpg').format(
                track_id=self._status['id'])
        else:
            media_url = ('/music/current/cover.jpg?player={player}').format(
                player=self._id)

        # pylint: disable=protected-access
        # NOTE(review): credentials are embedded in the returned URL so the
        # frontend can fetch protected artwork; this exposes them to whoever
        # can read the entity state -- confirm this is intended.
        if self._lms._username:
            base_url = 'http://{username}:{password}@{server}:{port}/'.format(
                username=self._lms._username,
                password=self._lms._password,
                server=self._lms.host,
                port=self._lms.port)
        else:
            base_url = 'http://{server}:{port}/'.format(
                server=self._lms.host,
                port=self._lms.port)

        url = urllib.parse.urljoin(base_url, media_url)
        return url

    @property
    def media_title(self):
        """Title of current playing media."""
        if 'title' in self._status:
            return self._status['title']
        # Fall back to the stream/station title for remote sources.
        if 'current_title' in self._status:
            return self._status['current_title']

    @property
    def media_artist(self):
        """Artist of current playing media."""
        if 'artist' in self._status:
            return self._status['artist']

    @property
    def media_album_name(self):
        """Album of current playing media."""
        if 'album' in self._status:
            return self._status['album']

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_SQUEEZEBOX

    def async_turn_off(self):
        """Turn off media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('power', '0')

    def async_volume_up(self):
        """Volume up media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('mixer', 'volume', '+5')

    def async_volume_down(self):
        """Volume down media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('mixer', 'volume', '-5')

    def async_set_volume_level(self, volume):
        """Set volume level, range 0..1.

        This method must be run in the event loop and returns a coroutine.
        """
        volume_percent = str(int(volume*100))
        return self.async_query('mixer', 'volume', volume_percent)

    def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) media player.

        This method must be run in the event loop and returns a coroutine.
        """
        mute_numeric = '1' if mute else '0'
        return self.async_query('mixer', 'muting', mute_numeric)

    def async_media_play_pause(self):
        """Toggle play/pause on the media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('pause')

    def async_media_play(self):
        """Send play command to media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('play')

    def async_media_pause(self):
        """Send pause command to media player.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('pause', '1')

    def async_media_next_track(self):
        """Send next track command.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('playlist', 'index', '+1')

    def async_media_previous_track(self):
        """Send previous track command.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('playlist', 'index', '-1')

    def async_media_seek(self, position):
        """Send seek command.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('time', position)

    def async_turn_on(self):
        """Turn the media player on.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_query('power', '1')

    def async_play_media(self, media_type, media_id, **kwargs):
        """
        Send the play_media command to the media player.

        If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the current playlist.

        This method must be run in the event loop and returns a coroutine.
        """
        if kwargs.get(ATTR_MEDIA_ENQUEUE):
            return self._add_uri_to_playlist(media_id)

        return self._play_uri(media_id)

    def _play_uri(self, media_id):
        """Replace the current play list with the uri."""
        return self.async_query('playlist', 'play', media_id)

    def _add_uri_to_playlist(self, media_id):
        """Add a items to the existing playlist."""
        return self.async_query('playlist', 'add', media_id)
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import collections
import datetime
import functools
from oslo.config import cfg
import six
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder import keymgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
# Option: build volumes-from-snapshot on the host holding the snapshot.
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
                              default=True,
                              help='Create volume from snapshot at the host '
                                   'where snapshot resides')
# Option: keep clones in the same AZ as their snapshot/source volume.
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
                                 default=True,
                                 help='Ensure that the new volumes are the '
                                      'same AZ as snapshot or source volume')
# Option: how long list_availability_zones() may serve its in-memory cache.
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
                               default=3600,
                               help='Cache volume availability zones in '
                                    'memory for the provided duration in '
                                    'seconds')

CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
# Options declared in other modules but read here.
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')

LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, volume)
    """
    @functools.wraps(func)
    def inner(self, context, target_obj, *args, **kwargs):
        # Enforce the policy named after the wrapped method, then delegate.
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return inner
def check_policy(context, action, target_obj=None):
    """Enforce the 'volume:<action>' policy for this context and target."""
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    if target_obj:
        target.update(target_obj)
    cinder.policy.enforce(context, 'volume:%s' % action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
    def __init__(self, db_driver=None, image_service=None):
        # Glance client used for image-backed volume creation.
        self.image_service = (image_service or
                              glance.get_default_image_service())
        # RPC handles to the scheduler and volume services.
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        # In-memory AZ cache; populated lazily by list_availability_zones().
        self.availability_zones = []
        self.availability_zones_last_fetched = None
        self.key_manager = keymgr.API()
        super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval list of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled'])
for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
return tuple(azs)
    def create(self, context, size, name, description, snapshot=None,
               image_id=None, volume_type=None, metadata=None,
               availability_zone=None, source_volume=None,
               scheduler_hints=None, backup_source_volume=None,
               source_replica=None, consistencygroup=None,shareable=False):
        """Validate the request and run the taskflow create-volume flow.

        Returns the volume record fetched from the flow's storage.
        Raises InvalidInput for bad size/type combinations and
        CinderException when the flow itself cannot be built.
        """
        # NOTE(jdg): we can have a create without size if we're
        # doing a create from snap or volume.  Currently
        # the taskflow api will handle this and pull in the
        # size from the source.

        # NOTE(jdg): cinderclient sends in a string representation
        # of the size value.  BUT there is a possbility that somebody
        # could call the API directly so the is_int_like check
        # handles both cases (string representation or true float or int).
        if size and (not utils.is_int_like(size) or int(size) <= 0):
            msg = _('Invalid volume size provided for create request '
                    '(size argument must be an integer (or string '
                    'represenation or an integer) and greater '
                    'than zero).')
            raise exception.InvalidInput(reason=msg)

        if consistencygroup:
            if not volume_type:
                msg = _("volume_type must be provided when creating "
                        "a volume in a consistency group.")
                raise exception.InvalidInput(reason=msg)
            # The requested type must be one of the group's supported types.
            cg_voltypeids = consistencygroup.get('volume_type_id')
            if volume_type.get('id') not in cg_voltypeids:
                msg = _("Invalid volume_type provided (requested type "
                        "must be supported by this consistency group).")
                raise exception.InvalidInput(reason=msg)

        if source_volume and volume_type:
            if volume_type['id'] != source_volume['volume_type_id']:
                msg = _("Invalid volume_type provided (requested type "
                        "must match source volume, or be omitted). "
                        "You should omit the argument.")
                raise exception.InvalidInput(reason=msg)

        # When cloning replica (for testing), volume type must be omitted
        if source_replica and volume_type:
            msg = _("No volume_type should be provided when creating test "
                    "replica, type must be omitted.")
            raise exception.InvalidInput(reason=msg)

        # NOTE(review): snapshot/volume_type matching was deliberately
        # disabled by keeping it inside this string literal.
        '''if snapshot and volume_type:
            if volume_type['id'] != snapshot['volume_type_id']:
                msg = _("Invalid volume_type provided (requested type "
                        "must match source snapshot, or be omitted). "
                        "You should omit the argument.")
                raise exception.InvalidInput(reason=msg)'''

        # Determine the valid availability zones that the volume could be
        # created in (a task in the flow will/can use this information to
        # ensure that the availability zone requested is valid).
        raw_zones = self.list_availability_zones(enable_cache=True)
        availability_zones = set([az['name'] for az in raw_zones])
        if CONF.storage_availability_zone:
            availability_zones.add(CONF.storage_availability_zone)

        create_what = {
            'context': context,
            'raw_size': size,
            'name': name,
            'description': description,
            'snapshot': snapshot,
            'image_id': image_id,
            'raw_volume_type': volume_type,
            'metadata': metadata,
            'raw_availability_zone': availability_zone,
            'source_volume': source_volume,
            'scheduler_hints': scheduler_hints,
            'key_manager': self.key_manager,
            'backup_source_volume': backup_source_volume,
            'source_replica': source_replica,
            'optional_args': {'is_quota_committed': False},
            'consistencygroup': consistencygroup,
            'shareable': shareable,
        }
        try:
            flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
                                                 self.volume_rpcapi,
                                                 self.db,
                                                 self.image_service,
                                                 availability_zones,
                                                 create_what)
        except Exception:
            LOG.exception(_("Failed to create api volume flow"))
            raise exception.CinderException(
                _("Failed to create api volume flow"))

        # Attaching this listener will capture all of the notifications that
        # taskflow sends out and redirect them to a more useful log for
        # cinders debugging (or error reporting) usage.
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
            return flow_engine.storage.fetch('volume')
    @wrap_check_policy
    def delete(self, context, volume, force=False, unmanage_only=False):
        """Delete a volume, or short-circuit if scheduling never placed it.

        Raises InvalidVolume/VolumeAttached when the volume is not in a
        deletable state (unless force=True for status checks).
        """
        # Admins deleting another tenant's volume charge that tenant's quota.
        if context.is_admin and context.project_id != volume['project_id']:
            project_id = volume['project_id']
        else:
            project_id = context.project_id

        volume_id = volume['id']
        if not volume['host']:
            volume_utils.notify_about_volume_usage(context,
                                                   volume, "delete.start")
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume['volume_type_id'])
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                # Best effort: the volume is still destroyed below even if
                # the quota bookkeeping fails.
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

            volume_utils.notify_about_volume_usage(context,
                                                   volume, "delete.end")
            return

        if not force and volume['status'] not in ["available", "error",
                                                  "error_restoring",
                                                  "error_extending"]:
            msg = _("Volume status must be available or error, "
                    "but current status is: %s") % volume['status']
            raise exception.InvalidVolume(reason=msg)

        if volume['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Volume cannot be deleted while migrating")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible process
        # because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            self.key_manager.delete_key(context, encryption_key_id)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                   'terminated_at': now})

        self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
    @wrap_check_policy
    def update(self, context, volume, fields):
        """Apply *fields* directly to the volume's DB record."""
        self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id, viewable_admin_meta=False):
old_ctxt = context.deepcopy()
if viewable_admin_meta:
ctxt = context.elevated()
else:
ctxt = context
rv = self.db.volume_get(ctxt, volume_id)
volume = dict(rv.iteritems())
try:
check_policy(old_ctxt, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
    def get_all(self, context, marker=None, limit=None, sort_key='created_at',
                sort_dir='desc', filters=None, viewable_admin_meta=False):
        """List volumes with pagination, sorting and filtering.

        NOTE: *filters* is mutated in place (migration-target filter added,
        'all_tenants' removed for admin all-tenant listings).
        """
        check_policy(context, 'get_all')

        if filters is None:
            filters = {}

        allTenants = self._get_all_tenants_value(filters)

        try:
            if limit is not None:
                limit = int(limit)
                if limit < 0:
                    msg = _('limit param must be positive')
                    raise exception.InvalidInput(reason=msg)
        except ValueError:
            # int() failed: limit was not an integer-like string.
            msg = _('limit param must be an integer')
            raise exception.InvalidInput(reason=msg)

        # Non-admin shouldn't see temporary target of a volume migration, add
        # unique filter data to reflect that only volumes with a NULL
        # 'migration_status' or a 'migration_status' that does not start with
        # 'target:' should be returned (processed in db/sqlalchemy/api.py)
        if not context.is_admin:
            filters['no_migration_targets'] = True

        if filters:
            LOG.debug("Searching by: %s" % six.text_type(filters))

        if context.is_admin and allTenants:
            # Need to remove all_tenants to pass the filtering below.
            del filters['all_tenants']
            volumes = self.db.volume_get_all(context, marker, limit, sort_key,
                                             sort_dir, filters=filters)
        else:
            if viewable_admin_meta:
                context = context.elevated()
            volumes = self.db.volume_get_all_by_project(context,
                                                        context.project_id,
                                                        marker, limit,
                                                        sort_key, sort_dir,
                                                        filters=filters)

        return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug("Searching by: %s" % search_opts)
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
#NOTE(jdg): check for Race condition bug 1096983
#explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['shareable']:
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume status must be shareable to reserve again.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if volume['migration_status']:
return
if (volume['status'] != 'in-use' or
volume['attach_status'] != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s'") %
{'status': volume['status'],
'attach_status': volume['attach_status']})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
    @wrap_check_policy
    def attach(self, context, volume, instance_uuid, host_name,
               mountpoint, mode):
        """Attach the volume, enforcing its admin 'readonly' flag."""
        volume_metadata = self.get_volume_admin_metadata(context.elevated(),
                                                         volume)
        if 'readonly' not in volume_metadata:
            # NOTE(zhiyan): set a default value for read-only flag to metadata.
            self.update_volume_admin_metadata(context.elevated(), volume,
                                              {'readonly': 'False'})
            volume_metadata['readonly'] = 'False'

        # A read-only volume may only be attached in 'ro' mode.
        if volume_metadata['readonly'] == 'True' and mode != 'ro':
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume['id'])

        return self.volume_rpcapi.attach_volume(context,
                                                volume,
                                                instance_uuid,
                                                host_name,
                                                mountpoint,
                                                mode)
    @wrap_check_policy
    def detach(self, context, volume, attachment_id):
        """Ask the volume service to detach one attachment of the volume."""
        return self.volume_rpcapi.detach_volume(context, volume,
                                                attachment_id)
    @wrap_check_policy
    def initialize_connection(self, context, volume, connector):
        """Return driver connection info for the given connector."""
        return self.volume_rpcapi.initialize_connection(context,
                                                        volume,
                                                        connector)
    @wrap_check_policy
    def terminate_connection(self, context, volume, connector, force=False):
        """Tear down the export for *connector*.

        NOTE: also unreserves the volume first, reverting any 'attaching'
        state before the connection is terminated.
        """
        self.unreserve_volume(context, volume)
        return self.volume_rpcapi.terminate_connection(context,
                                                       volume,
                                                       connector,
                                                       force)
    @wrap_check_policy
    def accept_transfer(self, context, volume, new_user, new_project):
        """Hand volume ownership to a new user/project via the volume host."""
        return self.volume_rpcapi.accept_transfer(context,
                                                  volume,
                                                  new_user,
                                                  new_project)
    def _create_snapshot(self, context,
                         volume, name, description,
                         force=False, metadata=None,
                         cgsnapshot_id=None):
        """Create the snapshot DB record, then cast to the volume service."""
        snapshot = self.create_snapshot_in_db(
            context, volume, name,
            description, force, metadata, cgsnapshot_id)
        self.volume_rpcapi.create_snapshot(context, volume, snapshot)

        return snapshot
    def create_snapshot_in_db(self, context,
                              volume, name, description,
                              force, metadata,
                              cgsnapshot_id):
        """Validate, reserve quota and create one snapshot DB record.

        Raises InvalidVolume for a non-snapshottable volume and quota
        exceptions when the reservation fails.
        """
        check_policy(context, 'create_snapshot', volume)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Snapshot cannot be created while volume is migrating")
            raise exception.InvalidVolume(reason=msg)

        if volume['status'].startswith('replica_'):
            # Can't snapshot secondary replica
            msg = _("Snapshot of secondary replica is not allowed.")
            raise exception.InvalidVolume(reason=msg)

        if ((not force) and (volume['status'] != "available")):
            msg = _("must be available")
            raise exception.InvalidVolume(reason=msg)

        # Shareable volume that attached to multiple instances cannot create snapshot
        if volume['shareable']:
            attachments = self.db.volume_attachment_get_used_by_volume_id(
                context, volume['id'])
            if len(attachments) > 1:
                msg = _("Shareable volume that attached "
                        "multiple instances cannot create snapshot.")
                raise exception.InvalidVolume(reason=msg)

        try:
            # Gigabyte quota may be waived for snapshots via config.
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': 1}
            else:
                reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume.get('volume_type_id'))
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "%(s_size)sG snapshot (%(d_consumed)dG of "
                            "%(d_quota)dG already consumed)")
                    LOG.warn(msg % {'s_pid': context.project_id,
                                    's_size': volume['size'],
                                    'd_consumed': _consumed(over),
                                    'd_quota': quotas[over]})
                    raise exception.VolumeSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('gigabytes'),
                        quota=quotas['gigabytes'])
                elif 'snapshots' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "snapshot (%(d_consumed)d snapshots "
                            "already consumed)")

                    LOG.warn(msg % {'s_pid': context.project_id,
                                    'd_consumed': _consumed(over)})
                    raise exception.SnapshotLimitExceeded(
                        allowed=quotas[over])

        self._check_metadata_properties(metadata)
        options = {'volume_id': volume['id'],
                   'cgsnapshot_id': cgsnapshot_id,
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'progress': '0%',
                   'volume_size': volume['size'],
                   'display_name': name,
                   'display_description': description,
                   'volume_type_id': volume['volume_type_id'],
                   'encryption_key_id': volume['encryption_key_id'],
                   'metadata': metadata}

        snapshot = None
        try:
            snapshot = self.db.snapshot_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            # Roll back both the DB record (if created) and the reservation.
            with excutils.save_and_reraise_exception():
                try:
                    if snapshot:
                        self.db.snapshot_destroy(context, snapshot['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        return snapshot
    def create_snapshots_in_db(self, context,
                               volume_list,
                               name, description,
                               force, cgsnapshot_id):
        """Validate, reserve quota and create snapshot records for a group.

        All-or-nothing: on any failure every created record is destroyed
        and the combined quota reservation is rolled back.
        """
        snapshot_list = []
        # Validate every volume up front before reserving any quota.
        for volume in volume_list:
            self._create_snapshot_in_db_validate(context, volume, force)

        reservations = self._create_snapshots_in_db_reserve(
            context, volume_list)

        options_list = []
        for volume in volume_list:
            options = self._create_snapshot_in_db_options(
                context, volume, name, description, cgsnapshot_id)
            options_list.append(options)

        try:
            for options in options_list:
                snapshot = self.db.snapshot_create(context, options)
                snapshot_list.append(snapshot)

            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for snap in snapshot_list:
                        self.db.snapshot_destroy(context, snap['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume '%s' is not "
"available.") % volume['id']
raise exception.InvalidVolume(reason=msg)
    def _create_snapshots_in_db_reserve(self, context, volume_list):
        """Reserve combined snapshot/gigabyte quota for a list of volumes."""
        reserve_opts_list = []
        total_reserve_opts = {}
        try:
            # Build per-volume reservation options ...
            for volume in volume_list:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': 1}
                else:
                    reserve_opts = {'snapshots': 1,
                                    'gigabytes': volume['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.get('volume_type_id'))
                reserve_opts_list.append(reserve_opts)

            # ... then sum them into a single reservation request.
            for reserve_opts in reserve_opts_list:
                for (key, value) in reserve_opts.items():
                    if key not in total_reserve_opts.keys():
                        total_reserve_opts[key] = value
                    else:
                        total_reserve_opts[key] = \
                            total_reserve_opts[key] + value
            reservations = QUOTAS.reserve(context, **total_reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "%(s_size)sG snapshot (%(d_consumed)dG of "
                            "%(d_quota)dG already consumed)")
                    # NOTE(review): 'volume' here is the last volume from the
                    # loop above, not necessarily the one that exceeded the
                    # quota -- the logged size may be misleading; confirm.
                    LOG.warning(msg % {'s_pid': context.project_id,
                                       's_size': volume['size'],
                                       'd_consumed': _consumed(over),
                                       'd_quota': quotas[over]})
                    raise exception.VolumeSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('gigabytes'),
                        quota=quotas['gigabytes'])
                elif 'snapshots' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "snapshot (%(d_consumed)d snapshots "
                            "already consumed)")
                    LOG.warning(msg % {'s_pid': context.project_id,
                                       'd_consumed': _consumed(over)})
                    raise exception.SnapshotLimitExceeded(
                        allowed=quotas[over])

        return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
return self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
msg = _("Snapshot %s is part of a cgsnapshot and has to be "
"deleted together with the cgsnapshot.") % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
self.db.snapshot_update(context, snapshot['id'], fields)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return dict(rv.iteritems())
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_snapshot_metadata(context, snapshot)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.snapshot_metadata_update(context,
snapshot['id'],
_metadata,
True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
    def get_snapshot_metadata_value(self, snapshot, key):
        # Unimplemented stub; implicitly returns None.  Presumably meant to
        # mirror get_volume_metadata_value for snapshots -- TODO confirm
        # intended behavior before relying on it.
        pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
    @wrap_check_policy
    def copy_volume_to_image(self, context, volume, metadata, force):
        """Create a new image from the specified volume.

        Merges any non-core glance properties stored for the volume into
        ``metadata``, registers (or looks up) the image in the image
        service, marks the volume 'uploading', and casts the actual copy
        to the volume manager.  Returns a summary dict for the API layer.
        """
        self._check_volume_availability(volume, force)
        glance_core_properties = CONF.glance_core_properties
        if glance_core_properties:
            try:
                volume_image_metadata = self.get_volume_image_metadata(context,
                                                                       volume)
                # Keep only properties that are not glance "core" ones; they
                # are forwarded under the image's 'properties' key.
                custom_property_set = (set(volume_image_metadata).difference
                                       (set(glance_core_properties)))
                if custom_property_set:
                    metadata.update(dict(properties=dict((custom_property,
                                                          volume_image_metadata
                                                          [custom_property])
                                                         for custom_property
                                                         in custom_property_set)))
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
        image_name = metadata["name"]
        if image_name.startswith('image@'):
            # 'image@<id>' addresses an already-registered image: look it up
            # instead of creating a new one.
            image_id = image_name.split('@')[1]
            recv_metadata = self.image_service.show(context, image_id)
        else:
            recv_metadata = self.image_service.create(context, metadata)
        self.update(context, volume, {'status': 'uploading'})
        self.volume_rpcapi.copy_volume_to_image(context,
                                                volume,
                                                recv_metadata)
        response = {"id": volume['id'],
                    "updated_at": volume['updated_at'],
                    "status": 'uploading',
                    "display_description": volume['display_description'],
                    "size": volume['size'],
                    "volume_type": volume['volume_type'],
                    "image_id": recv_metadata['id'],
                    "container_format": recv_metadata['container_format'],
                    "disk_format": recv_metadata['disk_format'],
                    "image_name": recv_metadata.get('name', None)}
        return response
    @wrap_check_policy
    def extend(self, context, volume, new_size):
        """Grow an available volume to ``new_size`` gigabytes.

        Reserves the size delta against the project's gigabyte quota,
        marks the volume 'extending', and casts the resize to the volume
        manager with the reservations (so they can be rolled back).

        :raises InvalidVolume: volume is not 'available'
        :raises InvalidInput: new_size is not strictly larger
        :raises VolumeSizeExceedsAvailableQuota: gigabyte quota exhausted
        """
        if volume['status'] != 'available':
            msg = _('Volume status must be available to extend.')
            raise exception.InvalidVolume(reason=msg)
        size_increase = (int(new_size)) - volume['size']
        if size_increase <= 0:
            msg = (_("New size for extend must be greater "
                     "than current size. (current: %(size)s, "
                     "extended: %(new_size)s)") % {'new_size': new_size,
                                                   'size': volume['size']})
            raise exception.InvalidInput(reason=msg)
        try:
            # Only the delta is reserved; commit/rollback happens later in
            # the volume manager.
            reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
        except exception.OverQuota as exc:
            usages = exc.kwargs['usages']
            quotas = exc.kwargs['quotas']
            def _consumed(name):
                # Total usage = already reserved + currently in use.
                return (usages[name]['reserved'] + usages[name]['in_use'])
            msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
                    "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
                    "consumed).")
            LOG.error(msg % {'s_pid': context.project_id,
                             's_size': size_increase,
                             'd_consumed': _consumed('gigabytes'),
                             'd_quota': quotas['gigabytes']})
            raise exception.VolumeSizeExceedsAvailableQuota(
                requested=size_increase,
                consumed=_consumed('gigabytes'),
                quota=quotas['gigabytes'])
        # Flag the state change before the asynchronous RPC cast.
        self.update(context, volume, {'status': 'extending'})
        self.volume_rpcapi.extend_volume(context, volume, new_size,
                                         reservations)
    @wrap_check_policy
    def migrate_volume(self, context, volume, host, force_host_copy):
        """Migrate the volume to the specified host.

        Validates that the volume is migratable (available/in-use, not
        already migrating, no snapshots, not replicated, not in a
        consistency group), that the destination service exists, is up,
        and differs from the current host, then asks the scheduler to
        start the migration.
        """
        # We only handle "available" volumes for now
        if volume['status'] not in ['available', 'in-use']:
            msg = _('Volume status must be available/in-use.')
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # NOTE(review): this rejects in-use volumes that are flagged
        # shareable; per the message only *available* shareable volumes may
        # migrate -- confirm intended semantics.
        if volume['status'] in ['in-use'] and volume['shareable']:
            msg = _('Only available shareable Volume can be migrated')
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # Make sure volume is not part of a migration
        if volume['migration_status'] is not None:
            msg = _("Volume is already part of an active migration")
            raise exception.InvalidVolume(reason=msg)
        # We only handle volumes without snapshots for now
        snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
        if snaps:
            msg = _("volume must not have snapshots")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # We only handle non-replicated volumes for now
        rep_status = volume['replication_status']
        if rep_status is not None and rep_status != 'disabled':
            msg = _("Volume must not be replicated.")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        cg_id = volume.get('consistencygroup_id', None)
        if cg_id:
            msg = _("Volume must not be part of a consistency group.")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # Make sure the host is in the list of available hosts
        elevated = context.elevated()
        topic = CONF.volume_topic
        services = self.db.service_get_all_by_topic(elevated,
                                                    topic,
                                                    disabled=False)
        found = False
        # NOTE(review): svc_host is loop-invariant and the loop keeps
        # scanning after a match; hoisting extract_host() and breaking early
        # would be cheaper with identical behavior.
        for service in services:
            svc_host = volume_utils.extract_host(host, 'backend')
            if utils.service_is_up(service) and service['host'] == svc_host:
                found = True
        if not found:
            msg = (_('No available service named %s') % host)
            LOG.error(msg)
            raise exception.InvalidHost(reason=msg)
        # Make sure the destination host is different than the current one
        if host == volume['host']:
            msg = _('Destination host must be different than current host')
            LOG.error(msg)
            raise exception.InvalidHost(reason=msg)
        self.update(context, volume, {'migration_status': 'starting'})
        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume
        volume_type = {}
        volume_type_id = volume['volume_type_id']
        if volume_type_id:
            volume_type = volume_types.get_volume_type(context, volume_type_id)
        request_spec = {'volume_properties': volume,
                        'volume_type': volume_type,
                        'volume_id': volume['id']}
        self.scheduler_rpcapi.migrate_volume_to_host(context,
                                                     CONF.volume_topic,
                                                     volume['id'],
                                                     host,
                                                     force_host_copy,
                                                     request_spec)
    @wrap_check_policy
    def migrate_volume_completion(self, context, volume, new_volume, error):
        """Finish (or, when ``error`` is set, abort) a volume migration.

        Verifies both volumes are mid-migration and that ``new_volume``
        really is the migration target of ``volume`` before delegating to
        the volume manager over RPC.
        """
        # This is a volume swap initiated by Nova, not Cinder. Nova expects
        # us to return the new_volume_id.
        if not (volume['migration_status'] or new_volume['migration_status']):
            return new_volume['id']
        if not volume['migration_status']:
            msg = _('Source volume not mid-migration.')
            raise exception.InvalidVolume(reason=msg)
        if not new_volume['migration_status']:
            msg = _('Destination volume not mid-migration.')
            raise exception.InvalidVolume(reason=msg)
        # While migrating, the destination records 'target:<source id>'.
        expected_status = 'target:%s' % volume['id']
        if not new_volume['migration_status'] == expected_status:
            msg = (_('Destination has migration_status %(stat)s, expected '
                     '%(exp)s.') % {'stat': new_volume['migration_status'],
                                    'exp': expected_status})
            raise exception.InvalidVolume(reason=msg)
        return self.volume_rpcapi.migrate_volume_completion(context, volume,
                                                            new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume status must be available to update readonly flag.')
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status '
'on volume: %s') % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = (_('New volume_type same as original: %s') % new_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['qos_specs']['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volumes')
raise exception.InvalidInput(reason=msg)
# We're checking here in so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
    def manage_existing(self, context, host, ref, name=None, description=None,
                        volume_type=None, metadata=None,
                        availability_zone=None, bootable=False):
        """Bring an existing backend storage object under Cinder management.

        Creates a 'creating' volume record bound to ``host`` and asks the
        scheduler to have the driver adopt the object identified by
        ``ref``.  When no availability zone is given, the zone of the
        host's volume service is used.

        :returns: the newly created volume DB record
        """
        if availability_zone is None:
            elevated = context.elevated()
            try:
                svc_host = volume_utils.extract_host(host, 'backend')
                service = self.db.service_get_by_host_and_topic(
                    elevated, svc_host, CONF.volume_topic)
            except exception.ServiceNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(_('Unable to find service for given host.'))
            availability_zone = service.get('availability_zone')
        volume_type_id = volume_type['id'] if volume_type else None
        volume_properties = {
            # Size is recorded as 0 here; presumably updated once the backend
            # reports the managed object's real size -- TODO confirm.
            'size': 0,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            # Rename these to the internal name.
            'display_description': description,
            'display_name': name,
            'host': host,
            'availability_zone': availability_zone,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
            'bootable': bootable
        }
        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume
        volume = self.db.volume_create(context, volume_properties)
        request_spec = {'volume_properties': volume,
                        'volume_type': volume_type,
                        'volume_id': volume['id'],
                        'ref': ref}
        self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
                                              volume['id'],
                                              request_spec=request_spec)
        return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    # NOTE: the docstring above was previously a dead string expression
    # inside __init__ (after the super() call), where it documented nothing.

    def __init__(self):
        super(HostAPI, self).__init__()

    def set_host_enabled(self, context, host, enabled):
        """Sets the specified host's ability to accept new volumes."""
        raise NotImplementedError()

    def get_host_uptime(self, context, host):
        """Returns the result of calling "uptime" on the target host."""
        raise NotImplementedError()

    def host_power_action(self, context, host, action):
        """Perform the given power action on the target host."""
        raise NotImplementedError()

    def set_host_maintenance(self, context, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        volume evacuation.
        """
        raise NotImplementedError()
| |
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
https://tools.ietf.org/html/rfc7231#section-7.1.4
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.encoding import iri_to_uri
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.log import log_response
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
# Splits comma-delimited header values (Cache-Control, Vary), tolerating
# whitespace around the commas.
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
    """
    Patch the Cache-Control header by adding all keyword arguments to it.
    The transformation is as follows:
    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def _parse(directive):
        # 'max-age=10' -> ('max-age', '10'); bare 'public' -> ('public', True)
        name, sep, value = directive.partition('=')
        return (name.lower(), value if sep else True)

    def _serialize(name, value):
        # True-valued directives are emitted bare, everything else as k=v.
        return name if value is True else '%s=%s' % (name, value)

    header = response.get('Cache-Control')
    directives = {}
    if header:
        for part in cc_delim_re.split(header):
            key, value = _parse(part)
            directives[key] = value
    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in directives and 'max_age' in kwargs:
        kwargs['max_age'] = min(int(directives['max-age']), kwargs['max_age'])
    # Allow overriding private caching and vice versa.
    if 'private' in directives and 'public' in kwargs:
        del directives['private']
    elif 'public' in directives and 'private' in kwargs:
        del directives['public']
    for key, value in kwargs.items():
        directives[key.replace('_', '-')] = value
    response['Cache-Control'] = ', '.join(
        _serialize(key, value) for key, value in directives.items())
def get_max_age(response):
    """
    Return the max-age from the response Cache-Control header as an integer,
    or None if it wasn't found or wasn't an integer.
    """
    if not response.has_header('Cache-Control'):
        return
    directives = dict(
        _to_tuple(part)
        for part in cc_delim_re.split(response['Cache-Control'])
    )
    try:
        return int(directives['max-age'])
    except (ValueError, TypeError, KeyError):
        # Missing directive, bare 'max-age' (True), or non-numeric value.
        pass
def set_response_etag(response):
    """Set the ETag header to the quoted MD5 of the response content."""
    if response.streaming:
        # Streaming content isn't available up front to hash.
        return response
    digest = hashlib.md5(response.content).hexdigest()
    response['ETag'] = quote_etag(digest)
    return response
def _precondition_failed(request):
    """Return a 412 Precondition Failed response, logging the failure."""
    failed = HttpResponse(status=412)
    log_response(
        'Precondition Failed: %s', request.path,
        response=failed,
        request=request,
    )
    return failed
def _not_modified(request, response=None):
    """Build a 304 reply, carrying over cache headers and cookies from
    ``response`` when one is given."""
    not_modified = HttpResponseNotModified()
    if not response:
        return not_modified
    # Preserve the headers required by Section 4.1 of RFC 7232, as well as
    # Last-Modified.
    for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag',
                   'Expires', 'Last-Modified', 'Vary'):
        if header in response:
            not_modified[header] = response[header]
    # Preserve cookies as per the cookie specification: "If a proxy server
    # receives a response which contains a Set-cookie header, it should
    # propagate the Set-cookie header to the client, regardless of whether
    # the response was 304 (Not Modified) or 200 (OK).
    # https://curl.haxx.se/rfc/cookie_spec.html
    not_modified.cookies = response.cookies
    return not_modified
def get_conditional_response(request, etag=None, last_modified=None, response=None):
    """
    Evaluate the request's conditional headers (If-Match, If-None-Match,
    If-Modified-Since, If-Unmodified-Since) against the given ``etag`` /
    ``last_modified`` validators, following section 6 of RFC 7232.

    Return a 304 or 412 response when a precondition dictates it;
    otherwise return ``response`` unchanged (which may be None).
    """
    # Only return conditional responses on successful requests.
    if response and not (200 <= response.status_code < 300):
        return response
    # Get HTTP request headers.
    if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
    if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
    if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
    if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
    if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
    if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
    # Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
    if if_match_etags and not _if_match_passes(etag, if_match_etags):
        return _precondition_failed(request)
    # Step 2: Test the If-Unmodified-Since precondition.
    if (not if_match_etags and if_unmodified_since and
            not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
        return _precondition_failed(request)
    # Step 3: Test the If-None-Match precondition.
    if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
        if request.method in ('GET', 'HEAD'):
            return _not_modified(request, response)
        else:
            return _precondition_failed(request)
    # Step 4: Test the If-Modified-Since precondition.
    if (not if_none_match_etags and if_modified_since and
            not _if_modified_since_passes(last_modified, if_modified_since)):
        if request.method in ('GET', 'HEAD'):
            return _not_modified(request, response)
    # Step 5: Test the If-Range precondition (not supported).
    # Step 6: Return original response since there isn't a conditional response.
    return response
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
    """
    Add HTTP caching headers to the given HttpResponse: Expires and
    Cache-Control.
    Each header is only added if it isn't already set.
    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    # Clamp: a negative max-age is not valid HTTP.
    cache_timeout = max(cache_timeout, 0)
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Add headers to a response to indicate that a page should never be cached.
    """
    # cache_timeout=-1 is clamped to 0 by patch_response_headers, yielding an
    # already-expired Expires header and max-age=0.
    patch_response_headers(response, cache_timeout=-1)
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True, private=True)
def patch_vary_headers(response, newheaders):
    """
    Add (or update) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". If headers
    contains an asterisk, then "Vary" header will consist of a single asterisk
    '*'. Otherwise, existing headers in "Vary" aren't removed.
    """
    # Note that we need to keep the original order intact, because cache
    # implementations may rely on the order of the Vary contents in, say,
    # computing an MD5 hash.
    vary = []
    if response.has_header('Vary'):
        vary = cc_delim_re.split(response['Vary'])
    # Header-name comparison is case-insensitive.
    seen = {header.lower() for header in vary}
    vary.extend(header for header in newheaders
                if header.lower() not in seen)
    response['Vary'] = '*' if '*' in vary else ', '.join(vary)
def has_vary_header(response, header_query):
    """
    Check to see if the response has a given header name in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    # Compare case-insensitively against the existing Vary entries.
    present = {h.lower() for h in cc_delim_re.split(response['Vary'])}
    return header_query.lower() in present
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, add the current locale or time zone to the cache key."""
    if settings.USE_I18N or settings.USE_L10N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        lang = getattr(request, 'LANGUAGE_CODE', get_language())
        cache_key = '%s.%s' % (cache_key, lang)
    if settings.USE_TZ:
        cache_key = '%s.%s' % (cache_key, get_current_timezone_name())
    return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Return a cache key from the headers given in the header list."""
    # Hash the values of the relevant request headers, in headerlist order.
    header_hash = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header)
        if value is not None:
            header_hash.update(value.encode())
    url_hash = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url_hash.hexdigest(), header_hash.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
    """Return a cache key for the header cache."""
    url_hash = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url_hash.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Return a cache key based on the request URL and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global URL registry and uses those to build a cache key
    to check against.
    If there isn't a headerlist stored, return None, indicating that the page
    needs to be rebuilt.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    headerlist = cache.get(_generate_cache_header_key(key_prefix, request))
    if headerlist is None:
        return None
    return _generate_cache_key(request, method, headerlist, key_prefix)
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learn what headers to take into account for some request URL from the
    response object. Store those headers in a global URL registry so that
    later access to that URL will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.
    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    if response.has_header('Vary'):
        is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
        # If i18n or l10n are used, the generated cache key will be suffixed
        # with the current locale. Adding the raw value of Accept-Language is
        # redundant in that case and would result in storing the same content
        # under multiple keys in the cache. See #18191 for details.
        headerlist = []
        # Normalize each Vary header name to its WSGI META form (HTTP_FOO_BAR).
        for header in cc_delim_re.split(response['Vary']):
            header = header.upper().replace('-', '_')
            if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
                headerlist.append('HTTP_' + header)
        # Sort so that equivalent Vary headers always produce the same key.
        headerlist.sort()
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.build_absolute_uri()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| |
from pytz import timezone
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.template import loader
from rest_framework.response import Response
from add2cal import Add2Cal
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import (
MentorProgramOfficeHour,
Startup,
)
from ...permissions.v1_api_permissions import (
RESERVE_PERMISSION_DENIED_DETAIL,
IsAuthenticated,
)
from ...views import ADD2CAL_DATE_FORMAT
from .impact_view import ImpactView
from .utils import (
email_template_path,
is_office_hour_reserver,
office_hour_time_info,
datetime_is_in_past,
)
from ...minimal_email_handler import send_email
# Active user model for this Django project.
User = get_user_model()
# Templates for the reservation notification emails (mentor vs. finalist copy).
mentor_template_name = "reserve_office_hour_email_to_mentor.html"
finalist_template_name = "reserve_office_hour_email_to_finalist.html"
# Filename and MIME type for the ICS calendar-invite email attachment.
ICS_FILENAME = 'reminder.ics'
ICS_FILETYPE = 'text/calendar'
class ReserveOfficeHourView(ImpactView):
    """Reserve a mentor office-hour session for a finalist.

    POST-only endpoint.  The reservation may be made by the finalist
    themselves or, for staff ("employee") users, on behalf of another
    user via ``user_id``.  On success the office hour row is updated
    (finalist, topics, startup) and confirmation emails with an ICS
    calendar attachment are sent to both the mentor and the finalist.

    The response always carries ``success``, ``header``, ``detail`` and
    ``timecard_info`` keys; failures set ``success`` to False and put
    the reason in ``detail``.
    """
    view_name = "reserve_office_hour"
    permission_classes = [IsAuthenticated]
    OFFICE_HOUR_TITLE = "Office Hours Session with {}"
    SUCCESS_HEADER = "Office Hour reserved with {}"
    # Bug fix: message previously read "This office officehour occurs ..."
    SUCCESS_PAST_DETAIL = "This office hour occurs in the past"
    FAIL_HEADER = "Office hour could not be reserved"
    NO_OFFICE_HOUR_SPECIFIED = "No office hour was specified"
    NO_SUCH_OFFICE_HOUR = "This office hour is no longer available."
    NO_SUCH_STARTUP = "No such startup exists"
    NO_SUCH_USER = "No such user exists"
    OFFICE_HOUR_ALREADY_RESERVED = "That session has already been reserved"
    SUBJECT = "Office Hours Reservation Notification"
    STARTUP_NOT_ASSOCIATED_WITH_USER = ("The selected startup is not a valid "
                                        "choice for {}")
    USER_CANNOT_RESERVE_OFFICE_HOURS = ("The selected user is not allowed to "
                                        "reserve office hour sessions.")
    # Bug fix: message previously read "... another existing officehours"
    CONFLICT_EXISTS = ("The requested time overlaps with another "
                       "existing office hour")

    def post(self, request):
        '''
        params:
        office_hour_id (required)
        user_id (optional, defaults to request.user)
        startup_id (optional)
        '''
        # Short-circuit: only attempt the reservation when every request
        # parameter validated; fail() has already populated the response
        # state otherwise.
        (self._extract_request_data(request) and
         self._reserve_office_hour())
        return self._response()

    def _extract_request_data(self, request):
        """Validate and stash all request parameters; False on any failure."""
        if not (self._extract_office_hour(request) and
                self._extract_user(request) and
                self._extract_startup(request)):
            return False
        self.message = request.data.get("message", "")
        return True

    def _extract_office_hour(self, request):
        """Resolve ``office_hour_id`` into ``self.office_hour``."""
        office_hour_id = request.data.get("office_hour_id", None)
        if office_hour_id is None:
            self.fail(self.NO_OFFICE_HOUR_SPECIFIED)
            return False
        try:
            self.office_hour = MentorProgramOfficeHour.objects.get(
                pk=office_hour_id)
        except MentorProgramOfficeHour.DoesNotExist:
            self.fail(self.NO_SUCH_OFFICE_HOUR)
            return False
        return True

    def _extract_user(self, request):
        """Resolve the reserving user into ``self.target_user``.

        Reserving for another user (``user_id`` differing from the
        requester) is restricted to employees; ``self.on_behalf_of``
        records which case applies.
        """
        user_id = request.data.get("user_id", None)
        # NOTE(review): request.data values may be strings, so a string
        # user_id never equals the int request.user.id and is treated as
        # on-behalf-of even for the requester — confirm callers send ints.
        if user_id is not None and user_id != request.user.id:
            try:
                self.target_user = User.objects.get(pk=user_id)
            except User.DoesNotExist:
                self.fail(self.NO_SUCH_USER)
                return False
            if is_employee(request.user):
                self.on_behalf_of = True
            else:
                self.fail(RESERVE_PERMISSION_DENIED_DETAIL)
                return False
        else:
            self.target_user = request.user
            self.on_behalf_of = False
        if not is_office_hour_reserver(self.target_user):
            self.fail(self.USER_CANNOT_RESERVE_OFFICE_HOURS)
            return False
        return True

    def _extract_startup(self, request):
        """Resolve optional ``startup_id`` into ``self.startup`` (or None).

        A chosen startup must belong to the target user's startup teams.
        """
        startup_id = request.data.get("startup_id", None)
        if startup_id is None:
            self.startup = None
        else:
            try:
                self.startup = Startup.objects.get(pk=startup_id)
            except Startup.DoesNotExist:
                self.fail(self.NO_SUCH_STARTUP)
                return False
            if not self.target_user.startupteammember_set.filter(
                    startup=self.startup).exists():
                self.fail(self.STARTUP_NOT_ASSOCIATED_WITH_USER.format(
                    self.target_user.email))
                return False
        return True

    def _reserve_office_hour(self):
        """Perform the reservation after all inputs validated."""
        if self.office_hour.finalist is not None:
            # Already taken by someone else.
            self.fail(self.OFFICE_HOUR_ALREADY_RESERVED)
            return False
        if self._conflict_exists():
            self.fail(self.CONFLICT_EXISTS)
            return False
        self._update_office_hour_data()
        self._send_confirmation_emails()
        self._succeed()
        return True

    def _conflict_exists(self):
        """True when the target user already holds an overlapping session.

        Three overlap shapes are checked: another session starting inside
        this one, ending inside this one, or fully enclosing it.
        """
        start = self.office_hour.start_date_time
        end = self.office_hour.end_date_time
        start_conflict = (Q(start_date_time__gt=start) &
                          Q(start_date_time__lt=end))
        end_conflict = (Q(end_date_time__gt=start) &
                        Q(end_date_time__lt=end))
        enclosing_conflict = (Q(start_date_time__lte=start) &
                              Q(end_date_time__gte=end))
        if self.target_user.finalist_officehours.filter(
                start_conflict | end_conflict | enclosing_conflict).exists():
            return True
        return False

    def _update_office_hour_data(self):
        """Persist the reservation on the office-hour row."""
        self.office_hour.finalist = self.target_user
        self.office_hour.topics = self.message
        self.office_hour.startup = self.startup
        self.office_hour.save()

    def _send_confirmation_emails(self):
        """Email both parties; mentor copy is prepared (and cached) first."""
        mentor = self.office_hour.mentor
        finalist = self.target_user
        send_email(**self.prepare_email_notification(mentor,
                                                     finalist,
                                                     mentor_template_name,
                                                     True))
        send_email(**self.prepare_email_notification(finalist,
                                                     mentor,
                                                     finalist_template_name))

    def prepare_email_notification(self,
                                   recipient,
                                   counterpart,
                                   template_name,
                                   mentor_recipient=False):
        """Build the kwargs dict for ``send_email`` for one recipient.

        Renders the HTML body from *template_name* and attaches the ICS
        invite produced by ``get_calendar_data``.
        """
        template_path = email_template_path(template_name)
        if self.startup:
            startup_name = self.startup.organization.name
        else:
            startup_name = ""
        # Flag consumed by get_calendar_data to pick the calendar title.
        self.mentor_recipient = mentor_recipient
        context = {"recipient": recipient,
                   "counterpart": counterpart,
                   "startup": startup_name,
                   "message": self.message,
                   "calendar_data": self.get_calendar_data(counterpart)
                   }
        context.update(office_hour_time_info(self.office_hour))
        html_email = loader.render_to_string(template_path, context)
        return {"to": [recipient.email],
                "subject": self.SUBJECT,
                "body": None,
                "attachment": (ICS_FILENAME,
                               self.calendar_data['ical_content'],
                               ICS_FILETYPE),
                "attach_alternative": (html_email, 'text/html')
                }

    def _succeed(self):
        """Populate response state for a successful reservation."""
        if self.office_hour.startup:
            startup_name = self.office_hour.startup.organization.name
        else:
            startup_name = ""
        self.success = True
        self.header = self.SUCCESS_HEADER.format(
            self.office_hour.mentor.full_name())
        self.detail = self._get_detail()
        self.timecard_info = {
            "finalist_first_name": self.target_user.first_name,
            "finalist_last_name": self.target_user.last_name,
            "finalist_email": self.target_user.email,
            "topics": self.message,
            "startup": startup_name,
            "calendar_data": self.get_calendar_data(self.office_hour.mentor),
        }

    def _get_detail(self):
        """Extra detail text: warn when the reserved slot is in the past."""
        start_date_time = self.office_hour.start_date_time
        if datetime_is_in_past(start_date_time):
            return self.SUCCESS_PAST_DETAIL
        else:
            return ""

    def fail(self, detail):
        """Populate response state for a failed reservation."""
        self.success = False
        self.header = self.FAIL_HEADER
        self.detail = detail
        self.timecard_info = {}

    def _response(self):
        return Response({
            'success': self.success,
            'header': self.header,
            'detail': self.detail,
            'timecard_info': self.timecard_info})

    def get_calendar_data(self, counterpart_name):
        """Build (once) and return the Add2Cal payload for this session.

        The result is memoized on the instance, so every caller after the
        first receives the first call's title/description regardless of
        *counterpart_name* — NOTE(review): this appears deliberate (one
        ICS per reservation) but is worth confirming.
        """
        if hasattr(self, "calendar_data"):
            return self.calendar_data
        name = counterpart_name
        if self.mentor_recipient:
            # Mentors see the startup name (when present) in the title.
            name = self.startup.name if self.startup else counterpart_name
        title = self.OFFICE_HOUR_TITLE.format(name)
        office_hour = self.office_hour
        tz_str = ""
        if office_hour.location is None:
            tz_str = "UTC"
            location = ""
        else:
            tz_str = office_hour.location.timezone
            location = office_hour.location
        tz = timezone(tz_str)
        meeting_info = office_hour.meeting_info
        separator = ';' if office_hour.location and meeting_info else ""
        location_info = "{location}{separator}{meeting_info}"
        location_info = location_info.format(location=location,
                                             separator=separator,
                                             meeting_info=meeting_info)
        self.calendar_data = Add2Cal(
            start=office_hour.start_date_time.astimezone(tz).strftime(
                ADD2CAL_DATE_FORMAT),
            end=office_hour.end_date_time.astimezone(tz).strftime(
                ADD2CAL_DATE_FORMAT),
            title=title,
            description=self._get_description(counterpart_name),
            location=location_info,
            timezone=tz).as_dict()
        return self.calendar_data

    def _get_description(self, counterpart_name):
        """Compose the ICS description: attendee emails plus any message."""
        topics_block = ""
        attendees_block = """
        Attendees:\n- {mentor_email}\n- {finalist_email} - {finalist_phone}\n
        """
        finalist = self.startup if self.startup else counterpart_name
        if self.office_hour.topics:
            topics_block = "Message from {finalist}:\n{topics}\n".format(
                topics=self.office_hour.topics,
                finalist=finalist)
        mentor_email = self.office_hour.mentor.email
        finalist_email = self.target_user.email
        finalist_phone = self.target_user.user_phone()
        attendees_block = attendees_block.format(mentor_email=mentor_email,
                                                 finalist_email=finalist_email,
                                                 finalist_phone=finalist_phone)
        description = """
        {attendees_block}
        {topics_block}
        """
        return description.format(topics_block=topics_block,
                                  attendees_block=attendees_block)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from core import path_util
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story
from telemetry.value import list_of_scalar_values
from benchmarks import pywebsocket_server
from page_sets import webgl_supported_shared_state
# Root of the Blink performance test suites inside the Chromium checkout.
BLINK_PERF_BASE_DIR = os.path.join(path_util.GetChromiumSrcDir(),
                                   'third_party', 'WebKit', 'PerformanceTests')
# File listing tests to skip: one path per line, '#' starts a comment.
SKIPPED_FILE = os.path.join(BLINK_PERF_BASE_DIR, 'Skipped')
def CreateStorySetFromPath(path, skipped_file,
                           shared_page_state_class=(
                               shared_page_state.SharedPageState)):
  """Build a telemetry StorySet from a Blink perf test file or directory.

  Args:
    path: an .html test file, or a directory to walk recursively.
    skipped_file: path to a 'Skipped' list; entries (relative to that
        file's directory) are excluded from the story set.
    shared_page_state_class: shared state class applied to every page.

  Returns:
    A story.StorySet with one Page per discovered .html test.
  """
  assert os.path.exists(path)

  page_urls = []
  serving_dirs = set()

  def _AddPage(path):
    if not path.endswith('.html'):
      return
    # Bug fix: read via a context manager instead of open(path).read(),
    # which leaked the file handle for every candidate page.
    with open(path, 'r') as f:
      contents = f.read()
    if '../' in contents:
      # If the page looks like it references its parent dir, include it.
      serving_dirs.add(os.path.dirname(os.path.dirname(path)))
    page_urls.append('file://' + path.replace('\\', '/'))

  def _AddDir(dir_path, skipped):
    for candidate_path in os.listdir(dir_path):
      if candidate_path == 'resources':
        continue
      candidate_path = os.path.join(dir_path, candidate_path)
      if candidate_path.startswith(skipped):
        continue
      if os.path.isdir(candidate_path):
        _AddDir(candidate_path, skipped)
      else:
        _AddPage(candidate_path)

  if os.path.isdir(path):
    skipped = []
    if os.path.exists(skipped_file):
      # Bug fix: close the skip-list file (was an unclosed readlines()).
      with open(skipped_file, 'r') as f:
        lines = f.readlines()
      for line in lines:
        line = line.strip()
        if line and not line.startswith('#'):
          skipped_path = os.path.join(os.path.dirname(skipped_file), line)
          skipped.append(skipped_path.replace('/', os.sep))
    _AddDir(path, tuple(skipped))
  else:
    _AddPage(path)
  ps = story.StorySet(base_dir=os.getcwd() + os.sep,
                      serving_dirs=serving_dirs)
  for url in page_urls:
    ps.AddStory(page_module.Page(
        url, ps, ps.base_dir,
        shared_page_state_class=shared_page_state_class))
  return ps
class _BlinkPerfMeasurement(legacy_page_test.LegacyPageTest):
  """Runs a blink performance test and reports the results."""

  def __init__(self):
    super(_BlinkPerfMeasurement, self).__init__()
    # blink_perf.js is injected into every page before it commits; it
    # drives the test and writes results into the page's #log element.
    with open(os.path.join(os.path.dirname(__file__),
                           'blink_perf.js'), 'r') as f:
      self._blink_perf_js = f.read()

  def WillNavigateToPage(self, page, tab):
    del tab  # unused
    page.script_to_evaluate_on_commit = self._blink_perf_js

  def CustomizeBrowserOptions(self, options):
    # Flags required by the Blink perf harness (GC hooks, experimental
    # features, media autoplay for media tests).
    options.AppendExtraBrowserArgs([
        '--js-flags=--expose_gc',
        '--enable-experimental-web-platform-features',
        '--disable-gesture-requirement-for-media-playback',
        '--enable-experimental-canvas-features',
        # TODO(qinmin): After fixing crbug.com/592017, remove this command line.
        '--reduce-security-for-testing'
    ])
    if 'content-shell' in options.browser_type:
      options.AppendExtraBrowserArgs('--expose-internals-for-testing')

  def ValidateAndMeasurePage(self, page, tab, results):
    # Wait for the injected harness to finish, then parse the first
    # "values ..." line of the in-page log into a list-of-scalars value.
    tab.WaitForJavaScriptExpression('testRunner.isDone', 600)

    log = tab.EvaluateJavaScript('document.getElementById("log").innerHTML')

    for line in log.splitlines():
      if line.startswith("FATAL: "):
        print line
        continue
      if not line.startswith('values '):
        continue
      parts = line.split()
      # Values are comma-grouped numbers; the final token is the unit.
      values = [float(v.replace(',', '')) for v in parts[1:-1]]
      units = parts[-1]
      metric = page.display_name.split('.')[0].replace('/', '_')
      results.AddValue(list_of_scalar_values.ListOfScalarValues(
          results.current_page, metric, units, values))
      break

    print log
class _SharedPywebsocketPageState(shared_page_state.SharedPageState):
  """Runs a pywebsocket server."""

  def __init__(self, test, finder_options, user_story_set):
    super(_SharedPywebsocketPageState, self).__init__(
        test, finder_options, user_story_set)
    # Start the websocket server on the test device/host so the
    # Pywebsocket benchmark pages have an endpoint to talk to.
    self.platform.StartLocalServer(pywebsocket_server.PywebsocketServer())
class BlinkPerfBindings(perf_benchmark.PerfBenchmark):
  """Blink binding-layer performance tests (PerformanceTests/Bindings)."""
  tag = 'bindings'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.bindings'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Bindings')
    return CreateStorySetFromPath(path, SKIPPED_FILE)

  @classmethod
  def ShouldDisable(cls, possible_browser):
    return cls.IsSvelte(possible_browser)  # http://crbug.com/563979
# Only runs on content-shell (needs internal GC instrumentation).
@benchmark.Enabled('content-shell')
class BlinkPerfBlinkGC(perf_benchmark.PerfBenchmark):
  """Blink (Oilpan) garbage-collection tests (PerformanceTests/BlinkGC)."""
  tag = 'blink_gc'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.blink_gc'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'BlinkGC')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfCSS(perf_benchmark.PerfBenchmark):
  """CSS performance tests (PerformanceTests/CSS)."""
  tag = 'css'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.css'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'CSS')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('android-webview',  # http://crbug.com/593200
                    'reference')  # http://crbug.com/576779
class BlinkPerfCanvas(perf_benchmark.PerfBenchmark):
  """Canvas performance tests (PerformanceTests/Canvas); requires WebGL."""
  tag = 'canvas'
  test = _BlinkPerfMeasurement

  @classmethod
  def ShouldDisable(cls, possible_browser):
    return cls.IsSvelte(possible_browser)  # http://crbug.com/593973.

  @classmethod
  def Name(cls):
    return 'blink_perf.canvas'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Canvas')
    story_set = CreateStorySetFromPath(
        path, SKIPPED_FILE,
        shared_page_state_class=(
            webgl_supported_shared_state.WebGLSupportedSharedState))
    # WebGLSupportedSharedState requires the skipped_gpus property to
    # be set on each page.
    for page in story_set:
      page.skipped_gpus = []
    return story_set
class BlinkPerfDOM(perf_benchmark.PerfBenchmark):
  """DOM performance tests (PerformanceTests/DOM)."""
  tag = 'dom'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.dom'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'DOM')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('win')  # http://crbug.com/588819
class BlinkPerfEvents(perf_benchmark.PerfBenchmark):
  """Event-dispatch performance tests (PerformanceTests/Events)."""
  tag = 'events'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.events'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Events')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('win8')  # http://crbug.com/462350
class BlinkPerfLayout(perf_benchmark.PerfBenchmark):
  """Layout performance tests (PerformanceTests/Layout)."""
  tag = 'layout'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.layout'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Layout')
    return CreateStorySetFromPath(path, SKIPPED_FILE)

  @classmethod
  def ShouldDisable(cls, possible_browser):
    return cls.IsSvelte(possible_browser)  # http://crbug.com/551950
class BlinkPerfPaint(perf_benchmark.PerfBenchmark):
  """Paint performance tests (PerformanceTests/Paint)."""
  tag = 'paint'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.paint'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Paint')
    return CreateStorySetFromPath(path, SKIPPED_FILE)

  @classmethod
  def ShouldDisable(cls, possible_browser):
    return cls.IsSvelte(possible_browser)  # http://crbug.com/574483
@benchmark.Disabled('win')  # crbug.com/488493
class BlinkPerfParser(perf_benchmark.PerfBenchmark):
  """HTML/XML parser performance tests (PerformanceTests/Parser)."""
  tag = 'parser'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.parser'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Parser')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfSVG(perf_benchmark.PerfBenchmark):
  """SVG performance tests (PerformanceTests/SVG)."""
  tag = 'svg'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.svg'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'SVG')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfShadowDOM(perf_benchmark.PerfBenchmark):
  """Shadow DOM performance tests (PerformanceTests/ShadowDOM)."""
  tag = 'shadow_dom'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.shadow_dom'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'ShadowDOM')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
# This benchmark is for local testing, doesn't need to run on bots.
@benchmark.Disabled('all')
class BlinkPerfXMLHttpRequest(perf_benchmark.PerfBenchmark):
  """XMLHttpRequest performance tests (PerformanceTests/XMLHttpRequest)."""
  tag = 'xml_http_request'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.xml_http_request'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'XMLHttpRequest')
    return CreateStorySetFromPath(path, SKIPPED_FILE)
# Disabled on Windows and ChromeOS due to https://crbug.com/521887
@benchmark.Disabled('win', 'chromeos')
class BlinkPerfPywebsocket(perf_benchmark.PerfBenchmark):
  """The blink_perf.pywebsocket tests measure turn-around-time of 10MB
  send/receive for XHR, Fetch API and WebSocket. We might ignore < 10%
  regressions, because the tests are noisy and such regressions are
  often unreproducible (https://crbug.com/549017).
  """
  tag = 'pywebsocket'
  test = _BlinkPerfMeasurement

  @classmethod
  def Name(cls):
    return 'blink_perf.pywebsocket'

  def CreateStorySet(self, options):
    path = os.path.join(BLINK_PERF_BASE_DIR, 'Pywebsocket')
    # Uses the shared state that also spins up a local pywebsocket server.
    return CreateStorySetFromPath(
        path, SKIPPED_FILE,
        shared_page_state_class=_SharedPywebsocketPageState)

  @classmethod
  def ShouldDisable(cls, possible_browser):
    return cls.IsSvelte(possible_browser)  # http://crbug.com/551950
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument
"""Conv2D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
import logging
import tvm
from tvm import autotvm
from tvm import relay
import tvm.contrib.nnpack
from ..generic import schedule_conv2d_nchw, schedule_conv2d_winograd_without_weight_transform, \
schedule_conv2d_winograd_nnpack_without_weight_transform
from ..util import traverse_inline, get_const_tuple
from ..nn import dilate, pad, conv2d, conv2d_alter_layout, \
conv2d_winograd_without_weight_transform, \
conv2d_winograd_nnpack_without_weight_transform, \
depthwise_conv2d_nchw
from ..nn import conv2d_legalize
from ..nn.util import get_const_int, get_pad_tuple
from ..nn.winograd_util import winograd_transform_matrices
# Module-level logger shared by the TOPI ARM CPU schedules.
logger = logging.getLogger('topi')
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['direct'])
def conv2d_arm_cpu(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
    """TOPI compute callback for conv2d

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template

    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    kernel : tvm.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width] or
        pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height,
        filter_width, num_filter_block]

    strides : list of two ints
        [stride_height, stride_width]

    padding : list of two ints
        [pad_height, pad_width]

    dilation : list of two ints
        [dilation_height, dilation_width]

    layout : str
        layout of data

    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # num_tile=2 selects the two-level tiling variant used for ARM CPUs
    # (num_tile=3 is the Mali GPU variant) in _decl_spatial_pack.
    return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, layout, out_dtype,
                              num_tile=2)
@autotvm.register_topi_schedule(
    schedule_conv2d_nchw, 'arm_cpu',
    ['direct', 'winograd', 'winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def schedule_conv2d_nchw_arm_cpu(cfg, outs):
    """TOPI schedule callback for conv2d

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template

    outs: Array of Tensor
        The computation graph description of conv2d
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d.
    """
    s = tvm.create_schedule([x.op for x in outs])

    def _callback(op):
        # Dispatch on the op tag set by the matching compute declaration
        # (direct spatial-pack, winograd, or NNPACK winograd).
        # schedule conv2d
        if 'spatial_conv2d_output' in op.tag:
            output = op.output(0)
            conv = op.input_tensors[0]

            data_vec = conv.op.input_tensors[0]
            data_pad = data_vec.op.input_tensors[0]
            s[data_pad].compute_inline()

            # kernel_vec is either the packed kernel compute or the raw
            # (already pre-packed) kernel tensor.
            kernel_vec = conv.op.input_tensors[1]
            if kernel_vec.op.name == 'kernel_vec':
                kernel = kernel_vec.op.input_tensors[0]
            else:
                kernel = kernel_vec
            if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
                # Inline the dilation so it fuses into the packing stage.
                s[kernel].compute_inline()

            _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0])

        if 'winograd_conv2d_output' in op.tag:
            output = op.output(0)
            _schedule_winograd(cfg, s, output, outs[0])

        if 'winograd_nnpack_conv2d_output' in op.tag:
            output = op.output(0)
            _schedule_winograd_nnpack(cfg, s, output, outs[0])

    traverse_inline(s, outs[0].op, _callback)
    return s
def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, num_tile):
    """Declare the spatial-pack conv2d compute.

    Packs data and kernel into tiled/vectorized layouts, performs the
    convolution over the packed tensors, then unpacks to NCHW output.
    ``num_tile`` selects 2-level (ARM CPU) or 3-level (Mali GPU) tiling.
    """
    assert layout == "NCHW", "Only support NCHW"
    # create workload according to raw arguments
    out_dtype = out_dtype or data.dtype
    N, CI, IH, IW = get_const_tuple(data.shape)

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    if len(kernel.shape) == 4:
        pre_packed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:  # kernel tensor is pre packed
        pre_packed = True
        CO, _, KH, KW, VC = get_const_tuple(kernel.shape)
        CO = CO * VC

    dilated_kernel_h = (KH - 1) * dilation_h + 1
    dilated_kernel_w = (KW - 1) * dilation_w + 1
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w))
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    OH = (IH + pad_top + pad_bottom - dilated_kernel_h) // HSTR + 1
    OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
    data_pad = pad(data, [0, 0, pad_top, pad_left], [0, 0, pad_bottom, pad_right])

    # ==================== define configuration space ====================
    n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
    ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)

    if num_tile == 2:     # for arm cpu
        co, vc = cfg.define_split('tile_co', co, num_outputs=2)
        oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2)
        ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2)
    elif num_tile == 3:   # for mali gpu
        co, _, vc = cfg.define_split('tile_co', co, num_outputs=3)
        oh, _, vh = cfg.define_split('tile_oh', oh, num_outputs=3)
        ow, _, vw = cfg.define_split('tile_ow', ow, num_outputs=3)
    else:
        raise RuntimeError("Invalid num_tile")

    cfg.define_reorder("reorder_0",
                       [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
                       policy='candidate', candidate=[
                           [n, co, oh, ow, ci, kh, kw, vh, vw, vc],
                           [n, co, oh, ow, ci, kh, kw, vc, vh, vw]])

    cfg.define_annotate("ann_reduce", [kh, kw], policy='try_unroll')
    cfg.define_annotate("ann_spatial", [vh, vw, vc], policy='try_unroll_vec')

    # fallback support
    if cfg.is_fallback:
        # No tuned config available: seed from the pre-tuned rk3399 logs.
        if num_tile == 2:     # arm cpu
            ref_log = autotvm.tophub.load_reference_log('arm_cpu', 'rk3399', 'conv2d', 'direct')
            cfg.fallback_with_reference_log(ref_log)
        elif num_tile == 3:  # mali gpu
            ref_log = autotvm.tophub.load_reference_log('mali', 'rk3399', 'conv2d', 'direct')
            cfg.fallback_with_reference_log(ref_log)
    # ====================================================================

    # Inner tile sizes chosen by the config (last factor of each split).
    VC = cfg["tile_co"].size[-1]
    VH = cfg["tile_oh"].size[-1]
    VW = cfg["tile_ow"].size[-1]

    kvshape = (CO // VC, CI, KH, KW, VC)
    ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
    oshape = (N, CO, OH, OW)

    if dilation_h != 1 or dilation_w != 1:
        # undilate input data
        dvshape = (N, OH // VH, OW // VW, CI, KH, KW, VH, VW)
        data_vec = tvm.compute(dvshape, lambda n, h, w, ci, kh, kw, vh, vw:
                               data_pad[n][ci][(h*VH+vh)*HSTR+kh*dilation_h]
                               [(w*VW+vw)*WSTR+kw*dilation_w],
                               name='data_vec_undilated')
    else:
        dvshape = (N, OH // VH, OW // VW, CI, VH*HSTR + KH-1, VW*WSTR + KW-1)
        data_vec = tvm.compute(dvshape, lambda n, h, w, ci, vh, vw:
                               data_pad[n][ci][h*VH*HSTR+vh][w*VW*WSTR+vw],
                               name='data_vec')

    if pre_packed:
        kernel_vec = kernel
    else:
        kernel_vec = tvm.compute(kvshape, lambda co, ci, kh, kw, vc:
                                 kernel[co*VC+vc][ci][kh][kw],
                                 name='kernel_vec')

    ci = tvm.reduce_axis((0, CI), name='ci')
    kh = tvm.reduce_axis((0, KH), name='kh')
    kw = tvm.reduce_axis((0, KW), name='kw')

    if dilation_h != 1 or dilation_w != 1:
        conv = tvm.compute(ovshape, lambda n, co, h, w, vh, vw, vc: \
            tvm.sum(data_vec[n, h, w, ci, kh, kw, vh, vw].astype(out_dtype) *
                    kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
                    axis=[ci, kh, kw]), name='conv')
    else:
        conv = tvm.compute(ovshape, lambda n, co, h, w, vh, vw, vc: \
            tvm.sum(data_vec[n, h, w, ci, vh*HSTR+kh, vw*WSTR+kw].astype(out_dtype) *
                    kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
                    axis=[ci, kh, kw]), name='conv')

    # Unpack the tiled result back to plain NCHW.
    output = tvm.compute(oshape, lambda n, co, h, w:
                         conv[n][co//VC][h//VH][w//VW][h%VH][w%VW][co%VC],
                         name='output_unpack', tag='spatial_conv2d_output')
    return output
def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec,
                           conv, output, last):
    """schedule implementation"""
    n, co, oh, ow, vh, vw, vc = s[conv].op.axis
    ci, kh, kw = s[conv].op.reduce_axis

    # schedule conv
    # Apply the tuned loop order and unroll/vectorize annotations.
    cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, ci, kh, kw, vh, vw, vc])
    cfg["ann_reduce"].apply(s, conv, [kh, kw],
                            axis_lens=[get_const_int(kh.dom.extent),
                                       get_const_int(kw.dom.extent)],
                            max_unroll=16,
                            cfg=cfg)
    cfg["ann_spatial"].apply(s, conv, [vh, vw, vc],
                             axis_lens=[cfg['tile_oh'].size[-1],
                                        cfg['tile_ow'].size[-1],
                                        cfg['tile_co'].size[-1]],
                             max_unroll=16,
                             cfg=cfg)

    # schedule fusion
    # Tile the consumer (possibly a fused elementwise op) with the same
    # factors so the conv stage can be computed at its inner tile.
    n, co, h, w = s[last].op.axis
    co, vc = cfg['tile_co'].apply(s, last, co)
    oh, vh = cfg['tile_oh'].apply(s, last, h)
    ow, vw = cfg['tile_ow'].apply(s, last, w)
    s[last].reorder(n, co, oh, ow, vh, vw, vc)
    if last != output:
        s[output].compute_inline()
        cfg["ann_spatial"].apply(s, last, [vh, vw, vc],
                                 axis_lens=[cfg['tile_oh'].size[-1],
                                            cfg['tile_ow'].size[-1],
                                            cfg['tile_co'].size[-1]],
                                 max_unroll=16,
                                 cfg=cfg)
    s[conv].compute_at(s[last], ow)

    # mark parallel
    p = s[last].fuse(n, co)
    s[last].parallel(p)

    # data_vec_undilated has two extra (kh, kw) axes; unpack accordingly.
    if data_vec.op.name == 'data_vec_undilated':
        n, h, _, _, _, _, _, _ = s[data_vec].op.axis
    else:
        n, h, _, _, _, _ = s[data_vec].op.axis
    p = s[data_vec].fuse(n, h)
    s[data_vec].parallel(p)

    if kernel_vec.op.name == 'kernel_vec':
        co, _, _, _, _ = s[kernel_vec].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # kernel packing will be pre-computed during compilation, so we skip
            # this part to make tuning records correct
            s[kernel_vec].pragma(co, 'debug_skip_region')
        else:
            s[kernel_vec].parallel(co)
    elif kernel_vec.op.name == 'kernel_vec_conv2d_transpose':  # for conv2d transpose
        co, _, _, _, _ = s[kernel_vec].op.axis
        s[kernel_vec].parallel(co)

    return s
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd'])
def conv2d_arm_cpu_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
    """ TOPI compute callback. Use winograd template """
    # F(4x4, 3x3) winograd: tile_size 4 with a 3x3 kernel (alpha = 6).
    tile_size = 4
    return _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout,
                          out_dtype, tile_size)
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size):
    """Declare the winograd conv2d compute (NCHW, 3x3, stride 1).

    Stages: pack input tiles -> transform kernel (U) and image (V) ->
    batched GEMM (M) -> inverse transform (Y) -> unpack to NCHW output.
    """
    N, CI, IH, IW = get_const_tuple(data.shape)

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    if len(kernel.shape) == 4:
        if dilation_h != 1 or dilation_w != 1:
            kernel = dilate(kernel, (1, 1, dilation_h, dilation_w))
        pre_computed = False
        CO, _, KH, KW = get_const_tuple(kernel.shape)
    else:
        # Kernel already in winograd domain (pre-computed transform).
        assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
        pre_computed = True
        H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
        CO *= VC
        KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)

    assert layout == 'NCHW'
    assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
    data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")

    r = KW
    m = tile_size
    alpha = m + r - 1
    A, B, G = winograd_transform_matrices(m, r, out_dtype)

    K = CO
    C = CI
    H = (IH + 2 * HPAD - 3) // HSTR + 1
    W = (IW + 2 * WPAD - 3) // WSTR + 1
    # P = total number of mxm output tiles across the batch.
    nH, nW = (H + m-1) // m, (W + m-1) // m
    P = N * nH * nW

    cfg.define_split('tile_p', cfg.axis(P), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    cfg.define_split('tile_k', cfg.axis(K), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    VP = cfg['tile_p'].size[-1]
    VK = cfg['tile_k'].size[-1]

    # pack input tile
    input_tile = tvm.compute((C, P // VP, alpha, alpha, VP),
                             lambda c, b, eps, nu, bb:
                             data_pad[(b*VP+bb) // (nH*nW)][c][(b*VP+bb) // nW % nH * m + eps]
                             [(b*VP+bb) % nW * m + nu],
                             name='d')

    # transform kernel
    if pre_computed:
        U = kernel
    else:
        r_kh = tvm.reduce_axis((0, KH), 'r_kh')
        r_kw = tvm.reduce_axis((0, KW), 'r_kw')
        U = tvm.compute((alpha, alpha, K // VK, C, VK), lambda eps, nu, k, c, kk:
                        tvm.sum(kernel[k * VK + kk][c][r_kh][r_kw].astype(out_dtype) *
                                G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]), name='U')

    # transform image
    r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
    r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
    V = tvm.compute((alpha, alpha, P // VP, C, VP), lambda eps, nu, b, c, bb:
                    tvm.sum(input_tile[c][b][r_eps][r_nu][bb].astype(out_dtype) *
                            B[r_eps][eps] * B[r_nu][nu], axis=[r_eps, r_nu]), name='V')

    # batch gemm
    c = tvm.reduce_axis((0, C), name='c')
    M = tvm.compute((alpha, alpha, K, P), lambda eps, nu, k, b:
                    tvm.sum(U[eps][nu][k // VK][c][k % VK] *
                            V[eps][nu][b // VP][c][b % VP], axis=c), name='M')

    # inverse transform
    r_eps = tvm.reduce_axis((0, alpha), 'r_eps')
    r_nu = tvm.reduce_axis((0, alpha), 'r_nu')
    Y = tvm.compute((K, P, m, m), lambda k, b, vh, vw:
                    tvm.sum(M[r_eps][r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw],
                            axis=[r_eps, r_nu]), name='Y')

    # unpack output
    output = tvm.compute((N, K, H, W), lambda n, k, h, w:
                         Y[k][n * nH * nW + (h//m) * nW + w//m][h % m][w % m],
                         name='output', tag='winograd_conv2d_output')

    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * K * H * W * KH * KW * C)
    return output
def _schedule_winograd(cfg, s, output, last):
    """Schedule the winograd conv2d declared by ``_decl_winograd``.

    Recovers the intermediate stages from the dataflow graph rooted at
    ``output`` (Y: inverse transform, M: batch gemm, U: kernel transform,
    V: image transform, d: packed input tile) and schedules each one.
    ``last`` is the final output tensor of the whole graph (may differ from
    ``output`` when the conv is fused with later ops).
    """
    # Walk the producer chain backwards from the tagged output.
    Y = output.op.input_tensors[0]
    M, A = Y.op.input_tensors
    U, V = M.op.input_tensors
    d, B = V.op.input_tensors
    data_pad = d.op.input_tensors[0]
    # padding
    s[data_pad].compute_inline()
    # pack input tiles
    s[d].compute_inline()
    # transform kernel
    # U is a ComputeOp only when the weight transform was NOT pre-computed;
    # otherwise it is a placeholder and needs no schedule.
    if isinstance(U.op, tvm.tensor.ComputeOp):
        kernel, G = U.op.input_tensors
        s[G].compute_inline()
        eps, nu, k, c, kk, = s[U].op.axis
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # kernel transformation will be pre-computed during compilation, so we skip
            # this part to make tuning records correct
            s[U].pragma(eps, 'debug_skip_region')
        else:
            r_kh, r_kw = s[U].op.reduce_axis
            s[U].reorder(k, c, eps, nu, r_kh, r_kw, kk)
            for axis in [eps, nu, r_kh, r_kw]:
                s[U].unroll(axis)
            s[U].vectorize(kk)
            s[U].parallel(k)
        if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
            s[kernel].compute_inline()
    # transform image
    DD = s.cache_read(d, 'global', [V])
    s[B].compute_inline()
    eps, nu, b, c, bb = s[V].op.axis
    r_eps, r_nu = s[V].op.reduce_axis
    s[V].reorder(b, c, eps, nu, r_eps, r_nu, bb)
    for axis in [eps, nu, r_eps, r_nu]:
        s[V].unroll(axis)
    s[DD].compute_at(s[V], c)
    s[V].vectorize(bb)
    s[V].parallel(b)
    # batch gemm
    eps, nu, k, b = s[M].op.axis
    c = s[M].op.reduce_axis[0]
    cfg.define_split('tile_c', c, num_outputs=2, filter=lambda x: x.size[-1] <= 16)
    co, ci = cfg['tile_c'].apply(s, M, c)
    xo, xi = cfg['tile_p'].apply(s, M, b)
    s[M].reorder(eps, nu, xo, co, k, ci, xi)
    cfg.define_annotate('ann_reduce', [ci], policy='try_unroll')
    cfg.define_annotate('ann_spatial', [k, xi], policy='try_unroll_vec')
    cfg['ann_reduce'].apply(s, M, [ci],
                            axis_lens=[cfg['tile_c'].size[-1]],
                            max_unroll=16,
                            cfg=cfg)
    cfg['ann_spatial'].apply(s, M, [k, xi])
    # inverse transform
    s[A].compute_inline()
    k, b, vh, vw = s[Y].op.axis
    r_eps, r_nu = s[Y].op.reduce_axis
    for axis in [vh, vw, r_eps, r_nu]:
        s[Y].unroll(axis)
    # output
    n, co, h, w = s[last].op.axis
    co, coi = cfg['tile_k'].apply(s, last, co)
    p = s[last].fuse(n, co)
    s[M].compute_at(s[last], p)
    s[last].parallel(p)
    MM = s.cache_read(M, 'global', [Y])
    # V.shape[0] is alpha = m + r - 1; recover the output tile size m
    # assuming r == 3, matching the 3x3-only declaration above.
    m = get_const_int(V.shape[0]) + 1 - 3
    ho, wo, hi, wi = s[last].tile(h, w, m, m)
    s[Y].compute_at(s[last], wo)
    s[MM].compute_at(s[last], wo)
    if output != last:
        s[output].compute_inline()
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp16'])
def conv2d_arm_cpu_winograd_nnpack_fp16(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
    """TOPI compute callback: winograd via NNPACK using fp16 tile transforms."""
    algorithm = tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16
    return conv2d_arm_cpu_winograd_nnpack(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype,
        algorithm)
@autotvm.register_topi_compute(conv2d, 'arm_cpu', ['winograd_nnpack_fp32'])
def conv2d_arm_cpu_winograd_nnpack_fp32(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
    """TOPI compute callback: winograd via NNPACK using fp32 tile transforms."""
    algorithm = tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8
    return conv2d_arm_cpu_winograd_nnpack(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype,
        algorithm)
def conv2d_arm_cpu_winograd_nnpack(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype, convolution_algorithm):
    """TOPI compute callback. Use winograd NNPACK template.

    Declares the NNPACK weight transform followed by the inference kernel.
    Only supports NCHW layout, batch 1, 3x3 kernels with stride 1, pad 1,
    and no dilation (enforced by the asserts below).
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    assert (dilation_h, dilation_w) == (1, 1)
    assert len(kernel.shape) == 4
    CO, _, KH, KW = get_const_tuple(kernel.shape)
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
    assert layout == 'NCHW'
    assert KH == 3 and KW == 3 and HPAD == 1 and WPAD == 1 and HSTR == 1 and WSTR == 1
    # Output spatial dims (always equal to input dims under the asserts above).
    H = (IH + 2 * HPAD - 3) // HSTR + 1
    W = (IW + 2 * WPAD - 3) // WSTR + 1
    # Single-value knob so the chosen algorithm is recorded in tuning logs.
    cfg.define_knob('winograd_nnpack_algorithm', [convolution_algorithm])
    assert N == 1
    with tvm.tag_scope("winograd_nnpack_conv2d_weight_transform"):
        transformed_kernel = tvm.contrib.nnpack.convolution_inference_weight_transform(
            kernel, algorithm=cfg['winograd_nnpack_algorithm'].val)
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # During tuning, replace the real transform with a dummy zero tensor
            # so the measured time excludes the one-off weight transformation.
            transformed_kernel = tvm.compute(transformed_kernel.shape, lambda *args: 0.0)
    with tvm.tag_scope("winograd_nnpack_conv2d_output"):
        output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
            data, transformed_kernel,
            bias=None,
            padding=[HPAD, HPAD, WPAD, WPAD],
            stride=[HSTR, WSTR],
            algorithm=cfg['winograd_nnpack_algorithm'].val)
    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
    return output
def _schedule_winograd_nnpack(cfg, s, output, last):
    """Schedule a winograd-NNPACK conv2d output.

    The heavy lifting happens inside the NNPACK extern call, so the only
    scheduling concern is the kernel-transform stage during tuning.
    """
    # The first two producers are data and transformed kernel; a bias tensor
    # may follow, hence the slice instead of full unpacking.
    producers = output.op.input_tensors
    X, TK = producers[0], producers[1]
    # transform kernel
    acceptable_ops = (tvm.tensor.ComputeOp, tvm.tensor.ExternOp, tvm.tensor.PlaceholderOp)
    assert isinstance(TK.op, acceptable_ops)
    if autotvm.GLOBAL_SCOPE.in_tuning and isinstance(TK.op, tvm.tensor.ComputeOp):
        # The kernel transformation is pre-computed during compilation; skip it
        # here so tuning records stay correct.
        s[TK].pragma(s[TK].op.axis[0], 'debug_skip_region')
##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD WITH WEIGHT TRANSFORM #####
@autotvm.register_topi_compute(conv2d_winograd_without_weight_transform, 'arm_cpu', ['winograd'])
def conv2d_winograd_ww(cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size):
    """TOPI compute callback: winograd conv2d with a pre-transformed kernel."""
    return _decl_winograd(
        cfg, data, kernel, strides, padding, dilation, layout, out_dtype, tile_size)
@autotvm.register_topi_schedule(schedule_conv2d_winograd_without_weight_transform,
                                'arm_cpu', ['winograd'])
def schedule_conv2d_winograd_without_weight_transform_(cfg, outs):
    """TOPI schedule callback for winograd conv2d without weight transform."""
    sch = tvm.create_schedule([out.op for out in outs])

    def _traverse(op):
        # Only the op tagged by the winograd declaration needs scheduling.
        if 'winograd_conv2d_output' in op.tag:
            _schedule_winograd(cfg, sch, op.output(0), outs[0])

    traverse_inline(sch, outs[0].op, _traverse)
    return sch
##### REGISTER TOPI COMPUTE / SCHEDULE FOR WINOGRAD NNPACK WITHOUT WEIGHT TRANSFORM #####
@autotvm.register_topi_compute(conv2d_winograd_nnpack_without_weight_transform,
                               'arm_cpu',
                               ['winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def conv2d_winograd_nnpack_ww(cfg, data, transformed_kernel, bias, strides,
                              padding, dilation, layout, out_dtype):
    """TOPI compute callback. Use winograd NNPACK template.

    Variant of the NNPACK path where the kernel was already transformed
    (shape assumed (CO, CI, 8, 8) for the 8x8 winograd tile); only the
    inference call is declared here. Restricted to NCHW, batch 1, implied
    3x3 kernels with stride 1, pad 1, and no dilation.
    """
    N, CI, IH, IW = get_const_tuple(data.shape)
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    assert (dilation_h, dilation_w) == (1, 1)
    assert len(transformed_kernel.shape) == 4
    CO, _, _, _ = get_const_tuple(transformed_kernel.shape)
    HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
    # The original spatial kernel size is fixed at 3x3 by this template.
    HPAD, WPAD, _, _ = get_pad_tuple(padding, (3, 3))
    KH, KW = 3, 3
    assert layout == 'NCHW'
    assert KH == 3 and KW == 3 and HPAD == 1 and WPAD == 1 and HSTR == 1 and WSTR == 1
    H = (IH + 2 * HPAD - 3) // HSTR + 1
    W = (IW + 2 * WPAD - 3) // WSTR + 1
    assert N == 1
    with tvm.tag_scope("winograd_nnpack_conv2d_output"):
        output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
            data=data,
            transformed_kernel=transformed_kernel,
            bias=bias,
            padding=[HPAD, HPAD, WPAD, WPAD],
            stride=[HSTR, WSTR],
            algorithm=cfg['winograd_nnpack_algorithm'].val)
    # we have to manually assign effective GFLOP for winograd
    cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
    return output
@autotvm.register_topi_schedule(schedule_conv2d_winograd_nnpack_without_weight_transform,
                                'arm_cpu', ['winograd_nnpack_fp16', 'winograd_nnpack_fp32'])
def schedule_conv2d_winograd_nnpack_without_weight_transform_(cfg, outs):
    """TOPI schedule callback for winograd-NNPACK without weight transform."""
    sch = tvm.create_schedule([out.op for out in outs])

    def _traverse(op):
        # Only the tagged NNPACK conv output requires explicit scheduling.
        if 'winograd_nnpack_conv2d_output' in op.tag:
            _schedule_winograd_nnpack(cfg, sch, op.output(0), outs[0])

    traverse_inline(sch, outs[0].op, _traverse)
    return sch
##### REGISTER ALTER OP LAYOUT #####
@conv2d_alter_layout.register(["arm_cpu"])
def _alter_conv2d_layout_arm(attrs, inputs, tinfos, F):
    """Alter op layout for pre-computing kernel transformation

    Parameters
    ----------
    attrs : nnvm.top.AttrDict or tvm.attrs.Attrs
        Attributes of current convolution
    inputs : nnvm.symbol or tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    F: symbol
        The context, can be either nnvm.sym or relay.op

    Returns
    -------
    The altered operator call, or ``None`` to keep the original layout.

    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level,
    so we have to pass 'F' to make it support our two versions of graph IR, NNVM and Relay.
    """
    copy_inputs = [s for s in inputs]
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    if F.__name__ == 'tvm.relay.op':
        # Derive channels for frontends (e.g ONNX) that miss "channel" field.
        new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')]
    dilation = attrs.get_int_tuple("dilation")
    strides = attrs.get_int_tuple("strides")
    padding = attrs.get_int_tuple("padding")
    groups = attrs.get_int('groups')
    # NNVM uses "layout", Relay uses "data_layout" for the same attribute.
    data_layout_key = "data_layout" if "data_layout" in new_attrs else "layout"
    layout = attrs[data_layout_key]
    out_dtype = attrs["out_dtype"]
    if out_dtype in ("same", ""):
        out_dtype = tinfos[0].dtype
    if layout != 'NCHW':
        return None
    if dilation != (1, 1):
        logger.warning("Does not support weight pre-transform for dilated convolution.")
        return None
    data, kernel = tinfos[0:2]
    N, CI, H, W = get_const_tuple(data.shape)
    CO, _, KH, KW = get_const_tuple(kernel.shape)
    if groups == 1:
        # query config of this workload
        workload = autotvm.task.args_to_workload(
            [data, kernel, strides, padding, dilation, layout, out_dtype], conv2d)
        target = tvm.target.current_target()
        dispatch_ctx = autotvm.DispatchContext.current
        cfg = dispatch_ctx.query(target, workload)
        if cfg.is_fallback:  # if is fallback, clear query cache and return None
            autotvm.task.clear_fallback_cache(target, workload)
            return None
        if cfg.template_key == 'direct':  # pack weight tensor
            VC = cfg['tile_co'].size[-1]
            new_attrs['kernel_layout'] = 'OIHW%do' % VC
            # Store the same config for the altered operator (workload)
            new_data = data
            new_kernel = tvm.placeholder((CO // VC, CI, KH, KW, VC), dtype=kernel.dtype)
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_kernel, strides, padding, dilation, 'NCHW', out_dtype], conv2d)
            dispatch_ctx.update(target, new_workload, cfg)
            return F.nn.conv2d(*copy_inputs, **new_attrs)
        elif cfg.template_key == "winograd":  # pre-compute weight transformation in winograd
            if "-device=arm_cpu" in target.options:
                tile_size = 4
                VC = cfg['tile_k'].size[-1]
            else:
                from ..mali.conv2d import _pick_tile_size
                tile_size = _pick_tile_size(tinfos[0], tinfos[1])
                VC = cfg['tile_bna'].val
            weight = F.nn.contrib_conv2d_winograd_weight_transform(copy_inputs[1],
                                                                   tile_size=tile_size)
            weight = F.reshape(weight,
                               newshape=(KH + tile_size - 1, KW + tile_size - 1, CO // VC, VC, CI))
            weight = F.transpose(weight, axes=[0, 1, 2, 4, 3])
            copy_inputs[1] = weight
            new_attrs['tile_size'] = tile_size
            # Store the same config for the altered operator (workload)
            new_data = data
            # Second alpha dimension uses KW (was KH twice), matching the
            # reshape above; identical for the square kernels used today.
            new_weight = tvm.placeholder((KH + tile_size - 1, KW + tile_size - 1, CO // VC, CI, VC),
                                         kernel.dtype)
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_weight, strides, padding, dilation,
                 new_attrs[data_layout_key], out_dtype, tile_size],
                conv2d_winograd_without_weight_transform)
            dispatch_ctx.update(target, new_workload, cfg)
            return F.nn.contrib_conv2d_winograd_without_weight_transform(*copy_inputs, **new_attrs)
        elif cfg.template_key in ["winograd_nnpack_fp16", "winograd_nnpack_fp32"]:
            # pre-compute winograd_nnpack transform
            # for winograd_nnpack_fp16, the precompute prune pass must run on a
            # device where float16 is supported
            weight_dtype = 'float32'
            transformed_kernel = F.nn.contrib_conv2d_winograd_nnpack_weight_transform(
                copy_inputs[1],
                convolution_algorithm=cfg['winograd_nnpack_algorithm'].val,
                out_dtype=weight_dtype)
            copy_inputs[1] = transformed_kernel
            new_data = data
            # NNPACK transformed kernels are always (CO, CI, 8, 8) fp32.
            new_kernel = tvm.placeholder((CO, CI, 8, 8), "float32")
            bias = tvm.placeholder((CO, ), "float32")
            # Include the bias placeholder in the workload only when a bias
            # input is actually present (3 inputs: data, kernel, bias).
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_kernel, bias, strides,
                 padding, dilation, new_attrs[data_layout_key], out_dtype]
                if len(copy_inputs) == 3 else
                [new_data, new_kernel, strides,
                 padding, dilation, new_attrs[data_layout_key], out_dtype],
                conv2d_winograd_nnpack_without_weight_transform)
            dispatch_ctx.update(target, new_workload, cfg)
            return F.nn.contrib_conv2d_winograd_nnpack_without_weight_transform(
                *copy_inputs, **new_attrs)
        else:
            raise RuntimeError("Unsupported template_key '%s'" % cfg.template_key)
    else:
        # Grouped convolution: only the depthwise spatial-pack template packs
        # the weight tensor; everything else is left untouched.
        workload = autotvm.task.args_to_workload(
            [data, kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw)
        target = tvm.target.current_target()
        dispatch_ctx = autotvm.DispatchContext.current
        cfg = dispatch_ctx.query(target, workload)
        if cfg.is_fallback:  # if is fallback, clear query cache and return None
            autotvm.task.clear_fallback_cache(target, workload)
            return None
        if cfg.template_key == 'contrib_spatial_pack':
            VC = cfg['tile_co'].size[-1]
            new_attrs['kernel_layout'] = 'OIHW%do' % VC
            # Store the same config for the altered operator (workload)
            new_data = data
            CO, M, KH, KW = get_const_tuple(kernel.shape)
            new_kernel = tvm.placeholder((CO // VC, M, KH, KW, VC), dtype=kernel.dtype)
            new_workload = autotvm.task.args_to_workload(
                [new_data, new_kernel, strides, padding, dilation, out_dtype],
                depthwise_conv2d_nchw)
            dispatch_ctx.update(target, new_workload, cfg)
            return F.nn.conv2d(*copy_inputs, **new_attrs)
        else:
            # Currently only the contrib_spatial_pack and direct templates
            # exist for depthwise conv2d; nothing to alter for the rest.
            return None
@conv2d_legalize.register("arm_cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Parameters
    ----------
    attrs : tvm.attrs.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or ``None`` to keep the op unchanged.
    """
    # Only NHWC needs legalization; any other layout is left as-is.
    if attrs['data_layout'] != 'NHWC':
        return None
    data, kernel = inputs
    kernel_layout = attrs['kernel_layout']
    if kernel_layout == 'HWIO':
        # HWIO is common in TF graphs: bring the kernel to OIHW.
        kernel = relay.transpose(kernel, axes=(3, 2, 0, 1))
    elif kernel_layout == 'HWOI':
        # HWOI is common in TF depthwise conv2d graphs.
        kernel = relay.transpose(kernel, axes=(2, 3, 0, 1))
    elif kernel_layout != 'OIHW':
        # Unrecognized kernel layout: bail out.
        return None
    logger.warning("Legalize arm_cpu - NHWC schedule absent. Inserting layout transforms "
                   "to fallback to NCHW. This can result in performance degradation.")
    # Rewrite attributes for the transposed convolution.
    new_attrs = dict((key, attrs[key]) for key in attrs.keys())
    new_attrs['data_layout'] = 'NCHW'
    new_attrs['kernel_layout'] = 'OIHW'
    # NHWC -> NCHW around the conv, then back to the original layout.
    nchw_data = relay.transpose(data, axes=(0, 3, 1, 2))
    conv = relay.nn.conv2d(nchw_data, kernel, **new_attrs)
    return relay.transpose(conv, axes=(0, 2, 3, 1))
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Topics."""
import base64
from gcloud._helpers import _datetime_to_rfc3339
from gcloud._helpers import _NOW
from gcloud.exceptions import NotFound
from gcloud.pubsub._helpers import subscription_name_from_path
from gcloud.pubsub._helpers import topic_name_from_path
from gcloud.pubsub.iam import Policy
from gcloud.pubsub.subscription import Subscription
class Topic(object):
    """Topics are targets to which messages can be published.
    Subscribers then receive those messages.
    See:
    https://cloud.google.com/pubsub/reference/rest/v1/projects.topics
    :type name: string
    :param name: the name of the topic
    :type client: :class:`gcloud.pubsub.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the topic (which requires a project).
    :type timestamp_messages: boolean
    :param timestamp_messages: If true, the topic will add a ``timestamp`` key
                               to the attributes of each published message:
                               the value will be an RFC 3339 timestamp.
    """
    def __init__(self, name, client, timestamp_messages=False):
        self.name = name
        # Kept private; per-call overrides go through _require_client().
        self._client = client
        self.timestamp_messages = timestamp_messages
    def subscription(self, name, ack_deadline=None, push_endpoint=None):
        """Creates a subscription bound to the current topic.
        Example: pull-mode subcription, default paramter values
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_subscription_defaults]
           :end-before: [END topic_subscription_defaults]
        Example: pull-mode subcription, override ``ack_deadline`` default
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_subscription_ack90]
           :end-before: [END topic_subscription_ack90]
        Example: push-mode subcription
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_subscription_push]
           :end-before: [END topic_subscription_push]
        :type name: string
        :param name: the name of the subscription
        :type ack_deadline: int
        :param ack_deadline: the deadline (in seconds) by which messages pulled
                             from the back-end must be acknowledged.
        :type push_endpoint: string
        :param push_endpoint: URL to which messages will be pushed by the
                              back-end.  If not set, the application must pull
                              messages.
        :rtype: :class:`Subscription`
        :returns: The subscription created with the passed in arguments.
        """
        # Purely local construction: no API request is made here.
        return Subscription(name, self, ack_deadline=ack_deadline,
                            push_endpoint=push_endpoint)
    @classmethod
    def from_api_repr(cls, resource, client):
        """Factory:  construct a topic given its API representation
        :type resource: dict
        :param resource: topic resource representation returned from the API
        :type client: :class:`gcloud.pubsub.client.Client`
        :param client: Client which holds credentials and project
                       configuration for the topic.
        :rtype: :class:`gcloud.pubsub.topic.Topic`
        :returns: Topic parsed from ``resource``.
        :raises: :class:`ValueError` if ``client`` is not ``None`` and the
                 project from the resource does not agree with the project
                 from the client.
        """
        # topic_name_from_path validates the project embedded in the path.
        topic_name = topic_name_from_path(resource['name'], client.project)
        return cls(topic_name, client=client)
    @property
    def project(self):
        """Project bound to the topic."""
        return self._client.project
    @property
    def full_name(self):
        """Fully-qualified name used in topic / subscription APIs"""
        return 'projects/%s/topics/%s' % (self.project, self.name)
    def _require_client(self, client):
        """Check client or verify over-ride.
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        :rtype: :class:`gcloud.pubsub.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self._client
        return client
    def create(self, client=None):
        """API call:  create the topic via a PUT request
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/create
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_create]
           :end-before: [END topic_create]
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        """
        client = self._require_client(client)
        api = client.publisher_api
        api.topic_create(topic_path=self.full_name)
    def exists(self, client=None):
        """API call:  test for the existence of the topic via a GET request
        See
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/get
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_exists]
           :end-before: [END topic_exists]
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        :rtype: bool
        :returns: Boolean indicating existence of the topic.
        """
        client = self._require_client(client)
        api = client.publisher_api
        # A NotFound from the GET means the topic does not exist.
        try:
            api.topic_get(topic_path=self.full_name)
        except NotFound:
            return False
        else:
            return True
    def delete(self, client=None):
        """API call:  delete the topic via a DELETE request
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/delete
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_delete]
           :end-before: [END topic_delete]
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        """
        client = self._require_client(client)
        api = client.publisher_api
        api.topic_delete(topic_path=self.full_name)
    def _timestamp_message(self, attrs):
        """Add a timestamp to ``attrs``, if the topic is so configured.
        If ``attrs`` already has the key, do nothing.
        Helper method for ``publish``/``Batch.publish``.
        """
        if self.timestamp_messages and 'timestamp' not in attrs:
            attrs['timestamp'] = _datetime_to_rfc3339(_NOW())
    def publish(self, message, client=None, **attrs):
        """API call:  publish a message to a topic via a POST request
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/publish
        Example without message attributes:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_publish_simple_message]
           :end-before: [END topic_publish_simple_message]
        With message attributes:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_publish_message_with_attrs]
           :end-before: [END topic_publish_message_with_attrs]
        :type message: bytes
        :param message: the message payload
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        :type attrs: dict (string -> string)
        :param attrs: key-value pairs to send as message attributes
        :rtype: str
        :returns: message ID assigned by the server to the published message
        """
        client = self._require_client(client)
        api = client.publisher_api
        self._timestamp_message(attrs)
        # Payload bytes are base64-encoded for transport over the JSON API.
        message_b = base64.b64encode(message).decode('ascii')
        message_data = {'data': message_b, 'attributes': attrs}
        message_ids = api.topic_publish(self.full_name, [message_data])
        # Single-message publish: the server returns exactly one ID.
        return message_ids[0]
    def batch(self, client=None):
        """Return a batch to use as a context manager.
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_batch]
           :end-before: [END topic_batch]
        .. note::
           The only API request happens during the ``__exit__()`` of the topic
           used as a context manager, and only if the block exits without
           raising an exception.
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        :rtype: :class:`Batch`
        :returns: A batch to use as a context manager.
        """
        client = self._require_client(client)
        return Batch(self, client)
    def list_subscriptions(self, page_size=None, page_token=None, client=None):
        """List subscriptions for the project associated with this client.
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_list_subscriptions]
           :end-before: [END topic_list_subscriptions]
        :type page_size: int
        :param page_size: maximum number of topics to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: string
        :param page_token: opaque marker for the next "page" of topics. If not
                           passed, the API will return the first page of
                           topics.
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current topic.
        :rtype: tuple, (list, str)
        :returns: list of :class:`gcloud.pubsub.subscription.Subscription`,
                  plus a "next page token" string:  if not None, indicates that
                  more topics can be retrieved with another call (pass that
                  value as ``page_token``).
        """
        client = self._require_client(client)
        api = client.publisher_api
        sub_paths, next_token = api.topic_list_subscriptions(
            self.full_name, page_size, page_token)
        # Convert API subscription paths into Subscription objects bound to
        # this topic.
        subscriptions = []
        for sub_path in sub_paths:
            sub_name = subscription_name_from_path(sub_path, self.project)
            subscriptions.append(Subscription(sub_name, self))
        return subscriptions, next_token
    def get_iam_policy(self, client=None):
        """Fetch the IAM policy for the topic.
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/getIamPolicy
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_get_iam_policy]
           :end-before: [END topic_get_iam_policy]
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current batch.
        :rtype: :class:`gcloud.pubsub.iam.Policy`
        :returns: policy created from the resource returned by the
                  ``getIamPolicy`` API request.
        """
        client = self._require_client(client)
        api = client.iam_policy_api
        resp = api.get_iam_policy(self.full_name)
        return Policy.from_api_repr(resp)
    def set_iam_policy(self, policy, client=None):
        """Update the IAM policy for the topic.
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/setIamPolicy
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_set_iam_policy]
           :end-before: [END topic_set_iam_policy]
        :type policy: :class:`gcloud.pubsub.iam.Policy`
        :param policy: the new policy, typically fetched via
                       :meth:`get_iam_policy` and updated in place.
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current batch.
        :rtype: :class:`gcloud.pubsub.iam.Policy`
        :returns: updated policy created from the resource returned by the
                  ``setIamPolicy`` API request.
        """
        client = self._require_client(client)
        api = client.iam_policy_api
        resource = policy.to_api_repr()
        resp = api.set_iam_policy(self.full_name, resource)
        return Policy.from_api_repr(resp)
    def check_iam_permissions(self, permissions, client=None):
        """Verify permissions allowed for the current user.
        See:
        https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/testIamPermissions
        Example:
        .. literalinclude:: pubsub_snippets.py
           :start-after: [START topic_check_iam_permissions]
           :end-before: [END topic_check_iam_permissions]
        :type permissions: list of string
        :param permissions: list of permissions to be tested
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current batch.
        :rtype: sequence of string
        :returns: subset of ``permissions`` allowed by current IAM policy.
        """
        client = self._require_client(client)
        api = client.iam_policy_api
        return api.test_iam_permissions(
            self.full_name, list(permissions))
class Batch(object):
    """Context manager:  collect messages to publish via a single API call.
    Helper returned by :meth:Topic.batch
    :type topic: :class:`gcloud.pubsub.topic.Topic`
    :param topic: the topic being published
    :type client: :class:`gcloud.pubsub.client.Client`
    :param client: The client to use.
    """
    def __init__(self, topic, client):
        self.topic = topic
        self.client = client
        # Messages queued locally until commit(); IDs assigned by the server.
        self.messages = []
        self.message_ids = []
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush the queue only when the block exited without an exception.
        if exc_type is None:
            self.commit()
    def __iter__(self):
        return iter(self.message_ids)
    def publish(self, message, **attrs):
        """Emulate publishing a message, but save it.
        :type message: bytes
        :param message: the message payload
        :type attrs: dict (string -> string)
        :param attrs: key-value pairs to send as message attributes
        """
        self.topic._timestamp_message(attrs)
        payload = base64.b64encode(message).decode('ascii')
        self.messages.append({'data': payload, 'attributes': attrs})
    def commit(self, client=None):
        """Send saved messages as a single API call.
        :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current batch.
        """
        if not self.messages:
            return
        bound_client = self.client if client is None else client
        api = bound_client.publisher_api
        # Send a copy of the queue, record the returned IDs, then clear the
        # queue in place.
        sent_ids = api.topic_publish(self.topic.full_name, list(self.messages))
        self.message_ids.extend(sent_ids)
        del self.messages[:]
| |
#!/usr/bin/env python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tool creates an html visualization of a TensorFlow Lite graph.
Example usage:
python visualize.py foo.tflite foo.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from tensorflow.python.platform import resource_loader
# Schema to use for flatbuffers
_SCHEMA = "third_party/tensorflow/contrib/lite/schema/schema.fbs"
# TODO(angerson): fix later when rules are simplified..
# NOTE: this intentionally overrides the hard-coded path above.
_SCHEMA = resource_loader.get_path_to_datafile("../schema/schema.fbs")
_BINARY = resource_loader.get_path_to_datafile("../../../../flatbuffers/flatc")
# Account for different package positioning internal vs. external.
if not os.path.exists(_BINARY):
  _BINARY = resource_loader.get_path_to_datafile(
      "../../../../../flatbuffers/flatc")
# Fail fast at import time if either prerequisite is missing.
if not os.path.exists(_SCHEMA):
  raise RuntimeError("Sorry, schema file cannot be found at %r" % _SCHEMA)
if not os.path.exists(_BINARY):
  raise RuntimeError("Sorry, flatc is not available at %r" % _BINARY)
# A CSS description for making the visualizer: page skeleton, table styling
# and tooltip behavior used by the generated HTML report.
_CSS = """
<html>
<head>
<style>
body {font-family: sans-serif; background-color: #ffaa00;}
table {background-color: #eeccaa;}
th {background-color: black; color: white;}
h1 {
background-color: ffaa00;
padding:5px;
color: black;
}
div {
border-radius: 5px;
background-color: #ffeecc;
padding:5px;
margin:5px;
}
.tooltip {color: blue;}
.tooltip .tooltipcontent {
visibility: hidden;
color: black;
background-color: yellow;
padding: 5px;
border-radius: 4px;
position: absolute;
z-index: 1;
}
.tooltip:hover .tooltipcontent {
visibility: visible;
}
.edges line {
stroke: #333333;
}
.nodes text {
color: black;
pointer-events: none;
font-family: sans-serif;
font-size: 11px;
}
</style>
<script src="https://d3js.org/d3.v4.min.js"></script>
</head>
<body>
"""
# d3 force-layout script for one subgraph. Interpolated with two values:
# (graph JSON string, subgraph index) — see GenerateGraph().
_D3_HTML_TEMPLATE = """
<script>
// Build graph data
var graph = %s;
var svg = d3.select("#subgraph%d");
var width = svg.attr("width");
var height = svg.attr("height");
var color = d3.scaleOrdinal(d3.schemeCategory20);
var simulation = d3.forceSimulation()
.force("link", d3.forceLink().id(function(d) {return d.id;}))
.force("charge", d3.forceManyBody())
.force("center", d3.forceCenter(0.5 * width, 0.5 * height));
function buildGraph() {
var edge = svg.append("g").attr("class", "edges").selectAll("line")
.data(graph.edges).enter().append("line")
// Make the node group
var node = svg.selectAll(".nodes")
.data(graph.nodes)
.enter().append("g")
.attr("class", "nodes")
.call(d3.drag()
.on("start", function(d) {
if(!d3.event.active) simulation.alphaTarget(1.0).restart();
d.fx = d.x;d.fy = d.y;
})
.on("drag", function(d) {
d.fx = d3.event.x; d.fy = d3.event.y;
})
.on("end", function(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = d.fy = null;
}));
// Within the group, draw a circle for the node position and text
// on the side.
node.append("circle")
.attr("r", "5px")
.attr("fill", function(d) { return color(d.group); })
node.append("text")
.attr("dx", 8).attr("dy", 5).text(function(d) { return d.name; });
// Setup force parameters and update position callback
simulation.nodes(graph.nodes).on("tick", forceSimulationUpdated);
simulation.force("link").links(graph.edges);
function forceSimulationUpdated() {
// Update edges.
edge.attr("x1", function(d) {return d.source.x;})
.attr("y1", function(d) {return d.source.y;})
.attr("x2", function(d) {return d.target.x;})
.attr("y2", function(d) {return d.target.y;});
// Update node positions
node.attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; });
}
}
buildGraph()
</script>
"""
class OpCodeMapper(object):
  """Maps an operator-code index to the model's builtin op name."""

  def __init__(self, data):
    # Pre-compute index -> builtin_code from the model's operator_codes table.
    self.code_to_name = {
        idx: entry["builtin_code"]
        for idx, entry in enumerate(data["operator_codes"])
    }

  def __call__(self, x):
    """Render opcode index `x` as 'NAME (opcode=N)'."""
    return "%s (opcode=%d)" % (self.code_to_name.get(x, "<UNKNOWN>"), x)
class DataSizeMapper(object):
  """For buffers, report the number of bytes."""

  def __call__(self, x):
    """Return '<n> bytes' for a present buffer, '--' when there is none."""
    return "--" if x is None else "%d bytes" % len(x)
class TensorMapper(object):
  """Maps a list of tensor indices to a tooltip hoverable indicator of more."""

  def __init__(self, subgraph_data):
    self.data = subgraph_data

  def __call__(self, x):
    """Return tooltip HTML: per-tensor details hidden behind repr(x)."""
    pieces = ["<span class='tooltip'><span class='tooltipcontent'>"]
    for idx in x:
      tensor = self.data["tensors"][idx]
      pieces.append("%s %s %s %s<br>" %
                    (idx, tensor["name"], tensor["type"],
                     repr(tensor["shape"])))
    pieces.append("</span>%r</span>" % (x,))
    return "".join(pieces)
def GenerateGraph(subgraph_idx, g, opcode_mapper):
  """Produces the HTML required to have a d3 visualization of the dag.

  Args:
    subgraph_idx: index of the subgraph (used to target its <svg> element).
    g: subgraph dict with "operators" and "tensors" lists.
    opcode_mapper: callable mapping an opcode_index to a display name.

  Returns:
    HTML string embedding the graph JSON into the d3 script template.
  """

  def TensorName(idx):
    # Node id for tensor `idx`.
    return "t%d" % idx

  def OpName(idx):
    # Node id for operator `idx`.
    return "o%d" % idx

  edges = []
  nodes = []
  first = {}  # tensor index -> placement hint from its first consuming op
  pixel_mult = 50  # TODO(aselle): multiplier for initial placement
  for op_index, op in enumerate(g["operators"]):
    for tensor_input_position, tensor_index in enumerate(op["inputs"]):
      if tensor_index not in first:
        first[tensor_index] = (
            op_index * pixel_mult,
            tensor_input_position * pixel_mult - pixel_mult / 2)
      edges.append({
          "source": TensorName(tensor_index),
          "target": OpName(op_index)
      })
    for tensor_index in op["outputs"]:
      edges.append({
          "target": TensorName(tensor_index),
          "source": OpName(op_index)
      })
    nodes.append({
        "id": OpName(op_index),
        "name": opcode_mapper(op["opcode_index"]),
        "group": 2,
        "x": pixel_mult,
        "y": op_index * pixel_mult
    })
  for tensor_index, tensor in enumerate(g["tensors"]):
    # NOTE(review): `first[...]` holds an (x, y) tuple while the fallback is
    # an int, so "y" may be either type; these are only initial hints for the
    # d3 force simulation, which repositions nodes anyway -- confirm before
    # relying on these values for anything else.
    initial_y = (
        first[tensor_index] if tensor_index in first else len(g["operators"]))

    nodes.append({
        "id": TensorName(tensor_index),
        "name": "%s (%d)" % (tensor["name"], tensor_index),
        "group": 1,
        "x": 2,
        "y": initial_y
    })
  graph_str = json.dumps({"nodes": nodes, "edges": edges})

  html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx)
  return html
def GenerateTableHtml(items, keys_to_print, display_index=True):
  """Given a list of object values and keys to print, make an HTML table.

  Args:
    items: Items to print an array of dicts.
    keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
      items[0][key] should exist. display_fn is the mapping function on
      display. i.e. the displayed html cell will have the string returned by
      `mapping_fn(items[0][key])`.
    display_index: add a column which is the index of each row in `items`.

  Returns:
    An html table.
  """
  # Header row.
  parts = ["<table><tr>\n", "<tr>\n"]
  if display_index:
    parts.append("<th>index</th>")
  for heading, _unused in keys_to_print:
    parts.append("<th>%s</th>" % heading)
  parts.append("</tr>\n")
  # One row per item; missing keys render as None.
  for row_index, item in enumerate(items):
    parts.append("<tr>\n")
    if display_index:
      parts.append("<td>%d</td>" % row_index)
    for key, mapper in keys_to_print:
      value = item[key] if key in item else None
      if mapper is not None:
        value = mapper(value)
      parts.append("<td>%s</td>\n" % value)
    parts.append("</tr>\n")
  parts.append("</table>\n")
  return "".join(parts)
def CreateHtmlFile(tflite_input, html_output):
  """Given a tflite model in `tflite_input` file, produce html description.

  Args:
    tflite_input: path to a .tflite/.bin model (converted via flatc) or a
      .json flatbuffer dump that is loaded directly.
    html_output: path the generated HTML report is written to.

  Raises:
    RuntimeError: if the input path does not exist or has an unknown suffix.
  """
  # Convert the model into a JSON flatbuffer using flatc (build if doesn't
  # exist.
  if not os.path.exists(tflite_input):
    raise RuntimeError("Invalid filename %r" % tflite_input)

  if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"):
    # Run convert.
    # NOTE(review): the filename is interpolated into a shell command
    # unquoted; paths containing spaces or shell metacharacters will break
    # (or execute unintended commands). Consider subprocess.check_call with
    # an argument list instead of os.system.
    cmd = (
        _BINARY + " -t "
        "--strict-json --defaults-json -o /tmp {schema} -- {input}".format(
            input=tflite_input, schema=_SCHEMA))
    print(cmd)
    os.system(cmd)
    real_output = ("/tmp/" + os.path.splitext(
        os.path.split(tflite_input)[-1])[0] + ".json")
    # Use a context manager so the file handle is not leaked.
    with open(real_output) as json_file:
      data = json.load(json_file)
  elif tflite_input.endswith(".json"):
    with open(tflite_input) as json_file:
      data = json.load(json_file)
  else:
    raise RuntimeError("Input file was not .tflite or .json")
  html = ""
  html += _CSS
  # Fixed: heading previously opened <h1> but closed </h2>.
  html += "<h1>TensorFlow Lite Model</h1>"

  data["filename"] = tflite_input  # Avoid special case

  # Top-level scalar attributes of the model.
  toplevel_stuff = [("filename", None), ("version", None), ("description",
                                                            None)]

  html += "<table>\n"
  for key, mapping in toplevel_stuff:
    if not mapping:
      mapping = lambda x: x
    html += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, mapping(data[key]))
  html += "</table>\n"

  # Spec on what keys to display
  buffer_keys_to_display = [("data", DataSizeMapper())]
  operator_keys_to_display = [("builtin_code", None)]

  for subgraph_idx, g in enumerate(data["subgraphs"]):
    # Subgraph local specs on what to display
    html += "<div class='subgraph'>"
    tensor_mapper = TensorMapper(g)
    opcode_mapper = OpCodeMapper(data)
    op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper),
                          ("builtin_options", None), ("opcode_index",
                                                      opcode_mapper)]
    tensor_keys_to_display = [("name", None), ("type", None), ("shape", None),
                              ("buffer", None), ("quantization", None)]

    html += "<h2>Subgraph %d</h2>\n" % subgraph_idx

    # Inputs and outputs.
    html += "<h3>Inputs/Outputs</h3>\n"
    html += GenerateTableHtml(
        [{
            "inputs": g["inputs"],
            "outputs": g["outputs"]
        }], [("inputs", tensor_mapper), ("outputs", tensor_mapper)],
        display_index=False)

    # Print the tensors.
    html += "<h3>Tensors</h3>\n"
    html += GenerateTableHtml(g["tensors"], tensor_keys_to_display)

    # Print the ops.
    html += "<h3>Ops</h3>\n"
    html += GenerateTableHtml(g["operators"], op_keys_to_display)

    # Visual graph.
    html += "<svg id='subgraph%d' width='960' height='1600'></svg>\n" % (
        subgraph_idx,)
    html += GenerateGraph(subgraph_idx, g, opcode_mapper)
    html += "</div>"

  # Buffers have no data, but maybe in the future they will
  html += "<h2>Buffers</h2>\n"
  html += GenerateTableHtml(data["buffers"], buffer_keys_to_display)

  # Operator codes
  html += "<h2>Operator Codes</h2>\n"
  html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display)

  html += "</body></html>\n"

  # Fixed: write through a context manager instead of leaking the handle.
  with open(html_output, "w") as output_file:
    output_file.write(html)
def main(argv):
  """Command-line entry point: argv = [prog, input tflite, output html]."""
  if len(argv) < 3:
    # Same outcome as the original IndexError path: show usage.
    print("Usage: %s <input tflite> <output html>" % (argv[0]))
  else:
    CreateHtmlFile(argv[1], argv[2])


if __name__ == "__main__":
  main(sys.argv)
| |
#!/usr/bin/python
from Raspi_PWM_Servo_Driver import PWM
import time
class Raspi_StepperMotor:
MICROSTEPS = 8
MICROSTEP_CURVE = [0, 50, 98, 142, 180, 212, 236, 250, 255]
#MICROSTEPS = 16
# a sinusoidal curve NOT LINEAR!
#MICROSTEP_CURVE = [0, 25, 50, 74, 98, 120, 141, 162, 180, 197, 212, 225, 236, 244, 250, 253, 255]
def __init__(self, controller, num, steps=200):
self.MC = controller
self.revsteps = steps
self.motornum = num
self.sec_per_step = 0.1
self.steppingcounter = 0
self.currentstep = 0
num -= 1
if (num == 0):
self.PWMA = 8
self.AIN2 = 9
self.AIN1 = 10
self.PWMB = 13
self.BIN2 = 12
self.BIN1 = 11
elif (num == 1):
self.PWMA = 2
self.AIN2 = 3
self.AIN1 = 4
self.PWMB = 7
self.BIN2 = 6
self.BIN1 = 5
else:
raise NameError('MotorHAT Stepper must be between 1 and 2 inclusive')
def setSpeed(self, rpm):
self.sec_per_step = 60.0 / (self.revsteps * rpm)
self.steppingcounter = 0
def oneStep(self, dir, style):
pwm_a = pwm_b = 255
# first determine what sort of stepping procedure we're up to
if (style == Raspi_MotorHAT.SINGLE):
if ((self.currentstep/(self.MICROSTEPS/2)) % 2):
# we're at an odd step, weird
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS/2
else:
self.currentstep -= self.MICROSTEPS/2
else:
# go to next even step
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS
else:
self.currentstep -= self.MICROSTEPS
if (style == Raspi_MotorHAT.DOUBLE):
if not (self.currentstep/(self.MICROSTEPS/2) % 2):
# we're at an even step, weird
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS/2
else:
self.currentstep -= self.MICROSTEPS/2
else:
# go to next odd step
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS
else:
self.currentstep -= self.MICROSTEPS
if (style == Raspi_MotorHAT.INTERLEAVE):
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS/2
else:
self.currentstep -= self.MICROSTEPS/2
if (style == Raspi_MotorHAT.MICROSTEP):
if (dir == Raspi_MotorHAT.FORWARD):
self.currentstep += 1
else:
self.currentstep -= 1
# go to next 'step' and wrap around
self.currentstep += self.MICROSTEPS * 4
self.currentstep %= self.MICROSTEPS * 4
pwm_a = pwm_b = 0
if (self.currentstep >= 0) and (self.currentstep < self.MICROSTEPS):
pwm_a = self.MICROSTEP_CURVE[self.MICROSTEPS - self.currentstep]
pwm_b = self.MICROSTEP_CURVE[self.currentstep]
elif (self.currentstep >= self.MICROSTEPS) and (self.currentstep < self.MICROSTEPS*2):
pwm_a = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS]
pwm_b = self.MICROSTEP_CURVE[self.MICROSTEPS*2 - self.currentstep]
elif (self.currentstep >= self.MICROSTEPS*2) and (self.currentstep < self.MICROSTEPS*3):
pwm_a = self.MICROSTEP_CURVE[self.MICROSTEPS*3 - self.currentstep]
pwm_b = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS*2]
elif (self.currentstep >= self.MICROSTEPS*3) and (self.currentstep < self.MICROSTEPS*4):
pwm_a = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS*3]
pwm_b = self.MICROSTEP_CURVE[self.MICROSTEPS*4 - self.currentstep]
# go to next 'step' and wrap around
self.currentstep += self.MICROSTEPS * 4
self.currentstep %= self.MICROSTEPS * 4
# only really used for microstepping, otherwise always on!
self.MC._pwm.setPWM(self.PWMA, 0, pwm_a*16)
self.MC._pwm.setPWM(self.PWMB, 0, pwm_b*16)
# set up coil energizing!
coils = [0, 0, 0, 0]
if (style == Raspi_MotorHAT.MICROSTEP):
if (self.currentstep >= 0) and (self.currentstep < self.MICROSTEPS):
coils = [1, 1, 0, 0]
elif (self.currentstep >= self.MICROSTEPS) and (self.currentstep < self.MICROSTEPS*2):
coils = [0, 1, 1, 0]
elif (self.currentstep >= self.MICROSTEPS*2) and (self.currentstep < self.MICROSTEPS*3):
coils = [0, 0, 1, 1]
elif (self.currentstep >= self.MICROSTEPS*3) and (self.currentstep < self.MICROSTEPS*4):
coils = [1, 0, 0, 1]
else:
step2coils = [ [1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[1, 0, 0, 1] ]
coils = step2coils[self.currentstep/(self.MICROSTEPS/2)]
#print "coils state = " + str(coils)
self.MC.setPin(self.AIN2, coils[0])
self.MC.setPin(self.BIN1, coils[1])
self.MC.setPin(self.AIN1, coils[2])
self.MC.setPin(self.BIN2, coils[3])
return self.currentstep
def step(self, steps, direction, stepstyle):
s_per_s = self.sec_per_step
lateststep = 0
if (stepstyle == Raspi_MotorHAT.INTERLEAVE):
s_per_s = s_per_s / 2.0
if (stepstyle == Raspi_MotorHAT.MICROSTEP):
s_per_s /= self.MICROSTEPS
steps *= self.MICROSTEPS
print s_per_s, " sec per step"
for s in range(steps):
lateststep = self.oneStep(direction, stepstyle)
time.sleep(s_per_s)
if (stepstyle == Raspi_MotorHAT.MICROSTEP):
# this is an edge case, if we are in between full steps, lets just keep going
# so we end on a full step
while (lateststep != 0) and (lateststep != self.MICROSTEPS):
lateststep = self.oneStep(dir, stepstyle)
time.sleep(s_per_s)
class Raspi_DCMotor:
    """A single DC motor channel on the Motor HAT (one H-bridge)."""

    def __init__(self, controller, num):
        # controller: owning Raspi_MotorHAT; num: zero-based motor index 0-3.
        self.MC = controller
        self.motornum = num
        # motor index -> (PWM pin, IN2 pin, IN1 pin)
        pin_map = {
            0: (8, 9, 10),
            1: (13, 12, 11),
            2: (2, 3, 4),
            3: (7, 6, 5),
        }
        if num not in pin_map:
            raise NameError('MotorHAT Motor must be between 1 and 4 inclusive')
        self.PWMpin, self.IN2pin, self.IN1pin = pin_map[num]

    def run(self, command):
        """Drive the H-bridge forward, backward, or release it."""
        if not self.MC:
            return
        if command == Raspi_MotorHAT.FORWARD:
            self.MC.setPin(self.IN2pin, 0)
            self.MC.setPin(self.IN1pin, 1)
        elif command == Raspi_MotorHAT.BACKWARD:
            self.MC.setPin(self.IN1pin, 0)
            self.MC.setPin(self.IN2pin, 1)
        elif command == Raspi_MotorHAT.RELEASE:
            self.MC.setPin(self.IN1pin, 0)
            self.MC.setPin(self.IN2pin, 0)

    def setSpeed(self, speed):
        """Set the PWM duty for this motor; speed is clamped to [0, 255]."""
        clamped = max(0, min(255, speed))
        self.MC._pwm.setPWM(self.PWMpin, 0, clamped * 16)
class Raspi_MotorHAT:
    """Top-level Motor HAT driver: owns the PWM chip, DC motors, steppers."""

    # DC motor commands
    FORWARD = 1
    BACKWARD = 2
    BRAKE = 3
    RELEASE = 4

    # Stepper styles
    SINGLE = 1
    DOUBLE = 2
    INTERLEAVE = 3
    MICROSTEP = 4

    def __init__(self, addr=0x60, freq=1600):
        self._i2caddr = addr  # default addr on HAT
        self._frequency = freq  # default @1600Hz PWM freq
        self.motors = [Raspi_DCMotor(self, index) for index in range(4)]
        self.steppers = [Raspi_StepperMotor(self, port) for port in (1, 2)]
        self._pwm = PWM(addr, debug=False)
        self._pwm.setPWMFreq(self._frequency)

    def setPin(self, pin, value):
        """Drive PWM channel `pin` fully on (value=1) or fully off (value=0)."""
        if pin < 0 or pin > 15:
            raise NameError('PWM pin must be between 0 and 15 inclusive')
        if value != 0 and value != 1:
            raise NameError('Pin value must be 0 or 1!')
        if value == 0:
            self._pwm.setPWM(pin, 0, 4096)
        else:
            self._pwm.setPWM(pin, 4096, 0)

    def getStepper(self, steps, num):
        """Return stepper port `num` (1 or 2); `steps` is kept for API compat."""
        if num < 1 or num > 2:
            raise NameError('MotorHAT Stepper must be between 1 and 2 inclusive')
        return self.steppers[num - 1]

    def getMotor(self, num):
        """Return DC motor `num` (1 through 4)."""
        if num < 1 or num > 4:
            raise NameError('MotorHAT Motor must be between 1 and 4 inclusive')
        return self.motors[num - 1]
| |
# Copyright 2012 Nebula, Inc.
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django import http
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.images import utils
from openstack_dashboard.dashboards.admin.instances import tables
from openstack_dashboard.usage import quotas
# Image backend configuration pulled from Django settings; used when building
# image-format choices for image-backed volumes.
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
# Disk formats Cinder can consume directly when creating a volume from image.
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2')
DEFAULT_CONTAINER_FORMAT = 'bare'
# Determine whether the extension for Cinder AZs is enabled
def cinder_az_supported(request):
    """Report whether the Cinder AvailabilityZones extension is enabled."""
    error_msg = _('Unable to determine if availability '
                  'zones extension is supported.')
    try:
        supported = cinder.extension_supported(request, 'AvailabilityZones')
    except Exception:
        exceptions.handle(request, error_msg)
        return False
    return supported
def availability_zones(request):
    """Build the (value, label) choice list of Cinder availability zones."""
    zone_list = []
    if cinder_az_supported(request):
        try:
            zones = api.cinder.availability_zone_list(request)
            zone_list = sorted(
                (zone.zoneName, zone.zoneName)
                for zone in zones if zone.zoneState['available'])
        except Exception:
            exceptions.handle(request, _('Unable to retrieve availability '
                                         'zones.'))
    if not zone_list:
        return [("", _("No availability zones found"))]
    if len(zone_list) > 1:
        # A single zone needs no "any" placeholder.
        zone_list.insert(0, ("", _("Any Availability Zone")))
    return zone_list
class CreateForm(forms.SelfHandlingForm):
    """Form for creating a volume, optionally sourced from a snapshot,
    an image, or another volume.

    The set of visible fields is pruned in __init__ depending on which
    (if any) source id arrives in the request's query string.
    """
    name = forms.CharField(max_length=255, label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 4}),
        label=_("Description"), required=False)
    volume_source_type = forms.ChoiceField(
        label=_("Volume Source"),
        required=False,
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switchable',
            'data-slug': 'source'}))
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'snapshot-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name', 'min_disk'),
            transform=lambda x: "%s (%s)" % (x.name, filesizeformat(x.bytes))),
        required=False)
    volume_source = forms.ChoiceField(
        label=_("Use a volume as source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    type = forms.ChoiceField(
        label=_("Type"),
        required=False,
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Type'),
                   'data-source-image_source': _('Type')}))
    size = forms.IntegerField(min_value=1, initial=1, label=_("Size (GiB)"))
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Availability Zone'),
                   'data-source-image_source': _('Availability Zone')}))

    def prepare_source_fields_if_snapshot_specified(self, request):
        """Preset and prune fields when ?snapshot_id= is in the request."""
        try:
            snapshot = self.get_snapshot(request,
                                         request.GET["snapshot_id"])
            self.fields['name'].initial = snapshot.name
            self.fields['size'].initial = snapshot.size
            self.fields['snapshot_source'].choices = ((snapshot.id,
                                                       snapshot),)
            try:
                # Set the volume type from the original volume
                orig_volume = cinder.volume_get(request,
                                                snapshot.volume_id)
                self.fields['type'].initial = orig_volume.volume_type
            except Exception:
                pass
            self.fields['size'].help_text = (
                _('Volume size must be equal to or greater than the '
                  'snapshot size (%sGiB)') % snapshot.size)
            del self.fields['image_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
            del self.fields['availability_zone']
        except Exception:
            exceptions.handle(request,
                              _('Unable to load the specified snapshot.'))

    def prepare_source_fields_if_image_specified(self, request):
        """Preset and prune fields when ?image_id= is in the request."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            image = self.get_image(request,
                                   request.GET["image_id"])
            image.bytes = image.size
            self.fields['name'].initial = image.name
            min_vol_size = functions.bytes_to_gigabytes(
                image.size)
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the image size (%s)')
                              % filesizeformat(image.size))
            properties = getattr(image, 'properties', {})
            min_disk_size = (getattr(image, 'min_disk', 0) or
                             properties.get('min_disk', 0))
            # The image's declared minimum disk size may exceed its byte
            # size; use whichever is larger as the size floor.
            if (min_disk_size > min_vol_size):
                min_vol_size = min_disk_size
                size_help_text = (_('Volume size must be equal to or '
                                    'greater than the image minimum '
                                    'disk size (%sGiB)')
                                  % min_disk_size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['image_source'].choices = ((image.id, image),)
            del self.fields['snapshot_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
        except Exception:
            msg = _('Unable to load the specified image. %s')
            exceptions.handle(request, msg % request.GET['image_id'])

    def prepare_source_fields_if_volume_specified(self, request):
        """Preset and prune fields when ?volume_id= is in the request."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        volume = None
        try:
            volume = self.get_volume(request, request.GET["volume_id"])
        except Exception:
            msg = _('Unable to load the specified volume. %s')
            exceptions.handle(request, msg % request.GET['volume_id'])

        if volume is not None:
            self.fields['name'].initial = volume.name
            self.fields['description'].initial = volume.description
            min_vol_size = volume.size
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the origin volume size (%sGiB)')
                              % volume.size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['volume_source'].choices = ((volume.id, volume),)
            self.fields['type'].initial = volume.type
            del self.fields['snapshot_source']
            del self.fields['image_source']
            del self.fields['volume_source_type']

    def prepare_source_fields_default(self, request):
        """Populate all source choices when no explicit source was given.

        Source categories with no available entries have their form field
        removed entirely.
        """
        source_type_choices = []
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            snapshots = cinder.volume_snapshot_list(
                request, search_opts=dict(status=available))
            if snapshots:
                source_type_choices.append(("snapshot_source",
                                            _("Snapshot")))
                choices = [('', _("Choose a snapshot"))] + \
                          [(s.id, s) for s in snapshots]
                self.fields['snapshot_source'].choices = choices
            else:
                del self.fields['snapshot_source']
        except Exception:
            exceptions.handle(request,
                              _("Unable to retrieve volume snapshots."))

        images = utils.get_available_images(request,
                                            request.user.tenant_id)
        if images:
            source_type_choices.append(("image_source", _("Image")))
            choices = [('', _("Choose an image"))]
            for image in images:
                # Keep raw byte size on the object; display size in GiB.
                image.bytes = image.size
                image.size = functions.bytes_to_gigabytes(image.bytes)
                choices.append((image.id, image))
            self.fields['image_source'].choices = choices
        else:
            del self.fields['image_source']

        volumes = self.get_volumes(request)
        if volumes:
            source_type_choices.append(("volume_source", _("Volume")))
            choices = [('', _("Choose a volume"))]
            for volume in volumes:
                choices.append((volume.id, volume))
            self.fields['volume_source'].choices = choices
        else:
            del self.fields['volume_source']

        if source_type_choices:
            choices = ([('no_source_type',
                         _("No source, empty volume"))] +
                       source_type_choices)
            self.fields['volume_source_type'].choices = choices
        else:
            del self.fields['volume_source_type']

    def __init__(self, request, *args, **kwargs):
        super(CreateForm, self).__init__(request, *args, **kwargs)
        volume_types = cinder.volume_type_list(request)
        self.fields['type'].choices = [("", _("No volume type"))] + \
                                      [(type.name, type.name)
                                       for type in volume_types]
        if 'initial' in kwargs and 'type' in kwargs['initial']:
            # If there is a default volume type to select, remove the
            # leading "No volume type" entry.
            self.fields['type'].choices.pop(0)

        # Dispatch to the matching field-preparation helper based on which
        # source id (if any) arrived in the query string.
        if "snapshot_id" in request.GET:
            self.prepare_source_fields_if_snapshot_specified(request)
        elif 'image_id' in request.GET:
            self.prepare_source_fields_if_image_specified(request)
        elif 'volume_id' in request.GET:
            self.prepare_source_fields_if_volume_specified(request)
        else:
            self.prepare_source_fields_default(request)

    def clean(self):
        """Require a concrete source whenever a source type is selected."""
        cleaned_data = super(CreateForm, self).clean()
        source_type = self.cleaned_data.get('volume_source_type')
        if (source_type == 'image_source' and
                not cleaned_data.get('image_source')):
            msg = _('Image source must be specified')
            self._errors['image_source'] = self.error_class([msg])
        elif (source_type == 'snapshot_source' and
                not cleaned_data.get('snapshot_source')):
            msg = _('Snapshot source must be specified')
            self._errors['snapshot_source'] = self.error_class([msg])
        elif (source_type == 'volume_source' and
                not cleaned_data.get('volume_source')):
            msg = _('Volume source must be specified')
            self._errors['volume_source'] = self.error_class([msg])
        return cleaned_data

    def get_volumes(self, request):
        """Return the volumes in the 'available' state (empty on error)."""
        volumes = []
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = cinder.volume_list(self.request,
                                         search_opts=dict(status=available))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve list of volumes.'))
        return volumes

    def handle(self, request, data):
        """Validate quota/size constraints and create the volume.

        Returns the created volume on success, False on a validation
        error; other failures redirect to the volumes index.
        """
        try:
            usages = quotas.tenant_limit_usages(self.request)
            availableGB = usages['maxTotalVolumeGigabytes'] - \
                usages['gigabytesUsed']
            availableVol = usages['maxTotalVolumes'] - usages['volumesUsed']

            snapshot_id = None
            image_id = None
            volume_id = None
            source_type = data.get('volume_source_type', None)
            az = data.get('availability_zone', None) or None

            if (data.get("snapshot_source", None) and
                    source_type in ['', None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if (data['size'] < snapshot.size):
                    error_message = (_('The volume size cannot be less than '
                                       'the snapshot size (%sGiB)')
                                     % snapshot.size)
                    raise ValidationError(error_message)
                # Snapshot-sourced volumes ignore the availability zone.
                az = None
            elif (data.get("image_source", None) and
                    source_type in ['', None, 'image_source']):
                # Create from image
                image = self.get_image(request,
                                       data["image_source"])
                image_id = image.id
                image_size = functions.bytes_to_gigabytes(image.size)
                if (data['size'] < image_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image size (%s)')
                                     % filesizeformat(image.size))
                    raise ValidationError(error_message)
                properties = getattr(image, 'properties', {})
                min_disk_size = (getattr(image, 'min_disk', 0) or
                                 properties.get('min_disk', 0))
                if (min_disk_size > 0 and data['size'] < min_disk_size):
                    error_message = (_('The volume size cannot be less than '
                                       'the image minimum disk size (%sGiB)')
                                     % min_disk_size)
                    raise ValidationError(error_message)
            elif (data.get("volume_source", None) and
                    source_type in ['', None, 'volume_source']):
                # Create from volume
                volume = self.get_volume(request, data["volume_source"])
                volume_id = volume.id

                if data['size'] < volume.size:
                    error_message = (_('The volume size cannot be less than '
                                       'the source volume size (%sGiB)')
                                     % volume.size)
                    raise ValidationError(error_message)
            else:
                # Empty volume: only quota checks apply.
                if type(data['size']) is str:
                    data['size'] = int(data['size'])

                if availableGB < data['size']:
                    error_message = _('A volume of %(req)iGiB cannot be created '
                                      'as you only have %(avail)iGiB of your '
                                      'quota available.')
                    params = {'req': data['size'],
                              'avail': availableGB}
                    raise ValidationError(error_message % params)
                elif availableVol <= 0:
                    error_message = _('You are already using all of your available'
                                      ' volumes.')
                    raise ValidationError(error_message)

            metadata = {}

            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          data['type'],
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata,
                                          availability_zone=az,
                                          source_volid=volume_id)
            message = _('Creating volume "%s"') % data['name']
            messages.info(request, message)
            return volume
        except ValidationError as e:
            self.api_error(e.messages[0])
            return False
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _("Unable to create volume."),
                              redirect=redirect)

    @memoized
    def get_snapshot(self, request, id):
        """Fetch (and memoize) a snapshot by id."""
        return cinder.volume_snapshot_get(request, id)

    @memoized
    def get_image(self, request, id):
        """Fetch (and memoize) an image by id."""
        return glance.image_get(request, id)

    @memoized
    def get_volume(self, request, id):
        """Fetch (and memoize) a volume by id."""
        return cinder.volume_get(request, id)
class IncreaseForm(forms.SelfHandlingForm):
    """Form for growing the backing volume storage by a given size."""

    orig_size = forms.CharField(
        label=_("Current Size (GB)"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    new_size = forms.IntegerField(label=_("Add Size (GB)"),
                                  min_value=1,
                                  initial=1)

    def clean(self):
        """Reject the submission when the current size could not be read."""
        cleaned_data = super(IncreaseForm, self).clean()
        orig_size = cleaned_data.get('orig_size')
        if not orig_size:
            msg = _("Fail to get the size of current volume storage, please check the cinder services or exclude other reasons.")
            raise forms.ValidationError(msg)
        return cleaned_data

    def handle(self, request, data):
        """Ask the device API to extend the volume group on the cinder host."""
        try:
            endpoints = api.base.url_for(request, 'volume')
            expression = r'https?://(.+?):.+?'
            # NOTE(review): .groups() returns a tuple, so `host` is
            # ('hostname',) rather than a plain string -- confirm that
            # update_cloud_disk_size really expects a tuple, otherwise this
            # should be .group(1).
            host = re.match(expression, endpoints).groups()
            result = api.device.update_cloud_disk_size(
                request, host=host, volume_size=data['new_size']).text
            # SECURITY: eval() on a service response executes arbitrary code
            # if the endpoint is compromised; switch to json.loads or
            # ast.literal_eval once the payload format is confirmed.
            res = eval(result)
            # Fixed: `is "success"` compared object identity, not equality,
            # so success could be mis-reported; use == for value comparison.
            if res.get("status", 'failed') == "success":
                messages.success(request, _("Extending volume group successful!"))
            else:
                messages.error(request, _("Fail to extend volume group."))
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to extend volume group.'),
                              redirect=redirect)
        return True
class AttachForm(forms.SelfHandlingForm):
    """Form for attaching a volume to one of the tenant's instances."""

    instance = forms.ThemableChoiceField(label=_("Attach to Instance"),
                                         help_text=_("Select an instance to "
                                                     "attach to."))
    device = forms.CharField(label=_("Device Name"),
                             widget=forms.TextInput(attrs={'placeholder':
                                                           '/dev/vdc'}),
                             required=False,
                             help_text=_("Actual device name may differ due "
                                         "to hypervisor settings. If not "
                                         "specified, then hypervisor will "
                                         "select a device name."))

    def __init__(self, *args, **kwargs):
        super(AttachForm, self).__init__(*args, **kwargs)

        # Hide the device field if the hypervisor doesn't support it.
        if not nova.can_set_mount_point():
            self.fields['device'].widget = forms.widgets.HiddenInput()

        # populate volume_id
        volume = kwargs.get('initial', {}).get("volume", None)
        if volume:
            volume_id = volume.id
        else:
            volume_id = None
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)

        # Populate instance choices, excluding instances this volume is
        # already attached to. Fixed: `volume.attachments` was dereferenced
        # unconditionally, raising AttributeError when no volume was passed
        # in the initial data.
        attachments = volume.attachments if volume else []
        attached_ids = set(att["server_id"] for att in attachments)
        instance_list = kwargs.get('initial', {}).get('instances', [])
        instances = []
        for instance in instance_list:
            if (instance.status in tables.VOLUME_ATTACH_READY_STATES and
                    instance.id not in attached_ids):
                instances.append((instance.id, '%s (%s)' % (instance.name,
                                                            instance.id)))
        if instances:
            instances.insert(0, ("", _("Select an instance")))
        else:
            instances = (("", _("No instances available")),)
        self.fields['instance'].choices = instances

    def handle(self, request, data):
        """Attach the volume and report the device it landed on."""
        instance_choices = dict(self.fields['instance'].choices)
        instance_name = instance_choices.get(data['instance'],
                                             _("Unknown instance (None)"))
        # The name of the instance in the choices list has the ID appended to
        # it, so let's slice that off...
        instance_name = instance_name.rsplit(" (")[0]

        # api requires non-empty device name or None
        device = data.get('device') or None

        try:
            attach = api.nova.instance_volume_attach(request,
                                                     data['volume_id'],
                                                     data['instance'],
                                                     device)
            volume = cinder.volume_get(request, data['volume_id'])
            message = _('Attaching volume %(vol)s to instance '
                        '%(inst)s on %(dev)s.') % {"vol": volume.name,
                                                   "inst": instance_name,
                                                   "dev": attach.device}
            messages.info(request, message)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to attach volume.'),
                              redirect=redirect)
class CreateSnapshotForm(forms.SelfHandlingForm):
    """Form for creating a snapshot of a volume (forced when in use)."""

    name = forms.CharField(max_length=255, label=_("Snapshot Name"))
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)

    def __init__(self, request, *args, **kwargs):
        super(CreateSnapshotForm, self).__init__(request, *args, **kwargs)

        # populate volume_id
        volume_id = kwargs.get('initial', {}).get('volume_id', [])
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)

    def handle(self, request, data):
        """Create the snapshot, forcing it when the volume is in use."""
        try:
            volume = cinder.volume_get(request,
                                       data['volume_id'])
            force = False
            message = _('Creating volume snapshot "%s".') % data['name']
            if volume.status == 'in-use':
                force = True
                message = _('Forcing to create snapshot "%s" '
                            'from attached volume.') % data['name']
            snapshot = cinder.volume_snapshot_create(request,
                                                     data['volume_id'],
                                                     data['name'],
                                                     data['description'],
                                                     force=force)
            messages.info(request, message)
            return snapshot
        except Exception as e:
            redirect = reverse("horizon:admin:volumes:index")
            msg = _('Unable to create volume snapshot.')
            # Fixed: not every exception carries a `code` attribute, so
            # `e.code` could raise AttributeError inside the handler itself.
            if getattr(e, 'code', None) == 413:
                msg = _('Requested snapshot would exceed the allowed quota.')
            exceptions.handle(request,
                              msg,
                              redirect=redirect)
class CreateTransferForm(forms.SelfHandlingForm):
    """Start an ownership transfer for the volume in ``self.initial``."""
    name = forms.CharField(max_length=255, label=_("Transfer Name"))

    def clean_name(self):
        """Reject transfer names that consist solely of whitespace."""
        name = self.cleaned_data['name']
        if name.isspace():
            self._errors['name'] = self.error_class(
                [_('Volume transfer name cannot be empty.')])
        return name

    def handle(self, request, data):
        """Create the transfer and redirect to its details page."""
        try:
            transfer = cinder.transfer_create(
                request, self.initial['volume_id'], data['name'])
            messages.success(
                request, _('Created volume transfer: "%s".') % data['name'])
            return http.HttpResponseRedirect(
                reverse("horizon:admin:volumes:volumes:show_transfer",
                        args=(transfer.id, transfer.auth_key)))
        except Exception:
            exceptions.handle(
                request, _('Unable to create volume transfer.'),
                redirect=reverse("horizon:admin:volumes:index"))
class AcceptTransferForm(forms.SelfHandlingForm):
    """Accept a volume transfer given its ID and authorization key."""
    # These max lengths correspond to the sizes in cinder
    transfer_id = forms.CharField(max_length=36, label=_("Transfer ID"))
    auth_key = forms.CharField(max_length=16, label=_("Authorization Key"))

    def handle(self, request, data):
        """Accept the transfer; returns it on success, redirects on error."""
        try:
            accepted = cinder.transfer_accept(request,
                                              data['transfer_id'],
                                              data['auth_key'])
        except Exception:
            exceptions.handle(
                request, _('Unable to accept volume transfer.'),
                redirect=reverse("horizon:admin:volumes:index"))
        else:
            messages.success(
                request,
                _('Successfully accepted volume transfer: "%s"')
                % data['transfer_id'])
            return accepted
class ShowTransferForm(forms.SelfHandlingForm):
    """Read-only display of a volume transfer's name, ID and auth key.

    Nothing is submitted back to the API; ``handle`` is a no-op.
    """
    name = forms.CharField(
        required=False,
        label=_("Transfer Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    id = forms.CharField(
        required=False,
        label=_("Transfer ID"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    auth_key = forms.CharField(
        required=False,
        label=_("Authorization Key"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))

    def handle(self, request, data):
        # Purely informational dialog; nothing to do on submit.
        pass
class UpdateForm(forms.SelfHandlingForm):
    """Edit a volume's name, description and (optionally) bootable flag."""
    name = forms.CharField(max_length=255,
                           label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    bootable = forms.BooleanField(label=_("Bootable"),
                                  required=False,
                                  help_text=_("Specifies that the volume can "
                                              "be used to launch an instance"))
    def handle(self, request, data):
        """Apply the edits; returns True on success.

        NOTE(review): the code after the first ``except`` runs only if
        ``exceptions.handle(..., redirect=...)`` aborts the request by
        raising — presumably it does (standard Horizon behavior); confirm.
        """
        volume_id = self.initial['volume_id']
        try:
            cinder.volume_update(request, volume_id, data['name'],
                                 data['description'])
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to update volume.'),
                              redirect=redirect)
        # only update bootable flag if modified
        make_bootable = data['bootable']
        if make_bootable != self.initial['bootable']:
            try:
                cinder.volume_set_bootable(request, volume_id, make_bootable)
            except Exception:
                redirect = reverse("horizon:admin:volumes:index")
                exceptions.handle(request,
                                  _('Unable to set bootable flag on volume.'),
                                  redirect=redirect)
        message = _('Updating volume "%s"') % data['name']
        messages.info(request, message)
        return True
class UploadToImageForm(forms.SelfHandlingForm):
    """Copy a volume's contents into a new Glance image."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    image_name = forms.CharField(max_length=255, label=_('Image Name'))
    disk_format = forms.ChoiceField(label=_('Disk Format'),
                                    widget=forms.ThemableSelectWidget(),
                                    required=False)
    force = forms.BooleanField(
        label=pgettext_lazy("Force upload volume in in-use status to image",
                            u"Force"),
        widget=forms.CheckboxInput(),
        required=False)

    def __init__(self, request, *args, **kwargs):
        super(UploadToImageForm, self).__init__(request, *args, **kwargs)
        # Glance additionally knows 'vhd', 'iso', 'aki', 'ari' and 'ami',
        # while qemu-img additionally knows 'vpc', 'cloop', 'cow' and
        # 'qcow'. Only the intersection ('raw', 'vmdk', 'vdi', 'qcow2')
        # works for both, so restrict the offered choices to those formats.
        self.fields['disk_format'].choices = [
            (fmt, label) for fmt, label in IMAGE_FORMAT_CHOICES
            if fmt in VALID_DISK_FORMATS]
        self.fields['disk_format'].initial = 'raw'
        # 'force' is only meaningful for attached volumes; hide it otherwise.
        if self.initial['status'] != 'in-use':
            self.fields['force'].widget = forms.widgets.HiddenInput()

    def handle(self, request, data):
        """Kick off the upload; returns True on success, redirects on error."""
        volume_id = self.initial['id']
        try:
            # 'aki'/'ari'/'ami' container formats need matching disk
            # formats; for everything else Glance uses 'bare', which is
            # what DEFAULT_CONTAINER_FORMAT supplies here.
            cinder.volume_upload_to_image(request,
                                          volume_id,
                                          data['force'],
                                          data['image_name'],
                                          DEFAULT_CONTAINER_FORMAT,
                                          data['disk_format'])
            messages.info(
                request,
                _('Successfully sent the request to upload volume to image '
                  'for volume: "%s"') % data['name'])
            return True
        except Exception:
            exceptions.handle(
                request,
                _('Unable to upload volume to image for volume: "%s"')
                % data['name'],
                redirect=reverse("horizon:admin:volumes:index"))
class ExtendForm(forms.SelfHandlingForm):
    """Grow a volume to a larger size, subject to the tenant's quota."""
    name = forms.CharField(
        label=_("Volume Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    orig_size = forms.IntegerField(
        label=_("Current Size (GiB)"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    new_size = forms.IntegerField(label=_("New Size (GiB)"))

    def clean(self):
        """Validate that the new size is larger and fits within the quota.

        Adds errors on 'new_size' when validation fails; always returns
        the cleaned data dict.
        """
        cleaned_data = super(ExtendForm, self).clean()
        new_size = cleaned_data.get('new_size')
        # Fix: when the IntegerField itself failed validation (e.g.
        # non-numeric input), 'new_size' is absent and comparing None
        # against an int raised TypeError. The field error is already
        # recorded, so just bail out.
        if new_size is None:
            return cleaned_data
        orig_size = self.initial['orig_size']
        if new_size <= orig_size:
            error_msg = _("New size must be greater than current size.")
            self._errors['new_size'] = self.error_class([error_msg])
            return cleaned_data
        # Only the size delta counts against the remaining gigabyte quota.
        usages = quotas.tenant_limit_usages(self.request)
        availableGB = (usages['maxTotalVolumeGigabytes'] -
                       usages['gigabytesUsed'])
        if availableGB < (new_size - orig_size):
            message = _('Volume cannot be extended to %(req)iGiB as '
                        'you only have %(avail)iGiB of your quota '
                        'available.')
            params = {'req': new_size, 'avail': availableGB}
            self._errors["new_size"] = self.error_class([message % params])
        return cleaned_data

    def handle(self, request, data):
        """Request the extension; returns the volume on success."""
        volume_id = self.initial['id']
        try:
            volume = cinder.volume_extend(request,
                                          volume_id,
                                          data['new_size'])
            message = _('Extending volume: "%s"') % data['name']
            messages.info(request, message)
            return volume
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to extend volume.'),
                              redirect=redirect)
class RetypeForm(forms.SelfHandlingForm):
    """Change a volume's type, optionally migrating its data."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    volume_type = forms.ThemableChoiceField(label=_('Type'))
    # Migration policies accepted by cinder's retype API.
    MIGRATION_POLICY_CHOICES = [('never', _('Never')),
                                ('on-demand', _('On Demand'))]
    migration_policy = forms.ChoiceField(label=_('Migration Policy'),
                                         widget=forms.ThemableSelectWidget(),
                                         choices=(MIGRATION_POLICY_CHOICES),
                                         initial='never',
                                         required=False)
    def __init__(self, request, *args, **kwargs):
        super(RetypeForm, self).__init__(request, *args, **kwargs)
        try:
            volume_types = cinder.volume_type_list(request)
        except Exception:
            # NOTE(review): if exceptions.handle() with a redirect does not
            # abort by raising, 'volume_types' is unbound below (NameError).
            # Presumably it does raise (standard Horizon behavior); confirm.
            redirect_url = reverse("horizon:admin:volumes:index")
            error_message = _('Unable to retrieve the volume type list.')
            exceptions.handle(request, error_message, redirect=redirect_url)
        origin_type = self.initial['volume_type']
        # Offer every type except the volume's current one.
        types_list = [(t.name, t.name)
                      for t in volume_types
                      if t.name != origin_type]
        if types_list:
            types_list.insert(0, ("", _("Select a new volume type")))
        else:
            types_list.insert(0, ("", _("No other volume types available")))
        self.fields['volume_type'].choices = sorted(types_list)
    def handle(self, request, data):
        """Request the retype; returns True on success, redirects on error."""
        volume_id = self.initial['id']
        try:
            cinder.volume_retype(request,
                                 volume_id,
                                 data['volume_type'],
                                 data['migration_policy'])
            message = _(
                'Successfully sent the request to change the volume '
                'type to "%(vtype)s" for volume: "%(name)s"')
            params = {'name': data['name'],
                      'vtype': data['volume_type']}
            messages.info(request, message % params)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            error_message = _(
                'Unable to change the volume type for volume: "%s"') \
                % data['name']
            exceptions.handle(request, error_message, redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
    """Admin-only form to reset a volume's status in cinder."""
    status = forms.ChoiceField(label=_("Status"))

    def __init__(self, request, *args, **kwargs):
        # Fix: the original called super(forms.SelfHandlingForm, self),
        # which skipped SelfHandlingForm.__init__ entirely and only ran
        # the base Form initializer. Name this class instead.
        super(UpdateStatus, self).__init__(request, *args, **kwargs)
        # This set of states was culled from cinder's admin_actions.py
        self.fields['status'].choices = (
            ('attaching', _('Attaching')),
            ('available', _('Available')),
            ('creating', _('Creating')),
            ('deleting', _('Deleting')),
            ('detaching', _('Detaching')),
            ('error', _('Error')),
            ('error_deleting', _('Error Deleting')),
            ('in-use', _('In Use')),
        )

    def handle(self, request, data):
        """Reset the volume's status; returns True/False for success/failure."""
        # Obtain the localized status for including in the message
        for choice in self.fields['status'].choices:
            if choice[0] == data['status']:
                new_status = choice[1]
                break
        else:
            # Fall back to the raw value if it isn't one of the choices.
            new_status = data['status']
        try:
            cinder.volume_reset_state(request,
                                      self.initial['volume_id'],
                                      data['status'])
            messages.success(request,
                             _('Successfully updated volume status to "%s".') %
                             new_status)
            return True
        except Exception:
            exceptions.handle(request,
                              _('Unable to update volume status to "%s".') %
                              new_status)
            return False
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font settings for heatmap hover labels.

    NOTE(review): appears to be auto-generated plotly code (mirrors the
    ``heatmap.hoverlabel.font`` schema node); prefer regenerating over
    hand-editing. Exposes ``color``, ``family``, ``size`` and their
    Chart Studio ``*src`` source-column counterparts as validated
    attributes via the base hierarchy type.
    """

    # class properties
    # --------------------
    _parent_path_str = "heatmap.hoverlabel"
    _path_str = "heatmap.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.heatmap.hoverlabel.Font`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal fast path used when the parent constructs children.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so popping properties below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmap.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.hoverlabel.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property, an explicit keyword argument wins over the
        # corresponding entry in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| |
import pytest
import numpy as np
import itertools
from scipy.special import laguerre
from numpy.random import rand
from numpy.testing import assert_, run_module_suite, assert_equal, \
assert_almost_equal, assert_allclose
import qutip
from qutip.states import coherent, fock, ket, bell_state
from qutip.wigner import wigner, wigner_transform, _parity
from qutip.random_objects import rand_dm, rand_ket
class TestHusimiQ:
    """Tests for qutip.qfunc / qutip.QFunc (Husimi Q function).

    Covers input validation (coordinate arrays, state types), equivalence
    of the functional and class-based APIs, and numerical agreement with
    a naive coherent-state implementation.
    """
    @pytest.mark.parametrize('xs', ["", 1, None], ids=['str', 'int', 'none'])
    def test_failure_if_non_arraylike_coordinates(self, xs):
        # Non-array-like coordinates must be rejected on either axis,
        # for both the function and the class constructor.
        state = qutip.rand_ket(4)
        valid = np.linspace(-1, 1, 5)
        with pytest.raises(TypeError) as e:
            qutip.qfunc(state, xs, valid)
        assert "must be array-like" in e.value.args[0]
        with pytest.raises(TypeError) as e:
            qutip.qfunc(state, valid, xs)
        assert "must be array-like" in e.value.args[0]
        with pytest.raises(TypeError) as e:
            qutip.QFunc(xs, valid)
        assert "must be array-like" in e.value.args[0]
        with pytest.raises(TypeError) as e:
            qutip.QFunc(valid, xs)
        assert "must be array-like" in e.value.args[0]
    @pytest.mark.parametrize('ndim', [2, 3])
    def test_failure_if_coordinates_not_1d(self, ndim):
        state = qutip.rand_ket(4)
        valid = np.linspace(-1, 1, 5)
        # Reshape the valid vector into a 2D/3D array to trigger the check.
        bad = valid.reshape((-1,) + (1,)*(ndim - 1))
        with pytest.raises(ValueError) as e:
            qutip.qfunc(state, bad, valid)
        assert "must be 1D" in e.value.args[0]
        with pytest.raises(ValueError) as e:
            qutip.qfunc(state, valid, bad)
        assert "must be 1D" in e.value.args[0]
        with pytest.raises(ValueError) as e:
            qutip.QFunc(bad, valid)
        assert "must be 1D" in e.value.args[0]
        with pytest.raises(ValueError) as e:
            qutip.QFunc(valid, bad)
        assert "must be 1D" in e.value.args[0]
    @pytest.mark.parametrize('dm', [True, False], ids=['dm', 'ket'])
    def test_failure_if_tensor_hilbert_space(self, dm):
        # Composite (tensor-product) Hilbert spaces are not supported.
        if dm:
            state = qutip.rand_dm(4, dims=[[2, 2], [2, 2]])
        else:
            state = qutip.rand_ket(4, dims=[[2, 2], [1, 1]])
        xs = np.linspace(-1, 1, 5)
        with pytest.raises(ValueError) as e:
            qutip.qfunc(state, xs, xs)
        assert "must not have tensor structure" in e.value.args[0]
        with pytest.raises(ValueError) as e:
            qutip.QFunc(xs, xs)(state)
        assert "must not have tensor structure" in e.value.args[0]
    def test_QFunc_raises_if_insufficient_memory(self):
        # memory=0 forbids any precomputation, so the class must refuse.
        xs = np.linspace(-1, 1, 11)
        state = qutip.rand_ket(4)
        qfunc = qutip.QFunc(xs, xs, memory=0)
        with pytest.raises(MemoryError) as e:
            qfunc(state)
        assert e.value.args[0].startswith("Refusing to precompute")
    def test_qfunc_warns_if_insufficient_memory(self):
        # The function form degrades gracefully: warn and iterate instead.
        xs = np.linspace(-1, 1, 11)
        state = qutip.rand_dm(4)
        with pytest.warns(UserWarning) as e:
            qutip.qfunc(state, xs, xs, precompute_memory=0)
        assert (
            e[0].message.args[0]
            .startswith("Falling back to iterative algorithm")
        )
    @pytest.mark.parametrize('obj', [
        pytest.param(np.eye(2, dtype=np.complex128), id='ndarray'),
        pytest.param([[1, 0], [0, 1]], id='list'),
        pytest.param(1, id='int'),
    ])
    def test_failure_if_not_a_Qobj(self, obj):
        xs = np.linspace(-1, 1, 11)
        with pytest.raises(TypeError) as e:
            qutip.qfunc(obj, xs, xs)
        assert e.value.args[0].startswith("state must be Qobj")
        qfunc = qutip.QFunc(xs, xs)
        with pytest.raises(TypeError) as e:
            qfunc(obj)
        assert e.value.args[0].startswith("state must be Qobj")
    # Use indirection so that the tests can still be collected if there's a bug
    # in the generating QuTiP functions.
    @pytest.mark.parametrize('state', [
        pytest.param(lambda: qutip.rand_super(2), id='super'),
        pytest.param(lambda: qutip.rand_ket(2).dag(), id='bra'),
        pytest.param(lambda: 1j*qutip.rand_dm(2), id='non-dm operator'),
        pytest.param(lambda: qutip.Qobj([[1, 0], [0, 0]], dims=[[2], [2, 1]]),
                     id='nonsquare dm'),
        pytest.param(lambda: qutip.operator_to_vector(qutip.qeye(2)),
                     id='operator-ket'),
        pytest.param(lambda: qutip.operator_to_vector(qutip.qeye(2)).dag(),
                     id='operator-bra'),
    ])
    def test_failure_if_not_a_state(self, state):
        xs = np.linspace(-1, 1, 11)
        # Call the factory here, not at collection time (see note above).
        state = state()
        with pytest.raises(ValueError) as e:
            qutip.qfunc(state, xs, xs)
        assert (
            e.value.args[0].startswith("state must be a ket or density matrix")
        )
        qfunc = qutip.QFunc(xs, xs)
        with pytest.raises(ValueError) as e:
            qfunc(state)
        assert (
            e.value.args[0].startswith("state must be a ket or density matrix")
        )
    @pytest.mark.parametrize('g', [
        pytest.param(np.sqrt(2), id='natural units'),
        pytest.param(1, id='arb units'),
    ])
    @pytest.mark.parametrize('n_ys', [5, 101])
    @pytest.mark.parametrize('n_xs', [5, 101])
    @pytest.mark.parametrize('dm', [True, False], ids=['dm', 'ket'])
    @pytest.mark.parametrize('size', [5, 32])
    def test_function_and_class_are_equivalent(self, size, dm, n_xs, n_ys, g):
        # qutip.qfunc(...) and qutip.QFunc(...)(state) must agree exactly.
        xs = np.linspace(-1, 1, n_xs)
        ys = np.linspace(0, 2, n_ys)
        state = qutip.rand_dm(size) if dm else qutip.rand_ket(size)
        function = qutip.qfunc(state, xs, ys, g)
        class_ = qutip.QFunc(xs, ys, g)(state)
        np.testing.assert_allclose(function, class_)
    @pytest.mark.parametrize('g', [
        pytest.param(np.sqrt(2), id='natural units'),
        pytest.param(1, id='arb units'),
    ])
    @pytest.mark.parametrize('n_ys', [5, 101])
    @pytest.mark.parametrize('n_xs', [5, 101])
    @pytest.mark.parametrize('size', [5, 32])
    def test_iterate_and_precompute_are_equivalent(self, size, n_xs, n_ys, g):
        # The iterative and precomputed code paths must give the same result.
        xs = np.linspace(-1, 1, n_xs)
        ys = np.linspace(0, 2, n_ys)
        state = qutip.rand_dm(size)
        iterate = qutip.qfunc(state, xs, ys, g, precompute_memory=None)
        precompute = qutip.qfunc(state, xs, ys, g, precompute_memory=np.inf)
        np.testing.assert_allclose(iterate, precompute)
    @pytest.mark.parametrize('initial_size', [5, 8])
    @pytest.mark.parametrize('dm', [True, False], ids=['dm', 'ket'])
    def test_same_class_can_take_many_sizes(self, dm, initial_size):
        # A single QFunc instance should handle states of varying dimension.
        xs = np.linspace(-1, 1, 11)
        ys = np.linspace(0, 2, 11)
        shape = np.meshgrid(xs, ys)[0].shape
        sizes = initial_size + np.array([0, 1, -1, 4])
        qfunc = qutip.QFunc(xs, ys)
        for size in sizes:
            state = qutip.rand_dm(size) if dm else qutip.rand_ket(size)
            out = qfunc(state)
            assert isinstance(out, np.ndarray)
            assert out.shape == shape
    @pytest.mark.parametrize('dm_first', [True, False])
    def test_same_class_can_mix_ket_and_dm(self, dm_first):
        # Alternating kets and density matrices through one instance.
        dms = [True, False, True, False]
        if not dm_first:
            dms = dms[::-1]
        xs = np.linspace(-1, 1, 11)
        ys = np.linspace(0, 2, 11)
        shape = np.meshgrid(xs, ys)[0].shape
        qfunc = qutip.QFunc(xs, ys)
        for dm in dms:
            state = qutip.rand_dm(4) if dm else qutip.rand_ket(4)
            out = qfunc(state)
            assert isinstance(out, np.ndarray)
            assert out.shape == shape
    @pytest.mark.parametrize('n_ys', [5, 101])
    @pytest.mark.parametrize('n_xs', [5, 101])
    @pytest.mark.parametrize('mix', [0.1, 0.5])
    def test_qfunc_is_linear(self, n_xs, n_ys, mix):
        # Q(a*rho1 + (1-a)*rho2) == a*Q(rho1) + (1-a)*Q(rho2)
        xs = np.linspace(-1, 1, n_xs)
        ys = np.linspace(-1, 1, n_ys)
        qfunc = qutip.QFunc(xs, ys)
        left, right = qutip.rand_dm(5), qutip.rand_dm(5)
        qleft, qright = qfunc(left), qfunc(right)
        qboth = qfunc(mix*left + (1-mix)*right)
        np.testing.assert_allclose(mix*qleft + (1-mix)*qright, qboth)
    @pytest.mark.parametrize('n_ys', [5, 101])
    @pytest.mark.parametrize('n_xs', [5, 101])
    @pytest.mark.parametrize('size', [5, 32])
    def test_ket_and_dm_give_same_result(self, n_xs, n_ys, size):
        # |psi> and |psi><psi| must yield the same Q function.
        xs = np.linspace(-1, 1, n_xs)
        ys = np.linspace(-1, 1, n_ys)
        state = qutip.rand_ket(size)
        qfunc = qutip.QFunc(xs, ys)
        np.testing.assert_allclose(qfunc(state), qfunc(state.proj()))
    @pytest.mark.parametrize('g', [
        pytest.param(np.sqrt(2), id='natural units'),
        pytest.param(1, id='arb units'),
    ])
    @pytest.mark.parametrize('ys', [
        pytest.param(np.linspace(-1, 1, 5), id='(-1,1,5)'),
        pytest.param(np.linspace(0, 2, 3), id='(0,2,3)'),
    ])
    @pytest.mark.parametrize('xs', [
        pytest.param(np.linspace(-1, 1, 5), id='(-1,1,5)'),
        pytest.param(np.linspace(0, 2, 3), id='(0,2,3)'),
    ])
    @pytest.mark.parametrize('size', [3, 5])
    def test_against_naive_implementation(self, xs, ys, g, size):
        # Direct evaluation of Q(alpha) = <alpha|rho|alpha> * (g/2)^2 / pi
        # using analytic coherent states as the reference.
        state = qutip.rand_dm(size)
        state_np = state.full()
        x, y = np.meshgrid(xs, ys)
        alphas = 0.5*g * (x + 1j*y)
        naive = np.empty(alphas.shape, dtype=np.float64)
        for i, alpha in enumerate(alphas.flat):
            coh = qutip.coherent(size, alpha, method='analytic').full()
            naive.flat[i] = (coh.conj().T @ state_np @ coh).real
        naive *= (0.5*g)**2 / np.pi
        np.testing.assert_allclose(naive, qutip.qfunc(state, xs, ys, g))
        np.testing.assert_allclose(naive, qutip.QFunc(xs, ys, g)(state))
def test_wigner_bell1_su2parity():
    """wigner: testing the SU2 parity of the first Bell state.

    Compares wigner_transform against the closed-form spin-1/2 Wigner
    function of |Phi+> evaluated on an equal-angle slice (theta and phi
    shared between both qubits).
    """
    psi = bell_state('00')
    steps = 25
    # One (theta, phi) grid row per qubit; both rows are identical here.
    theta = np.tile(np.linspace(0, np.pi, steps), 2).reshape(2, steps)
    phi = np.tile(np.linspace(0, 2 * np.pi, steps), 2).reshape(2, steps)
    slicearray = ['l', 'l']
    wigner_analyt = np.zeros((steps, steps))
    for t in range(steps):
        for p in range(steps):
            # Analytic two-qubit SU(2) Wigner function of |Phi+>.
            wigner_analyt[t, p] = np.real(((1 + np.sqrt(3)
                                            * np.cos(theta[0, t]))
                                           * (1 + np.sqrt(3)
                                              * np.cos(theta[1, t]))
                                           + 3 * (np.sin(theta[0, t])
                                                  * np.exp(-1j * phi[0, p])
                                                  * np.sin(theta[1, t])
                                                  * np.exp(-1j * phi[1, p])
                                                  + np.sin(theta[0, t])
                                                  * np.exp(1j * phi[0, p])
                                                  * np.sin(theta[1, t])
                                                  * np.exp(1j * phi[1, p]))
                                           + (1 - np.sqrt(3)
                                              * np.cos(theta[0, t]))
                                           * (1 - np.sqrt(3)
                                              * np.cos(theta[1, t]))) / 8.)
    wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
    assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11)
@pytest.mark.slow
def test_wigner_bell4_su2parity():
    """wigner: testing the SU2 parity of the fourth Bell state.
    """
    psi = bell_state('11')
    steps = 25
    # The SU(2) Wigner function of the fourth Bell state is flat at -1/2
    # on this slice, so the analytic reference is a constant array.
    wigner_analyt = np.full((steps, steps), -0.5)
    wigner_theo = wigner_transform(psi, 0.5, False, steps, ['l', 'l'])
    assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11)
@pytest.mark.slow
def test_wigner_bell4_fullparity():
    """wigner: testing the parity of the fourth Bell state using the parity of
    the full space.
    """
    psi = bell_state('11')
    steps = 25
    # With the full-space parity operator the reference value is a
    # constant ~-0.30901699 across the whole grid.
    wigner_analyt = np.full((steps, steps), -0.30901699)
    print("wigner anal: ", wigner_analyt)
    wigner_theo = wigner_transform(psi, 0.5, True, steps, ['l', 'l'])
    print("wigner theo: ", wigner_theo)
    assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-4)
def test_parity():
    """wigner: testing the parity function.

    Checks the four entries of the 2x2 spin-1/2 parity matrix against
    their analytic values.
    """
    j = 0.5
    # Compute the matrix once instead of four times.
    par = _parity(2, j)
    # Fix: the original asserted ``difference < 1e-11`` without abs(),
    # which passes vacuously whenever the difference is negative. Wrap
    # each comparison in abs() so the assertions actually constrain the
    # values.
    assert_(abs(par[0, 0] - (1 - np.sqrt(3)) / 2.) < 1e-11)
    assert_(abs(par[0, 1]) < 1e-11)
    assert_(abs(par[1, 1] - (1 + np.sqrt(3)) / 2.) < 1e-11)
    assert_(abs(par[1, 0]) < 1e-11)
@pytest.mark.slow
def test_wigner_pure_su2():
    """wigner: testing the SU2 wigner transformation of a pure state.
    """
    psi = (ket([1]))
    steps = 25
    theta = np.linspace(0, np.pi, steps)
    # The analytic Wigner function of |1> depends on theta only, so build
    # one row per theta value and repeat it across the phi axis.
    profile = (1 + np.sqrt(3) * np.cos(theta)) / 2.
    wigner_analyt = np.repeat(profile[:, None], steps, axis=1)
    wigner_theo = wigner_transform(psi, 0.5, False, steps, ['l'])
    assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11)
@pytest.mark.slow
def test_wigner_ghz_su2parity():
    """wigner: testing the SU2 wigner transformation of the GHZ state.

    Compares wigner_transform against the closed-form three-qubit SU(2)
    Wigner function of (|000> + |111>)/sqrt(2) on an equal-angle slice.
    """
    psi = (ket([0, 0, 0]) + ket([1, 1, 1])) / np.sqrt(2)
    steps = 25
    N = 3
    # One (theta, phi) grid row per qubit; all rows are identical here.
    theta = np.tile(np.linspace(0, np.pi, steps), N).reshape(N, steps)
    phi = np.tile(np.linspace(0, 2 * np.pi, steps), N).reshape(N, steps)
    slicearray = ['l', 'l', 'l']
    wigner_analyt = np.zeros((steps, steps))
    for t in range(steps):
        for p in range(steps):
            # Analytic three-qubit SU(2) Wigner function of the GHZ state.
            wigner_analyt[t, p] = np.real(((1 + np.sqrt(3)*np.cos(theta[0, t]))
                                           * (1 + np.sqrt(3)
                                              * np.cos(theta[1, t]))
                                           * (1 + np.sqrt(3)
                                              * np.cos(theta[2, t]))
                                           + 3**(3 / 2) * (np.sin(theta[0, t])
                                                           * np.exp(-1j * phi[0, p])
                                                           * np.sin(theta[1, t])
                                                           * np.exp(-1j * phi[1, p])
                                                           * np.sin(theta[2, t])
                                                           * np.exp(-1j * phi[2, p])
                                                           + np.sin(theta[0, t])
                                                           * np.exp(1j * phi[0, p])
                                                           * np.sin(theta[1, t])
                                                           * np.exp(1j * phi[1, p])
                                                           * np.sin(theta[2, t])
                                                           * np.exp(1j * phi[2, p]))
                                           + (1 - np.sqrt(3)
                                              * np.cos(theta[0, t]))
                                           * (1 - np.sqrt(3)
                                              * np.cos(theta[1, t]))
                                           * (1 - np.sqrt(3)
                                              * np.cos(theta[2, t]))) / 16.)
    wigner_theo = wigner_transform(psi, 0.5, False, steps, slicearray)
    assert_(np.sum(np.abs(wigner_analyt - wigner_theo)) < 1e-11)
@pytest.mark.slow
def test_angle_slicing():
    """wigner: tests angle slicing.

    All four Bell states, transformed with the matching angle slice,
    must give the same Wigner function.
    """
    steps = 25
    j = 0.5
    cases = [
        (bell_state('00'), ['l', 'l']),
        (bell_state('01'), ['l', 'z']),
        (bell_state('10'), ['l', 'x']),
        (bell_state('11'), ['l', 'y']),
    ]
    wigner1, wigner2, wigner3, wigner4 = [
        wigner_transform(psi, j, False, steps, slices)
        for psi, slices in cases]
    for first, second in [(wigner2, wigner1), (wigner3, wigner2),
                          (wigner4, wigner3), (wigner4, wigner1)]:
        assert_(np.sum(np.abs(first - second)) < 1e-11)
def test_wigner_coherent():
    """wigner: test wigner function calculation for coherent states.

    Compares the 'iterative' (default) and 'clenshaw' methods against
    the analytic Gaussian, and checks that each integrates to 1.
    """
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 20
    beta = rand() + rand() * 1.0j
    psi = coherent(N, beta)
    # calculate the wigner function using qutip and analytic formula
    W_qutip = wigner(psi, xvec, yvec, g=2)
    W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
    W_analytic = 2 / np.pi * np.exp(-2 * abs(a - beta) ** 2)
    # check difference
    assert_(np.sum(abs(W_qutip - W_analytic) ** 2) < 1e-4)
    assert_(np.sum(abs(W_qutip_cl - W_analytic) ** 2) < 1e-4)
    # check normalization
    # Fix: without abs() these held trivially whenever the integral fell
    # below 1, so a badly normalized result could slip through.
    assert_(abs(np.sum(W_qutip) * dx * dy - 1.0) < 1e-8)
    assert_(abs(np.sum(W_qutip_cl) * dx * dy - 1.0) < 1e-8)
    assert_(abs(np.sum(W_analytic) * dx * dy - 1.0) < 1e-8)
def test_wigner_fock():
    """wigner: test wigner function calculation for Fock states.

    Compares the default, 'clenshaw', and sparse-'clenshaw' methods
    against the analytic Laguerre form, and checks normalization.
    """
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    X, Y = np.meshgrid(xvec, yvec)
    a = X + 1j * Y  # consistent with g=2 option to wigner function
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in [2, 3, 4, 5, 6]:
        psi = fock(N, n)
        # calculate the wigner function using qutip and analytic formula
        W_qutip = wigner(psi, xvec, yvec, g=2)
        W_qutip_cl = wigner(psi, xvec, yvec, g=2, method='clenshaw')
        W_qutip_sparse = wigner(psi, xvec, yvec, g=2, sparse=True, method='clenshaw')
        W_analytic = 2 / np.pi * (-1) ** n * \
            np.exp(-2 * abs(a) ** 2) * np.polyval(laguerre(n), 4 * abs(a) ** 2)
        # check difference
        assert_(np.sum(abs(W_qutip - W_analytic)) < 1e-4)
        assert_(np.sum(abs(W_qutip_cl - W_analytic)) < 1e-4)
        assert_(np.sum(abs(W_qutip_sparse - W_analytic)) < 1e-4)
        # check normalization
        # Fix: without abs() these held trivially whenever the integral
        # fell below 1, so a badly normalized result could slip through.
        assert_(abs(np.sum(W_qutip) * dx * dy - 1.0) < 1e-8)
        assert_(abs(np.sum(W_qutip_cl) * dx * dy - 1.0) < 1e-8)
        assert_(abs(np.sum(W_qutip_sparse) * dx * dy - 1.0) < 1e-8)
        assert_(abs(np.sum(W_analytic) * dx * dy - 1.0) < 1e-8)
def test_wigner_compare_methods_dm():
    "wigner: compare wigner methods for random density matrices"
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random density matrices
        rho = rand_dm(N, 0.5 + rand() / 2)
        # calculate the wigner function using qutip and analytic formula
        W_qutip1 = wigner(rho, xvec, yvec, g=2)
        W_qutip2 = wigner(rho, xvec, yvec, g=2, method='laguerre')
        # check difference -- BUG FIX: the original compared W_qutip1 with
        # itself (trivially zero), so the 'laguerre' result was never checked.
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization -- BUG FIX: abs() so a deficit also fails
        assert_(abs(np.sum(W_qutip1) * dx * dy - 1.0) < 1e-8)
        assert_(abs(np.sum(W_qutip2) * dx * dy - 1.0) < 1e-8)
def test_wigner_compare_methods_ket():
    "wigner: compare wigner methods for random state vectors"
    xvec = np.linspace(-5.0, 5.0, 100)
    yvec = xvec
    dx = xvec[1] - xvec[0]
    dy = yvec[1] - yvec[0]
    N = 15
    for n in range(10):
        # try ten different random state vectors
        psi = rand_ket(N, 0.5 + rand() / 2)
        # calculate the wigner function with the dense and sparse code paths
        W_qutip1 = wigner(psi, xvec, yvec, g=2)
        W_qutip2 = wigner(psi, xvec, yvec, g=2, sparse=True)
        # check difference
        assert_(np.sum(abs(W_qutip1 - W_qutip2)) < 1e-4)
        # check normalization -- BUG FIX: abs() so a deficit also fails
        assert_(abs(np.sum(W_qutip1) * dx * dy - 1.0) < 1e-8)
        assert_(abs(np.sum(W_qutip2) * dx * dy - 1.0) < 1e-8)
def test_wigner_fft_comparse_ket():
    "Wigner: Compare Wigner fft and iterative for rand. ket"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    # Three random kets; the fft and iterative backends must agree.
    for _ in range(3):
        psi = rand_ket(N)
        W_fft, yvec = wigner(psi, xvec, xvec, method='fft')
        W_iter = wigner(psi, xvec, yvec, method='iterative')
        assert_equal(np.sum(np.abs(W_iter - W_fft)) < 1e-7, True)
def test_wigner_fft_comparse_dm():
    "Wigner: Compare Wigner fft and iterative for rand. dm"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    # Three random density matrices; fft and iterative backends must agree.
    for _ in range(3):
        state = rand_dm(N)
        W_fft, yvec = wigner(state, xvec, xvec, method='fft')
        W_iter = wigner(state, xvec, yvec, method='iterative')
        assert_equal(np.sum(np.abs(W_iter - W_fft)) < 1e-7, True)
def test_wigner_clenshaw_iter_dm():
    "Wigner: Compare Wigner clenshaw and iterative for rand. dm"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    # Clenshaw evaluation must match the reference iterative evaluation.
    for _ in range(3):
        state = rand_dm(N)
        W_clenshaw = wigner(state, xvec, xvec, method='clenshaw')
        W_iter = wigner(state, xvec, xvec, method='iterative')
        assert_equal(np.sum(np.abs(W_iter - W_clenshaw)) < 1e-7, True)
def test_wigner_clenshaw_sp_iter_dm():
    "Wigner: Compare Wigner sparse clenshaw and iterative for rand. dm"
    N = 20
    xvec = np.linspace(-10, 10, 128)
    # Sparse clenshaw path must match the reference iterative evaluation.
    for _ in range(3):
        state = rand_dm(N)
        W_clenshaw = wigner(state, xvec, xvec, method='clenshaw', sparse=True)
        W_iter = wigner(state, xvec, xvec, method='iterative')
        assert_equal(np.sum(np.abs(W_iter - W_clenshaw)) < 1e-7, True)
@pytest.mark.parametrize(['spin'], [
    pytest.param(1/2, id="spin-one-half"),
    pytest.param(3, id="spin-three"),
    pytest.param(13/2, id="spin-thirteen-half"),
    pytest.param(7, id="spin-seven")
])
@pytest.mark.parametrize("pure", [
    pytest.param(True, id="pure"),
    pytest.param(False, id="mixed")
])
def test_spin_q_function(spin, pure):
    """Spin Q function matches <theta,phi|rho|theta,phi> computed directly."""
    d = int(2*spin + 1)
    rho = rand_dm(d, pure=pure)
    # Points at which to evaluate the spin Q function
    theta = np.linspace(0, np.pi, 16, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 32, endpoint=True)
    Q, _, _ = qutip.spin_q_function(rho, theta, phi)
    # Q.flat iterates the grid in the same (phi, theta) product order used
    # here; each value must equal the coherent-state expectation of rho.
    for k, (phi_prime, theta_prime) in enumerate(itertools.product(phi, theta)):
        state = qutip.spin_coherent(spin, theta_prime, phi_prime)
        # .norm() extracts the scalar magnitude of the 1x1 operator product
        direct_Q = (state.dag() * rho * state).norm()
        assert_almost_equal(Q.flat[k], direct_Q, decimal=9)
@pytest.mark.parametrize(['spin'], [
    pytest.param(1/2, id="spin-one-half"),
    pytest.param(3, id="spin-three"),
    pytest.param(13/2, id="spin-thirteen-half"),
    pytest.param(7, id="spin-seven")
])
@pytest.mark.parametrize("pure", [
    pytest.param(True, id="pure"),
    pytest.param(False, id="mixed")
])
def test_spin_q_function_normalized(spin, pure):
    """Spin Q function of a random state integrates to one over the sphere."""
    d = int(2 * spin + 1)
    rho = rand_dm(d, pure=pure)
    # Points at which to evaluate the spin Q function
    theta = np.linspace(0, np.pi, 128, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    Q, THETA, _ = qutip.spin_q_function(rho, theta, phi)
    # Double trapezoidal rule over the sphere; d/(4*pi) is the coherent-state
    # resolution-of-identity prefactor for dimension d.
    norm = d / (4 * np.pi) * np.trapz(np.trapz(Q * np.sin(THETA), theta), phi)
    assert_almost_equal(norm, 1, decimal=4)
@pytest.mark.parametrize(["spin"], [
pytest.param(1/2, id="spin-one-half"),
pytest.param(1, id="spin-one"),
pytest.param(3/2, id="spin-three-half"),
pytest.param(2, id="spin-two")
])
@pytest.mark.parametrize("pure", [
pytest.param(True, id="pure"),
pytest.param(False, id="mixed")
])
def test_spin_wigner_normalized(spin, pure):
d = int(2*spin + 1)
rho = rand_dm(d, pure=pure)
# Points at which to evaluate the spin Wigner function
theta = np.linspace(0, np.pi, 256, endpoint=True)
phi = np.linspace(-np.pi, np.pi, 512, endpoint=True)
W, THETA, PHI = qutip.spin_wigner(rho, theta, phi)
norm = np.trapz(np.trapz(W * np.sin(THETA) * np.sqrt(d / (4*np.pi)), theta), phi)
assert_almost_equal(norm, 1, decimal=4)
@pytest.mark.parametrize(['spin'], [
    pytest.param(1 / 2, id="spin-one-half"),
    pytest.param(1, id="spin-one"),
    pytest.param(3 / 2, id="spin-three-half"),
    pytest.param(2, id="spin-two")
])
@pytest.mark.parametrize("pure", [
    pytest.param(True, id="pure"),
    pytest.param(False, id="mixed")
])
def test_spin_wigner_overlap(spin, pure, n=5):
    """Overlap of two spin Wigner functions equals Tr(rho1 * rho2).

    n is the number of random test states compared against rho.
    """
    d = int(2*spin + 1)
    rho = rand_dm(d, pure=pure)
    # Points at which to evaluate the spin Wigner function
    theta = np.linspace(0, np.pi, 256, endpoint=True)
    phi = np.linspace(-np.pi, np.pi, 512, endpoint=True)
    W, THETA, _ = qutip.spin_wigner(rho, theta, phi)
    for k in range(n):
        test_state = rand_dm(d)
        # Hilbert-Schmidt overlap computed directly from the operators
        state_overlap = (test_state*rho).tr().real
        # ... and via the phase-space integral of the two Wigner functions
        W_state, _, _ = qutip.spin_wigner(test_state, theta, phi)
        W_overlap = np.trapz(
            np.trapz(W_state * W * np.sin(THETA), theta), phi).real
        assert_almost_equal(W_overlap, state_overlap, decimal=4)
# Allow running this test module directly as a script (numpy.testing runner).
if __name__ == "__main__":
    run_module_suite()
| |
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci import utils
from nova.pci import whitelist
LOG = logging.getLogger(__name__)
class PciDeviceStats(object):

    """PCI devices summary information.

    According to the PCI SR-IOV spec, a PCI physical function can have up to
    256 PCI virtual functions, thus the number of assignable PCI functions in
    a cloud can be big. The scheduler needs to know all device availability
    information in order to determine which compute hosts can support a PCI
    request. Passing individual virtual device information to the scheduler
    does not scale, so we provide summary information.

    Usually the virtual functions provided by a host PCI device have the same
    value for most properties, like vendor_id, product_id and class type.
    The PCI stats class summarizes this information for the scheduler.

    The pci stats information is maintained exclusively by compute node
    resource tracker and updated to database. The scheduler fetches the
    information and selects the compute node accordingly. If a compute
    node is selected, the resource tracker allocates the devices to the
    instance and updates the pci stats information.

    This summary information will be helpful for cloud management also.
    """

    # Attributes that partition devices into pools.
    pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']

    def __init__(self, stats=None):
        super(PciDeviceStats, self).__init__()
        # NOTE(sbauza): Stats are a PCIDevicePoolList object
        self.pools = [pci_pool.to_dict()
                      for pci_pool in stats] if stats else []
        # Sort by number of pool attributes so matching order is deterministic
        # (pools with fewer keys are tried first).
        self.pools.sort(key=lambda item: len(item))

    def _equal_properties(self, dev, entry, matching_keys):
        """Return True if dev and entry agree on every key in matching_keys."""
        return all(dev.get(prop) == entry.get(prop)
                   for prop in matching_keys)

    def _find_pool(self, dev_pool):
        """Return the first pool that matches dev, or None."""
        for pool in self.pools:
            pool_keys = pool.copy()
            # 'count' and 'devices' are bookkeeping, not identity keys.
            del pool_keys['count']
            del pool_keys['devices']
            if (len(pool_keys.keys()) == len(dev_pool.keys()) and
                    self._equal_properties(dev_pool, pool_keys,
                                           dev_pool.keys())):
                return pool

    def _create_pool_keys_from_dev(self, dev):
        """create a stats pool dict that this dev is supposed to be part of

        Note that this pool dict contains the stats pool's keys and their
        values. 'count' and 'devices' are not included.

        Returns None when the device has no matching whitelist entry.
        """
        # Don't add a device that doesn't have a matching device spec.
        # This can happen during initial sync up with the controller
        devspec = whitelist.get_pci_device_devspec(dev)
        if not devspec:
            return
        tags = devspec.get_tags()
        pool = {k: getattr(dev, k) for k in self.pool_keys}
        if tags:
            pool.update(tags)
        return pool

    def add_device(self, dev):
        """Add a device to its matching pool, creating the pool if needed."""
        dev_pool = self._create_pool_keys_from_dev(dev)
        if dev_pool:
            pool = self._find_pool(dev_pool)
            if not pool:
                dev_pool['count'] = 0
                dev_pool['devices'] = []
                self.pools.append(dev_pool)
                self.pools.sort(key=lambda item: len(item))
                pool = dev_pool
            pool['count'] += 1
            pool['devices'].append(dev)

    @staticmethod
    def _decrease_pool_count(pool_list, pool, count=1):
        """Decrement pool's size by count.

        If pool becomes empty, remove pool from pool_list.

        Returns how much of `count` could NOT be satisfied by this pool.
        """
        if pool['count'] > count:
            pool['count'] -= count
            count = 0
        else:
            count -= pool['count']
            pool_list.remove(pool)
        return count

    def remove_device(self, dev):
        """Remove one device from the first pool that it matches.

        :raises PciDevicePoolEmpty: if no pool matches the device.
        """
        dev_pool = self._create_pool_keys_from_dev(dev)
        if dev_pool:
            pool = self._find_pool(dev_pool)
            if not pool:
                raise exception.PciDevicePoolEmpty(
                    compute_node_id=dev.compute_node_id, address=dev.address)
            pool['devices'].remove(dev)
            self._decrease_pool_count(self.pools, pool)

    def get_free_devs(self):
        """Return a flat list of every device currently held in the pools."""
        free_devs = []
        for pool in self.pools:
            free_devs.extend(pool['devices'])
        return free_devs

    def consume_requests(self, pci_requests, numa_cells=None):
        """Allocate devices that satisfy pci_requests from the pools.

        Returns the list of allocated devices, or None when the requests
        cannot be fully satisfied (any partial allocation is rolled back).
        """
        alloc_devices = []
        for request in pci_requests:
            count = request.count
            spec = request.spec
            # For now, keep the same algorithm as during scheduling:
            # a spec may be able to match multiple pools.
            pools = self._filter_pools_for_spec(self.pools, spec)
            if numa_cells:
                pools = self._filter_pools_for_numa_cells(pools, numa_cells)
            pools = self._filter_non_requested_pfs(request, pools)
            # Failed to allocate the required number of devices
            # Return the devices already allocated back to their pools
            if sum([pool['count'] for pool in pools]) < count:
                LOG.error(_LE("Failed to allocate PCI devices for instance."
                              " Unassigning devices back to pools."
                              " This should not happen, since the scheduler"
                              " should have accurate information, and"
                              " allocation during claims is controlled via a"
                              " hold on the compute node semaphore"))
                while alloc_devices:
                    self.add_device(alloc_devices.pop())
                return None
            for pool in pools:
                if pool['count'] >= count:
                    num_alloc = count
                else:
                    num_alloc = pool['count']
                count -= num_alloc
                pool['count'] -= num_alloc
                for _ in range(num_alloc):
                    pci_dev = pool['devices'].pop()
                    self._handle_device_dependents(pci_dev)
                    pci_dev.request_id = request.request_id
                    alloc_devices.append(pci_dev)
                if count == 0:
                    break
        return alloc_devices

    def _handle_device_dependents(self, pci_dev):
        """Remove device dependents or a parent from pools.

        In case the device is a PF, all of it's dependent VFs should
        be removed from pools count, if these are present.
        When the device is a VF, it's parent PF pool count should be
        decreased, unless it is no longer in a pool.
        """
        if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
            vfs_list = objects.PciDeviceList.get_by_parent_address(
                pci_dev._context,
                pci_dev.compute_node_id,
                pci_dev.address)
            if vfs_list:
                for vf in vfs_list:
                    self.remove_device(vf)
        elif pci_dev.dev_type == fields.PciDeviceType.SRIOV_VF:
            try:
                parent = pci_dev.get_by_dev_addr(pci_dev._context,
                                                 pci_dev.compute_node_id,
                                                 pci_dev.parent_addr)
                # Make sure not to decrease PF pool count if this parent has
                # been already removed from pools
                if parent in self.get_free_devs():
                    self.remove_device(parent)
            except exception.PciDeviceNotFound:
                return

    @staticmethod
    def _filter_pools_for_spec(pools, request_specs):
        """Keep only the pools whose properties match one of request_specs."""
        return [pool for pool in pools
                if utils.pci_device_prop_match(pool, request_specs)]

    @staticmethod
    def _filter_pools_for_numa_cells(pools, numa_cells):
        # Some systems don't report numa node info for pci devices, in
        # that case None is reported in pci_device.numa_node, by adding None
        # to numa_cells we allow assigning those devices to instances with
        # numa topology
        numa_cells = [None] + [cell.id for cell in numa_cells]
        # filter out pools which numa_node is not included in numa_cells
        return [pool for pool in pools if any(utils.pci_device_prop_match(
                                pool, [{'numa_node': cell}])
                                              for cell in numa_cells)]

    def _filter_non_requested_pfs(self, request, matching_pools):
        # Remove SRIOV_PFs from pools, unless it has been explicitly requested
        # This is especially needed in cases where PFs and VFs has the same
        # product_id.
        if all(spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF for
               spec in request.spec):
            matching_pools = self._filter_pools_for_pfs(matching_pools)
        return matching_pools

    @staticmethod
    def _filter_pools_for_pfs(pools):
        """Drop pools that hold SR-IOV physical functions."""
        return [pool for pool in pools
                if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF]

    def _apply_request(self, pools, request, numa_cells=None):
        # NOTE(vladikr): This code maybe open to race conditions.
        # Two concurrent requests may succeed when called support_requests
        # because this method does not remove related devices from the pools
        count = request.count
        matching_pools = self._filter_pools_for_spec(pools, request.spec)
        if numa_cells:
            matching_pools = self._filter_pools_for_numa_cells(matching_pools,
                                                               numa_cells)
        matching_pools = self._filter_non_requested_pfs(request,
                                                        matching_pools)
        if sum([pool['count'] for pool in matching_pools]) < count:
            return False
        else:
            for pool in matching_pools:
                count = self._decrease_pool_count(pools, pool, count)
                if not count:
                    break
        return True

    def support_requests(self, requests, numa_cells=None):
        """Check if the pci requests can be met.

        Scheduler checks compute node's PCI stats to decide if an
        instance can be scheduled into the node. Support does not
        mean real allocation.

        If numa_cells is provided then only devices contained in
        those nodes are considered.
        """
        # note (yjiang5): this function has high possibility to fail,
        # so no exception should be triggered for performance reason.
        pools = copy.deepcopy(self.pools)
        return all([self._apply_request(pools, r, numa_cells)
                    for r in requests])

    def apply_requests(self, requests, numa_cells=None):
        """Apply PCI requests to the PCI stats.

        This is used in multiple instance creation, when the scheduler has to
        maintain how the resources are consumed by the instances.

        If numa_cells is provided then only devices contained in
        those nodes are considered.

        :raises PciDeviceRequestFailed: if any request cannot be satisfied.
        """
        if not all([self._apply_request(self.pools, r, numa_cells)
                    for r in requests]):
            raise exception.PciDeviceRequestFailed(requests=requests)

    def __iter__(self):
        # 'devices' shouldn't be part of stats
        pools = []
        for pool in self.pools:
            # NOTE: .items() works on both py2 and py3; six.iteritems is not
            # needed here.
            tmp = {k: v for k, v in pool.items() if k != 'devices'}
            pools.append(tmp)
        return iter(pools)

    def clear(self):
        """Clear all the stats maintained."""
        self.pools = []

    def __eq__(self, other):
        # BUG FIX: the previous implementation used cmp(), which does not
        # exist on Python 3; direct list comparison is equivalent.
        return self.pools == other.pools

    def __ne__(self, other):
        return not (self == other)

    def to_device_pools_obj(self):
        """Return the contents of the pools as a PciDevicePoolList object."""
        stats = [x for x in self]
        return pci_device_pool.from_pci_stats(stats)
| |
#########################################################
#
# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #
# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #
#
#########################################################
# coding: utf-8
# # Loading Pre-Trained Models
#
# ## Description
#
# In this tutorial, we will use the pre-trained `squeezenet` model from the [ModelZoo](https://github.com/caffe2/caffe2/wiki/Model-Zoo) to classify our own images. As input, we will provide the path (or URL) to an image we want to classify. It will also be helpful to know the [ImageNet object code](https://gist.githubusercontent.com/aaronmarkham/cd3a6b6ac071eca6f7b4a6e40e6038aa/raw/9edb4038a37da6b5a44c3b5bc52e448ff09bfe5b/alexnet_codes) for the image so we can verify our results. The 'object code' is nothing more than the integer label for the class used during training, for example "985" is the code for the class "daisy". Note, although we are using squeezenet here, this tutorial serves as a somewhat universal method for running inference on pretrained models.
#
# If you came from the [Image Pre-Processing Tutorial](https://caffe2.ai/docs/tutorial-image-pre-processing.html), you will see that we are using rescale and crop functions to prep the image, as well as reformatting the image to be CHW, BGR, and finally NCHW. We also correct for the image mean by either using the calculated mean from a provided npy file, or statically removing 128 as a placeholder average.
#
# Hopefully, you will find that loading pre-trained models is simple and syntactically concise. From a high level, these are the three required steps for running inference on a pretrained model:
#
# 1. Read the init and predict protobuf (.pb) files of the pretrained model
#
# with open("init_net.pb") as f:
# init_net = f.read()
# with open("predict_net.pb") as f:
# predict_net = f.read()
#
# 2. Initialize a Predictor in your workspace with the blobs from the protobufs
#
# p = workspace.Predictor(init_net, predict_net)
#
# 3. Run the net on some data and get the (softmax) results!
#
# results = p.run({'data': img})
#
# Note, assuming the last layer of the network is a softmax layer, the results come back as a multidimensional array of probabilities with length equal to the number of classes that the model was trained on. The probabilities may be indexed by the object code (integer type), so if you know the object code you can index the results array at that index to view the network's confidence that the input image is of that class.
#
# **Model Download Options**
#
# Although we will use `squeezenet` here, you can check out the [Model Zoo for pre-trained models](https://github.com/caffe2/caffe2/wiki/Model-Zoo) to browse/download a variety of pretrained models, or you can use Caffe2's `caffe2.python.models.download` module to easily acquire pre-trained models from [Github caffe2/models](http://github.com/caffe2/models).
#
# For our purposes, we will use the `models.download` module to download `squeezenet` into the `/caffe2/python/models` folder of our local Caffe2 installation with the following command:
#
# ```
# python -m caffe2.python.models.download -i squeezenet
# ```
#
# If the above download worked then you should have a directory named squeezenet in your `/caffe2/python/models` folder that contains `init_net.pb` and `predict_net.pb`. Note, if you do not use the `-i` flag, the model will be downloaded to your CWD, however it will still be a directory named squeezenet containing two protobuf files. Alternatively, if you wish to download all of the models, you can clone the entire repo using:
#
# ```
# git clone https://github.com/caffe2/models
# ```
#
# ## Code
#
# Before we start, lets take care of the required imports.
# In[1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
import numpy as np
import skimage.io
import skimage.transform
from matplotlib import pyplot
import os
from caffe2.python import core, workspace, models
import urllib2
import operator
print("Required modules imported.")
# ### Inputs
#
# Here, we will specify the inputs to be used for this run, including the input image, the model location, the mean file (optional), the required size of the image, and the location of the label mapping file.
# In[2]:
# Configuration --- Change to your setup and preferences!
# This directory should contain the models downloaded from the model zoo. To run this
# tutorial, make sure there is a 'squeezenet' directory at this location that
# contains both the 'init_net.pb' and 'predict_net.pb'
CAFFE_MODELS = "~/caffe2/caffe2/python/models"
# Some sample images you can try, or use any URL to a regular image.
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Whole-Lemon.jpg/1235px-Whole-Lemon.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/7/7b/Orange-Whole-%26-Split.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/a/ac/Pretzel.jpg"
# IMAGE_LOCATION = "https://cdn.pixabay.com/photo/2015/02/10/21/28/flower-631765_1280.jpg"
IMAGE_LOCATION = "images/flower.jpg"
# What model are we using?
# Format below is the model's: <folder, INIT_NET, predict_net, mean, input image size>
# You can switch 'squeezenet' out with 'bvlc_alexnet', 'bvlc_googlenet' or others that you have downloaded
MODEL = 'squeezenet', 'init_net.pb', 'predict_net.pb', 'ilsvrc_2012_mean.npy', 227
# codes - these help decypher the output and source from a list from ImageNet's object codes
# to provide an result like "tabby cat" or "lemon" depending on what's in the picture
# you submit to the CNN.
codes = "https://gist.githubusercontent.com/aaronmarkham/cd3a6b6ac071eca6f7b4a6e40e6038aa/raw/9edb4038a37da6b5a44c3b5bc52e448ff09bfe5b/alexnet_codes"
print("Config set!")
# ### Setup paths
#
# With the configs set, we can now load the mean file (if it exists), as well as the predict net and the init net.
# In[3]:
# set paths and variables from model choice and prep image
CAFFE_MODELS = os.path.expanduser(CAFFE_MODELS)
# mean can be 128 or custom based on the model
# gives better results to remove the colors found in all of the training images
MEAN_FILE = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[3])
if not os.path.exists(MEAN_FILE):
    # No per-channel mean file: fall back to a flat 128 offset.
    print("No mean file found!")
    mean = 128
else:
    print ("Mean file found!")
    # Collapse H and W so one mean remains per channel, then reshape to
    # (C, 1, 1) so it broadcasts against a CHW image.
    mean = np.load(MEAN_FILE).mean(1).mean(1)
    mean = mean[:, np.newaxis, np.newaxis]
print("mean was set to: ", mean)
# some models were trained with different image sizes, this helps you calibrate your image
INPUT_IMAGE_SIZE = MODEL[4]
# make sure all of the files are around...
INIT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[1])
PREDICT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[2])
# Check to see if the files exist
if not os.path.exists(INIT_NET):
    print("WARNING: " + INIT_NET + " not found!")
else:
    if not os.path.exists(PREDICT_NET):
        print("WARNING: " + PREDICT_NET + " not found!")
    else:
        print("All needed files found!")
# ### Image Preprocessing
#
# Now that we have specified our inputs and verified the existence of the input network, we can load the image and pre-process it for ingestion into a Caffe2 convolutional neural network! This is a very important step as the trained CNN requires a specifically sized input image whose values are from a particular distribution.
# In[4]:
# Function to crop the center cropX x cropY pixels from the input image
def crop_center(img, cropx, cropy):
    """Return the central cropx-by-cropy window of an HWC image array."""
    height, width, _channels = img.shape
    # Integer-divide each dimension separately (matches the original
    # centering arithmetic for odd sizes).
    x0 = width // 2 - cropx // 2
    y0 = height // 2 - cropy // 2
    return img[y0:y0 + cropy, x0:x0 + cropx]
# Function to rescale the input image to the desired height and/or width. This function will preserve
# the aspect ratio of the original image while making the image the correct scale so we can retrieve
# a good center crop. This function is best used with center crop to resize any size input images into
# specific sized images that our model can use.
def rescale(img, input_height, input_width):
    """Resize img (HWC) preserving aspect ratio so a center crop of
    input_height x input_width is possible.

    The shorter target dimension is fixed and the other dimension grows with
    the original aspect ratio.
    """
    # Get original aspect ratio (width / height)
    aspect = img.shape[1] / float(img.shape[0])
    if aspect > 1:
        # landscape orientation - wide image: fix the height, widen the width.
        # BUG FIX: skimage.transform.resize takes (rows, cols); the original
        # passed input_width as the row count, which only worked because the
        # tutorial always uses a square target.
        res = int(aspect * input_height)
        imgScaled = skimage.transform.resize(img, (input_height, res))
    elif aspect < 1:
        # portrait orientation - tall image: fix the width, grow the height.
        res = int(input_width / aspect)
        imgScaled = skimage.transform.resize(img, (res, input_width))
    else:
        # already square; elif/else chain also guarantees imgScaled is always
        # bound (the original chained ifs could leave it undefined).
        imgScaled = skimage.transform.resize(img, (input_height, input_width))
    return imgScaled
# Load the image as a 32-bit float
# Note: skimage.io.imread returns a HWC ordered RGB image of some size
img = skimage.img_as_float(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)
print("Original Image Shape: " , img.shape)
# Rescale the image to comply with our desired input size. This will not make the image 227x227
# but it will make either the height or width 227 so we can get the ideal center crop.
img = rescale(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
print("Image Shape after rescaling: " , img.shape)
pyplot.figure()
pyplot.imshow(img)
pyplot.title('Rescaled image')
# Crop the center 227x227 pixels of the image so we can feed it to our model
img = crop_center(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
print("Image Shape after cropping: " , img.shape)
pyplot.figure()
pyplot.imshow(img)
pyplot.title('Center Cropped')
# switch to CHW (HWC --> CHW)
img = img.swapaxes(1, 2).swapaxes(0, 1)
print("CHW Image Shape: " , img.shape)
pyplot.figure()
for i in range(3):
    # For some reason, pyplot subplot follows Matlab's indexing
    # convention (starting with 1). Well, we'll just follow it...
    pyplot.subplot(1, 3, i+1)
    pyplot.imshow(img[i])
    pyplot.axis('off')
    pyplot.title('RGB channel %d' % (i+1))
# switch to BGR (RGB --> BGR) by reversing the channel axis
img = img[(2, 1, 0), :, :]
# remove mean for better results; img_as_float gave [0, 1] values, so *255
# restores pixel scale before subtracting the (pixel-scale) mean
img = img * 255 - mean
# add batch size axis which completes the formation of the NCHW shaped input that we want
img = img[np.newaxis, :, :, :].astype(np.float32)
print("NCHW image (ready to be used as input): ", img.shape)
# ### Prepare the CNN and run the net!
#
# Now that the image is ready to be ingested by the CNN, let's open the protobufs, load them into the workspace, and run the net.
#
# In[5]:
# Read the contents of the input protobufs into local variables
# NOTE(review): the protobufs are binary; this script targets Python 2
# (it imports urllib2), where text-mode open works -- on Python 3 these
# opens would need mode "rb". Confirm before porting.
with open(INIT_NET) as f:
    init_net = f.read()
with open(PREDICT_NET) as f:
    predict_net = f.read()
# Initialize the predictor from the input protobufs
p = workspace.Predictor(init_net, predict_net)
# Run the net and return prediction
results = p.run({'data': img})
# Turn it into something we can play with and examine which is in a multi-dimensional array
results = np.asarray(results)
print("results shape: ", results.shape)
# Quick way to get the top-1 prediction result
# Squeeze out the unnecessary axis. This returns a 1-D array of length 1000
preds = np.squeeze(results)
# Get the prediction and the confidence by finding the maximum value and index of maximum value in preds array
curr_pred, curr_conf = max(enumerate(preds), key=operator.itemgetter(1))
print("Prediction: ", curr_pred)
print("Confidence: ", curr_conf)
# ### Process Results
#
# Recall ImageNet is a 1000 class dataset and observe that it is no coincidence that the third axis of results is length 1000. This axis is holding the probability for each category in the pre-trained model. So when you look at the results array at a specific index, the number can be interpreted as the probability that the input belongs to the class corresponding to that index. Now that we have run the predictor and collected the results, we can interpret them by matching them to their corresponding english labels.
#
# In[6]:
# the rest of this is digging through the results
# NOTE(review): np.delete with a scalar index flattens `results` and drops
# the element at flat index 1 -- presumably a shape-specific workaround that
# the i=i+1 shift below compensates for; verify against the model output
# shape before changing.
results = np.delete(results, 1)
index = 0
highest = 0
# Accumulator of (class index, probability) rows; the two slice assignments
# below operate on a zero-row array and are effectively no-ops.
arr = np.empty((0,2), dtype=object)
arr[:,0] = int(10)
arr[:,1:] = float(10)
for i, r in enumerate(results):
    # imagenet index begins with 1!
    i=i+1
    arr = np.append(arr, np.array([[i,r]]), axis=0)
    if (r > highest):
        highest = r
        index = i
# top N results, sorted by probability (descending)
N = 5
topN = sorted(arr, key=lambda x: x[1], reverse=True)[:N]
print("Raw top {} results: {}".format(N,topN))
# Isolate the indexes of the top-N most likely classes
topN_inds = [int(x[0]) for x in topN]
print("Top {} classes in order: {}".format(N,topN_inds))
# Now we can grab the code list and create a class Look Up Table
# NOTE(review): urllib2 is Python 2 only (urllib.request on Python 3).
response = urllib2.urlopen(codes)
class_LUT = []
for line in response:
    # Each line looks like: 985: 'daisy', -- split on the first colon
    code, result = line.partition(":")[::2]
    code = code.strip()
    result = result.replace("'", "")
    if code.isdigit():
        # Keep the first name only, stripping the leading space
        class_LUT.append(result.split(",")[0][1:])
# For each of the top-N results, associate the integer result with an actual class
for n in topN:
    print("Model predicts '{}' with {}% confidence".format(class_LUT[int(n[0])],float("{0:.2f}".format(n[1]*100))))
# ### Feeding Larger Batches
#
# Above is an example of how to feed one image at a time. We can achieve higher throughput if we feed multiple images at a time in a single batch. Recall, the data fed into the classifier is in 'NCHW' order, so to feed multiple images, we will expand the 'N' axis.
# In[7]:
# List of input images to be fed
images = ["images/cowboy-hat.jpg",
          "images/cell-tower.jpg",
          "images/Ducreux.jpg",
          "images/pretzel.jpg",
          "images/orangutan.jpg",
          "images/aircraft-carrier.jpg",
          "images/cat.jpg"]
# Allocate space for the batch of formatted images
NCHW_batch = np.zeros((len(images),3,227,227))
print ("Batch Shape: ",NCHW_batch.shape)
# For each of the images in the list, format it and place it in the batch
# (same rescale/crop/CHW/BGR/mean pipeline as the single-image case above)
for i,curr_img in enumerate(images):
    img = skimage.img_as_float(skimage.io.imread(curr_img)).astype(np.float32)
    img = rescale(img, 227, 227)
    img = crop_center(img, 227, 227)
    img = img.swapaxes(1, 2).swapaxes(0, 1)
    img = img[(2, 1, 0), :, :]
    img = img * 255 - mean
    NCHW_batch[i] = img
print("NCHW image (ready to be used as input): ", NCHW_batch.shape)
# Run the net on the batch
results = p.run([NCHW_batch.astype(np.float32)])
# Turn it into something we can play with and examine which is in a multi-dimensional array
results = np.asarray(results)
# Squeeze out the unnecessary axis
preds = np.squeeze(results)
print("Squeezed Predictions Shape, with batch size {}: {}".format(len(images),preds.shape))
# Describe the results
for i,pred in enumerate(preds):
    print("Results for: '{}'".format(images[i]))
    # Get the prediction and the confidence by finding the maximum value
    # and index of maximum value in preds array
    curr_pred, curr_conf = max(enumerate(pred), key=operator.itemgetter(1))
    print("\tPrediction: ", curr_pred)
    print("\tClass Name: ", class_LUT[int(curr_pred)])
    print("\tConfidence: ", curr_conf)
| |
import os
import os.path
import uuid
from collections import defaultdict
import blitzdb
from blitzdb.backends.base import (
Backend as BaseBackend,
NotInTransaction,
)
from blitzdb.backends.file.index import (
Index,
TransactionalIndex,
)
from blitzdb.backends.file.queries import compile_query
from blitzdb.backends.file.queryset import QuerySet
from blitzdb.backends.file.serializers import (
JsonSerializer,
MarshalSerializer,
PickleSerializer,
)
from blitzdb.backends.file.store import (
Store,
TransactionalStore,
)
import six
# Registries mapping config-file names to implementation classes; the
# Backend resolves its 'store_class' / 'index_class' / 'serializer_class'
# config keys through these dicts.
store_classes = {
    'transactional': TransactionalStore,
    'basic': Store,
}
index_classes = {
    'transactional': TransactionalIndex,
    'basic': Index
}
serializer_classes = {
    'pickle': PickleSerializer,
    'json': JsonSerializer,
    'marshal': MarshalSerializer
}
# will only be available if cjson is installed
try:
    from blitzdb.backends.file.serializers import CJsonSerializer
    serializer_classes['cjson'] = CJsonSerializer
except ImportError:
    pass
class DatabaseIndexError(BaseException):
    """Gets raised when the index of the database is corrupted.

    Ideally this should never happen. To recover from this error, you can call
    the `rebuild_index` function of the file backend with the affected
    collection and key as parameters.
    """
    # NOTE(review): derives from BaseException rather than Exception, so a
    # generic `except Exception` handler will NOT swallow it -- presumably
    # deliberate so index corruption always surfaces; confirm before changing.
class Backend(BaseBackend):
    """A file-based database backend.
    Uses flat files to store objects on the hard disk and file-based indexes to
    optimize querying.
    :param path: The path to the database. If non-existant it will be created
    :param config:
        The configuration dictionary. If not specified, Blitz will try to load
        it from disk. If this fails, the default configuration will be used
        instead.
    .. warning::
        It might seem tempting to use the `autocommit` config and not having to
        worry about calling `commit` by hand. Please be advised that this can
        incur a significant overhead in write time since a `commit` will
        trigger a complete rewrite of all indexes to disk.
    """
    # the default configuration values.
    default_config = {
        'indexes': {},
        'store_class': 'transactional',
        'index_class': 'transactional',
        'index_store_class': 'basic',
        'serializer_class': 'json',
        'autocommit': False,
    }
    config_defaults = {}

    def __init__(self, path, config=None, overwrite_config=False, **kwargs):
        """Create/open the database directory and load its configuration."""
        self._path = os.path.abspath(path)
        if not os.path.exists(path):
            os.makedirs(path)
        # collection name -> document class, lazily filled by register()
        self.collections = {}
        # collection name -> object store
        self.stores = {}
        self.in_transaction = False
        # collection name -> {index key -> Index}
        self.indexes = defaultdict(lambda: {})
        # collection name -> {index id -> store}
        self.index_stores = defaultdict(lambda: {})
        self.load_config(config, overwrite_config)
        super(Backend, self).__init__(**kwargs)

    @property
    def autocommit(self):
        """Whether every save/delete triggers an implicit commit."""
        return 'autocommit' in self.config and self.config['autocommit']

    @autocommit.setter
    def autocommit(self, value):
        if value not in (True, False):
            raise TypeError('Value must be boolean!')
        self.config['autocommit'] = value

    def begin(self):
        """Start a new transaction."""
        if self.in_transaction:  # we're already in a transaction...
            # NOTE: commit() itself calls begin() again at its end; the
            # store/index begin() calls below are then redundant but harmless.
            self.commit()
        self.in_transaction = True
        for collection, store in self.stores.items():
            store.begin()
            indexes = self.indexes[collection]
            for index in indexes.values():
                index.begin()

    @property
    def StoreClass(self):
        """Object-store class chosen by the 'store_class' config key."""
        return store_classes[self.config['store_class']]

    @property
    def IndexClass(self):
        """Index class chosen by the 'index_class' config key."""
        return index_classes[self.config['index_class']]

    @property
    def IndexStoreClass(self):
        """Store class used for persisting indexes ('index_store_class')."""
        return store_classes[self.config['index_store_class']]

    @property
    def SerializerClass(self):
        """Serializer chosen by the 'serializer_class' config key."""
        return serializer_classes[self.config['serializer_class']]

    def rollback(self):
        """Roll back a transaction."""
        if not self.in_transaction:
            raise NotInTransaction
        for collection, store in self.stores.items():
            store.rollback()
            indexes = self.indexes[collection]
            indexes_to_rebuild = []
            for key, index in indexes.items():
                try:
                    index.rollback()
                except NotInTransaction:
                    # this index is "dirty" and needs to be rebuilt
                    # (probably it has been created within a transaction)
                    indexes_to_rebuild.append(key)
            if indexes_to_rebuild:
                self.rebuild_indexes(collection, indexes_to_rebuild)
        self.in_transaction = False

    def commit(self):
        """Commit all pending transactions to the database.
        .. admonition:: Warning
            This operation can be **expensive** in runtime if a large number of
            documents (>100.000) is contained in the database, since it will
            cause all database indexes to be written to disk.
        """
        for collection in self.collections:
            store = self.get_collection_store(collection)
            store.commit()
            indexes = self.get_collection_indexes(collection)
            for index in indexes.values():
                index.commit()
        self.in_transaction = False
        # Immediately open a fresh transaction so the backend is always
        # writable between explicit commit() calls.
        self.begin()

    def rebuild_index(self, collection, key):
        """Rebuild a given index using the objects stored in the database.
        :param collection:
            The name of the collection for which to rebuild the index
        :param key: The key of the index to be rebuilt
        """
        return self.rebuild_indexes(collection, [key])

    def create_index(self, cls_or_collection,
                     params=None, fields=None, ephemeral=False):
        """Create new index on the given collection/class with given parameters.
        :param cls_or_collection:
            The name of the collection or the class for which to create an
            index
        :param params: The parameters of the index
        :param ephemeral: Whether to create a persistent or an ephemeral index
        `params` expects either a dictionary of parameters or a string value.
        In the latter case, it will interpret the string as the name of the key
        for which an index is to be created.
        If `ephemeral = True`, the index will be created only in memory and
        will not be written to disk when :py:meth:`.commit` is called. This is
        useful for optimizing query performance.
        ..notice::
            By default, BlitzDB will create ephemeral indexes for all keys over
            which you perform queries, so after you've run a query on a given
            key for the first time, the second run will usually be much faster.
        **Specifying keys**
        Keys can be specified just like in MongoDB, using a dot ('.') to
        specify nested keys.
        .. code-block:: python
            actor = Actor({'name' : 'Charlie Chaplin',
                           'foo' : {'value' : 'bar'}})
        If you want to create an index on `actor['foo']['value']` , you can
        just say
        .. code-block:: python
            backend.create_index(Actor,'foo.value')
        .. warning::
            Transcendental indexes (i.e. indexes transcending the boundaries of
            referenced objects) are currently not supported by Blitz, which
            means you can't create an index on an attribute value of a document
            that is embedded in another document.
        """
        if params:
            return self.create_indexes(
                cls_or_collection, [params], ephemeral=ephemeral)
        elif fields:
            params = []
            if len(fields.items()) > 1:
                raise ValueError(
                    'File backend currently '
                    'does not support multi-key indexes, sorry :/'
                )
            return self.create_indexes(
                cls_or_collection,
                [{'key': list(fields.keys())[0]}],
                ephemeral=ephemeral)
        else:
            raise AttributeError('You must either specify params or fields!')

    def get_pk_index(self, collection):
        """Return the primary key index for a given collection.
        :param collection: the collection for which to return the primary index
        :returns: the primary key index of the given collection
        """
        cls = self.collections[collection]
        if not cls.get_pk_name() in self.indexes[collection]:
            # BUGFIX: the arguments were previously swapped
            # (`create_index(cls.get_pk_name(), collection)`), which created
            # an index for a bogus "collection" named after the pk and then
            # raised KeyError on the return below. create_index() expects
            # (cls_or_collection, params) -- see init_indexes().
            self.create_index(collection, {'key': cls.get_pk_name()})
        return self.indexes[collection][cls.get_pk_name()]

    def load_config(self, config=None, overwrite_config=False):
        """Load config.json from the database path, or initialize it.
        Missing keys are filled in from `default_config`; the result is
        written back to disk via save_config().
        """
        config_path = self._path + '/config.json'
        if os.path.exists(config_path):
            with open(config_path, 'rb') as config_file:
                # configuration is always stored in JSON format
                self._config = JsonSerializer.deserialize(config_file.read())
        else:
            if config:
                self._config = config.copy()
            else:
                self._config = {}
        if overwrite_config and config:
            self._config.update(config)
        for key, value in self.default_config.items():
            if key not in self._config:
                self._config[key] = value
        if 'version' not in self._config:
            self._config['version'] = blitzdb.__version__
        self.save_config()

    def save_config(self):
        """Persist the current configuration to config.json."""
        config_path = self._path + '/config.json'
        with open(config_path, 'wb') as config_file:
            config_file.write(JsonSerializer.serialize(self._config))

    @property
    def config(self):
        return self._config

    @config.setter
    def config(self, config):
        # Replacing the config immediately persists it.
        self._config = config
        self.save_config()

    @property
    def path(self):
        return self._path

    def get_collection_store(self, collection):
        """Return (creating on first use) the object store for a collection."""
        if collection not in self.stores:
            self.stores[collection] = self.StoreClass(
                {
                    'path': self.path + '/' + collection + '/objects',
                    'version': self._config['version'],
                }
            )
        return self.stores[collection]

    def get_index_store(self, collection, store_key):
        """Return (creating on first use) the store backing one index."""
        if store_key not in self.index_stores[collection]:
            self.index_stores[collection][store_key] = self.IndexStoreClass(
                {
                    'path': (
                        self.path + '/' + collection + '/indexes/' + store_key
                    ),
                    'version': self._config['version'],
                }
            )
        return self.index_stores[collection][store_key]

    def register(self, cls, parameters=None):
        """Register a document class and initialize its indexes."""
        super(Backend, self).register(cls, parameters)
        self.init_indexes(self.get_collection_for_cls(cls))

    def get_storage_key_for(self, obj):
        """Return the store key under which `obj` is saved.
        :raises obj.DoesNotExist: if the object's pk is not in the pk index.
        """
        collection = self.get_collection_for_obj(obj)
        pk_index = self.get_pk_index(collection)
        try:
            return pk_index.get_keys_for(obj.pk)[0]
        except (KeyError, IndexError):
            raise obj.DoesNotExist

    def init_indexes(self, collection):
        """Create the configured indexes for a collection (pk index first)."""
        cls = self.collections[collection]
        if collection in self._config['indexes']:
            # If not pk index is present, we create one on the fly...
            if not [idx for idx in self._config['indexes'][collection].values()
                    if idx['key'] == cls.get_pk_name()]:
                self.create_index(collection, {'key': cls.get_pk_name()})
            # We sort the indexes such that pk is always created first...
            for index_params in sorted(
                    self._config['indexes'][collection].values(),
                    key=lambda x: 0 if x['key'] == cls.get_pk_name() else 1):
                self.create_index(collection, index_params)
        else:
            # If no indexes are given, we just create a primary key index...
            self.create_index(collection, {'key': cls.get_pk_name()})

    def rebuild_indexes(self, collection, keys):
        """Clear and repopulate the given indexes from all stored objects."""
        if not keys:
            return
        all_objects = self.filter(collection, {})
        for key in keys:
            index = self.indexes[collection][key]
            index.clear()
        for key in keys:
            index = self.indexes[collection][key]
            for obj in all_objects:
                index.add_key(obj.attributes, obj._store_key)
            index.commit()

    def create_indexes(self, cls_or_collection, params_list, ephemeral=False):
        """Create several indexes at once; see :py:meth:`.create_index`.
        :returns: the list of created Index objects, or None if `params_list`
            is empty or an index for the first already-existing key is hit.
        """
        indexes = []
        keys = []
        if not params_list:
            return
        if not isinstance(cls_or_collection, six.string_types):
            collection = self.get_collection_for_cls(cls_or_collection)
        else:
            collection = cls_or_collection
        for params in params_list:
            if not isinstance(params, dict):
                params = {'key': params}
            if params['key'] in self.indexes[collection]:
                # NOTE(review): this early return also skips any *remaining*
                # params in the list; callers currently pre-filter, so the
                # historical behavior is preserved here.
                return  # Index already exists
            if 'id' not in params:
                params['id'] = uuid.uuid4().hex
            if ephemeral:
                index_store = None
            else:
                index_store = self.get_index_store(collection, params['id'])
            index = self.IndexClass(
                params,
                serializer=lambda x: self.serialize(x, autosave=False),
                deserializer=lambda x: self.deserialize(x),
                store=index_store)
            self.indexes[collection][params['key']] = index
            if collection not in self._config['indexes']:
                self._config['indexes'][collection] = {}
            if not ephemeral:
                self._config['indexes'][collection][params['key']] = params
                self.save_config()
            indexes.append(index)
            # if the index failed to load from disk we rebuild it
            if not index.loaded:
                keys.append(params['key'])
        self.rebuild_indexes(collection, keys)
        return indexes

    def get_collection_indexes(self, collection):
        """Return the {key: Index} mapping for a collection (may be empty)."""
        return self.indexes[collection] if collection in self.indexes else {}

    def encode_attributes(self, attributes):
        """Serialize an attribute dict to the on-disk blob format."""
        return self.SerializerClass.serialize(attributes)

    def decode_attributes(self, data):
        """Deserialize an on-disk blob back into an attribute dict."""
        return self.SerializerClass.deserialize(data)

    def get_object(self, cls, key):
        """Load the object stored under `key`.
        :raises cls.DoesNotExist: if no blob exists for `key`.
        """
        collection = self.get_collection_for_cls(cls)
        store = self.get_collection_store(collection)
        try:
            data = self.deserialize(
                self.decode_attributes(store.get_blob(key)))
        except IOError:
            raise cls.DoesNotExist
        obj = self.create_instance(cls, data)
        return obj

    def save(self, obj):
        """Store `obj`, updating all indexes; commits if autocommit is on."""
        collection = self.get_collection_for_obj(obj)
        indexes = self.get_collection_indexes(collection)
        store = self.get_collection_store(collection)
        if obj.pk is None:
            obj.autogenerate_pk()
        if hasattr(obj, 'pre_save') and callable(obj.pre_save):
            obj.pre_save()
        serialized_attributes = self.serialize(obj.attributes)
        data = self.encode_attributes(serialized_attributes)
        try:
            # Reuse the existing store key so saving an existing object
            # overwrites it rather than duplicating it.
            store_key = (
                self
                .get_pk_index(collection)
                .get_keys_for(obj.pk, include_uncommitted=True).pop()
            )
        except IndexError:
            store_key = uuid.uuid4().hex
        store.store_blob(data, store_key)
        for key, index in indexes.items():
            index.add_key(obj.attributes, store_key)
        if self.config['autocommit']:
            self.commit()
        return obj

    def delete_by_store_keys(self, collection, store_keys):
        """Remove the given blobs and their index entries (best-effort)."""
        store = self.get_collection_store(collection)
        indexes = self.get_collection_indexes(collection)
        for store_key in store_keys:
            try:
                store.delete_blob(store_key)
            except (KeyError, IOError):
                # Blob already gone: still scrub the indexes below.
                pass
            for index in indexes.values():
                index.remove_key(store_key)
        if self.config['autocommit']:
            self.commit()

    def delete(self, obj):
        """Delete `obj` (calling its pre_delete hook if present)."""
        collection = self.get_collection_for_obj(obj)
        primary_index = self.get_pk_index(collection)
        if hasattr(obj, 'pre_delete') and callable(obj.pre_delete):
            obj.pre_delete()
        return self.delete_by_store_keys(
            collection, primary_index.get_keys_for(obj.pk))

    def get(self, cls, query):
        """Return the single object matching `query`.
        :raises cls.DoesNotExist: if there is no match.
        :raises cls.MultipleDocumentsReturned: if there is more than one.
        """
        objects = self.filter(cls, query)
        if len(objects) == 0:
            raise cls.DoesNotExist
        elif len(objects) > 1:
            raise cls.MultipleDocumentsReturned
        return objects[0]

    def sort(self, cls_or_collection, keys, key, order=QuerySet.ASCENDING):
        """Sort store keys by one or more index keys.
        `key` may be a single key or a list of (key, order) pairs; missing
        indexes are created ephemerally on the fly.
        """
        if not isinstance(cls_or_collection, six.string_types):
            collection = self.get_collection_for_cls(cls_or_collection)
            cls = cls_or_collection
        else:
            collection = cls_or_collection
            cls = self.get_cls_for_collection(collection)
        if not isinstance(key, list) and not isinstance(key, tuple):
            sort_keys = [(key, order)]
        else:
            sort_keys = key
        indexes = self.get_collection_indexes(collection)
        indexes_to_create = []
        for sort_key, order in sort_keys:
            if sort_key not in indexes:
                indexes_to_create.append(sort_key)
        self.create_indexes(cls, indexes_to_create, ephemeral=True)

        def sort_by_keys(keys, sort_keys):
            # Recursively sort by the first key, then sub-sort each bucket
            # by the remaining keys.
            if not sort_keys:
                return keys
            (sort_key, order) = sort_keys[0]
            _sorted_keys = indexes[sort_key].sort_keys(keys, order)
            return [sort_by_keys(k, sort_keys[1:]) for k in _sorted_keys]

        def flatten(l):
            # Flatten the nested bucket lists produced by sort_by_keys.
            fl = []
            for elem in l:
                if isinstance(elem, list):
                    fl.extend(flatten(elem))
                else:
                    fl.append(elem)
            return fl
        return flatten(sort_by_keys(keys, sort_keys))

    def filter(self, cls_or_collection, query, initial_keys=None):
        """Run a MongoDB-style `query` dict and return a QuerySet.
        Any key referenced by the query that has no index yet gets an
        ephemeral index created before the query is executed.
        """
        if not isinstance(query, dict):
            raise AttributeError('Query parameters must be dict!')
        if not isinstance(cls_or_collection, six.string_types):
            collection = self.get_collection_for_cls(cls_or_collection)
            cls = cls_or_collection
        else:
            collection = cls_or_collection
            cls = self.get_cls_for_collection(collection)
        store = self.get_collection_store(collection)
        indexes = self.get_collection_indexes(collection)
        compiled_query = compile_query(self.serialize(query, autosave=False))
        indexes_to_create = []

        def query_function(key, expression):
            # key is None for the empty query: return everything.
            if key is None:
                return QuerySet(
                    self,
                    cls,
                    store,
                    self.get_pk_index(collection).get_all_keys()
                )
            qs = QuerySet(
                self,
                cls,
                store,
                indexes[key].get_keys_for(expression)
            )
            return qs

        def index_collector(key, expressions):
            # Dry-run visitor: records which keys need indexes and returns
            # an empty QuerySet so the compiled query can be walked.
            if (key not in indexes
                    and key not in indexes_to_create
                    and key is not None):
                indexes_to_create.append(key)
            return QuerySet(self, cls, store, [])
        # We collect all the indexes that we need to create
        compiled_query(index_collector)
        if indexes_to_create:
            self.create_indexes(cls, indexes_to_create, ephemeral=True)
        query_set = compiled_query(query_function)
        return query_set
| |
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import imath
import IECore
import IECoreScene
import IECoreHoudini
import unittest
import os
import shutil
class TestOpHolder( IECoreHoudini.TestCase ):
	def testOpHolder(self):
		"""Create a bare ieOpHolder SOP inside a fresh geo node.

		Also serves as a fixture for most other tests; returns the
		(node, FnOpHolder) pair.
		"""
		obj = hou.node("/obj")
		geo = obj.createNode("geo", run_init_scripts=False)
		op = geo.createNode( "ieOpHolder" )
		self.assert_( op )
		fn = IECoreHoudini.FnOpHolder( op )
		self.assert_( fn )
		return (op,fn)
# tests a basic op, the function set and that it cooks as expected
	def testSimpleOp(self):
		"""Load the cobReader op into a holder and check a Houdini cook
		pushes the parm value into the op and produces the expected result.
		"""
		(op, fn) = self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
		fn.setParameterised( cl )
		self.assertNotEqual( fn.getParameterised(), None )
		self.assertEqual( fn.getParameterised(), cl )
		# NOTE(review): self.__torusTestFile is presumably set in setUp(),
		# outside this chunk -- confirm.
		op.parm("parm_filename").set( self.__torusTestFile )
		op.cook() # cook using Houdini's cook mechanism, NOT operate()
		self.assertEqual( fn.getParameterised()["filename"].getValue(), IECore.StringData( self.__torusTestFile ) )
		result = fn.getParameterised().resultParameter().getValue()
		# Result should match reading the cob file directly.
		self.assertEqual( result, IECore.Reader.create( self.__torusTestFile ).read() )
# tests the alternative 'all in one' opHolder creator
	def testAlternateCreator(self):
		"""FnOpHolder.create() builds node + holder + op in one call."""
		n = IECoreHoudini.FnOpHolder.create( "noise_deformer", "noiseDeformer", 1 )
		self.assert_( n )
		fn = IECoreHoudini.FnOpHolder( n )
		self.assert_( fn )
		op = fn.getParameterised()
		self.assert_( op )
		self.assertEqual( op.typeName(), "noiseDeformer" )
# tests creation within contexts (simulating from UIs)
	def testContextCreator( self ) :
		"""Exercise FnOpHolder.create() with the contextArgs used by the UI
		(toolname, shiftclick, outputnodename) and check the node paths and
		automatic ieCortexConverter wiring that result.
		"""
		# test generic creation
		n = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator" )
		self.assertEqual( n.path(), "/obj/vectorMaker/vectorMaker" )
		# test contextArgs outside UI mode fallback to generic behaviour
		contextArgs = { "toolname" : "ieOpHolder" }
		n2 = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator", contextArgs=contextArgs )
		# Houdini auto-uniquifies the container name ("vectorMaker1").
		self.assertEqual( n2.path(), "/obj/vectorMaker1/vectorMaker" )
		# test parent arg
		geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
		n3 = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator", parent=geo, contextArgs=contextArgs )
		self.assertEqual( n3.path(), "/obj/geo1/vectorMaker" )
		# test automatic conversion
		# shiftclick in the UI requests an automatic ieCortexConverter
		# downstream of the holder.
		contextArgs["shiftclick"] = True
		n4 = IECoreHoudini.FnOpHolder.create( "noise", "noiseDeformer", parent=geo, contextArgs=contextArgs )
		self.assertEqual( n4.path(), "/obj/geo1/noise" )
		self.assertEqual( len(n4.outputConnectors()[0]), 1 )
		self.assertEqual( n4.outputConnectors()[0][0].outputNode().type().name(), "ieCortexConverter" )
		# test automatic conversion and output connections
		mountain = geo.createNode( "mountain" )
		contextArgs["outputnodename"] = mountain.path()
		n5 = IECoreHoudini.FnOpHolder.create( "noise", "noiseDeformer", parent=geo, contextArgs=contextArgs )
		self.assertEqual( n5.path(), "/obj/geo1/noise1" )
		self.assertEqual( len(n5.outputConnectors()[0]), 1 )
		converter = n5.outputConnectors()[0][0].outputNode()
		self.assertEqual( converter.type().name(), "ieCortexConverter" )
		self.assertEqual( len(converter.outputConnectors()[0]), 1 )
		outputNode = converter.outputConnectors()[0][0].outputNode()
		# mountain SOP was renamed "mountain::2.0" in Houdini 16.
		self.assertEqual( outputNode.type().name(), "mountain::2.0" if hou.applicationVersion()[0] >= 16 else "mountain" )
		self.assertEqual( outputNode, mountain )
# test that a C++ op can be assigned using the function set
	def testCppOp(self):
		"""A C++-implemented op (MeshNormalsOp) can be assigned via the
		function set, not only Python ops loaded from the ClassLoader.
		"""
		(op,fn) = self.testOpHolder()
		mesh_normals = IECoreScene.MeshNormalsOp()
		self.assert_( mesh_normals )
		fn.setParameterised(mesh_normals)
		self.assertEqual( fn.getParameterised().typeName(), "MeshNormalsOp" )
# test that we can wire opholders together
	def testWireTogether(self):
		"""Wire opHolders into a small network:
		two V3fVectorCreators -> V3fVectorAdder -> objectDebug,
		then cook the sink and verify the summed vectors.
		"""
		obj = hou.node("/obj")
		geo = obj.createNode("geo", run_init_scripts=False)
		# create two vector creators (1,2,3) and (4,5,6), size 3 each
		v1_op = geo.createNode( "ieOpHolder", node_name="vector1" )
		fn = IECoreHoudini.FnOpHolder( v1_op )
		fn.setOp( "vectors/V3fVectorCreator", 1 )
		v1_op.parm("parm_size").set(3)
		v1_op.parmTuple("parm_value").set( (1,2,3) )
		v2_op = geo.createNode( "ieOpHolder", node_name="vector2" )
		fn = IECoreHoudini.FnOpHolder( v2_op )
		fn.setOp( "vectors/V3fVectorCreator", 1 )
		v2_op.parm("parm_size").set(3)
		v2_op.parmTuple("parm_value").set( (4,5,6) )
		add_op = geo.createNode( "ieOpHolder", node_name="add_vectors" )
		fn = IECoreHoudini.FnOpHolder( add_op )
		fn.setOp( "vectors/V3fVectorAdder", 1 )
		print_op = geo.createNode( "ieOpHolder", node_name="print_values" )
		fn = IECoreHoudini.FnOpHolder( print_op )
		fn.setOp( "objectDebug", 1 )
		print_op.parm("parm_quiet").set(True)
		# connect our ops together
		add_op.setInput( 0, v1_op )
		add_op.setInput( 1, v2_op )
		print_op.setInput( 0, add_op )
		# cook and check our output
		print_op.cook()
		fn = IECoreHoudini.FnOpHolder(print_op)
		result = fn.getParameterised().resultParameter().getValue()
		# (1,2,3)+(4,5,6) == (5,7,9), repeated for the 3 elements
		self.assertEqual( result, IECore.V3fVectorData( [imath.V3f(5,7,9),imath.V3f(5,7,9),imath.V3f(5,7,9)] ) )
# test that a hip with opHolders wired together can be saved and reloaded & still evaluate
	def testSaveLoad(self):
		"""A hip file containing wired opHolders survives save/clear/load
		and still evaluates to the same result.
		"""
		hou.hipFile.clear(suppress_save_prompt=True)
		save_file = "test/opHolder_testData/opSave_test.hip"
		# build the network from testWireTogether in the fresh scene
		self.testWireTogether()
		# save scene
		hou.hipFile.save(save_file)
		# new scene
		hou.hipFile.clear(suppress_save_prompt=True)
		# open scene
		hou.hipFile.load(save_file)
		# check some parameters are ok
		self.assertEqual( hou.node("/obj/geo1/vector1").parm("parm_size").eval(), 3 )
		self.assertEqual( hou.node("/obj/geo1/vector1").parmTuple("parm_value").eval(), (1,2,3) )
		self.assertEqual( hou.node("/obj/geo1/vector2").parm("parm_size").eval(), 3 )
		self.assertEqual( hou.node("/obj/geo1/vector2").parmTuple("parm_value").eval(), (4,5,6) )
		# check the result of our last opHolder
		n = hou.node("/obj/geo1/print_values")
		n.cook()
		fn = IECoreHoudini.FnOpHolder(n)
		result = fn.getParameterised().resultParameter().getValue()
		self.assertEqual( result, IECore.V3fVectorData( [imath.V3f(5,7,9),imath.V3f(5,7,9),imath.V3f(5,7,9)] ) )
# tests changing op and inputs
	def testChangingOp( self ) :
		"""Switching the held op updates the node's input connectors, and
		existing input connections are remapped/dropped as the connector
		count changes.
		"""
		n = IECoreHoudini.FnOpHolder.create( "test_node", "vectors/V3fVectorCreator", 1 )
		fn = IECoreHoudini.FnOpHolder( n )
		op = fn.getParameterised()
		# V3fVectorCreator takes no primitive inputs
		self.assertEqual( len(n.inputConnectors()), 0 )
		fn.setOp( "objectDebug", 1 )
		self.assertEqual( len(n.inputConnectors()), 1 )
		torus = n.createInputNode( 0, "torus" )
		self.assertEqual( torus, n.inputConnections()[0].inputNode() )
		self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
		# growing to 2 connectors keeps the existing connection on input 0
		fn.setOp( "vectors/V3fVectorAdder", 1 )
		self.assertEqual( len(n.inputConnectors()), 2 )
		self.assertEqual( torus, n.inputConnections()[0].inputNode() )
		self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
		box = n.createInputNode( 1, "box" )
		self.assertEqual( box, n.inputConnections()[1].inputNode() )
		self.assertEqual( 1, n.inputConnections()[1].inputIndex() )
		# disconnecting input 0 leaves the box connection on index 1
		n.setInput( 0, None )
		self.assertEqual( len(n.inputConnectors()), 2 )
		self.assertEqual( len(n.inputConnections()), 1 )
		self.assertEqual( box, n.inputConnections()[0].inputNode() )
		self.assertEqual( 1, n.inputConnections()[0].inputIndex() )
		# shrinking to 1 connector moves the box connection to index 0
		fn.setOp( "objectDebug", 1 )
		self.assertEqual( len(n.inputConnectors()), 1 )
		self.assertEqual( box, n.inputConnections()[0].inputNode() )
		self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
		# back to zero connectors: connection is dropped entirely
		fn.setOp( "vectors/V3fVectorCreator", 1 )
		self.assertEqual( len(n.inputConnectors()), 0 )
		self.assert_( not n.inputConnectors() )
# tests creation of a lot of opHolders
def testLotsQuickly(self):
n = []
for i in range(1000):
n.append( IECoreHoudini.FnOpHolder.create( "noise_deformer", "noiseDeformer", 1 ) )
for _n in n:
_n.destroy()
# test using the noiseDeformer op
	def testModifyMesh(self):
		"""cobReader -> noiseDeformer chain: P and N keep their lengths but
		their values change. Returns (reader_node, deformer_node) for reuse.
		"""
		(op, fn) = self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
		fn.setParameterised( cl )
		# NOTE(review): self.__torusNormalsTestFile presumably defined in
		# setUp() outside this chunk -- confirm.
		op.parm("parm_filename").set( self.__torusNormalsTestFile )
		deformer = op.createOutputNode( "ieOpHolder" )
		self.assert_( deformer )
		cl = IECore.ClassLoader.defaultOpLoader().load("noiseDeformer", 1)()
		self.assertEqual( cl.typeName(), "noiseDeformer" )
		fn = IECoreHoudini.FnOpHolder( deformer )
		fn.setParameterised( cl )
		deformer.parm("parm_magnitude").set( 2.5 )
		deformer.parmTuple("parm_frequency").set( (1,2,3) )
		deformer.cook()
		torus = IECore.Reader.create( self.__torusNormalsTestFile ).read()
		result = fn.getParameterised().resultParameter().getValue()
		# same topology...
		self.assertEqual( len(result["P"].data), len(torus["P"].data) )
		self.assertEqual( len(result["N"].data), len(torus["N"].data) )
		# ...but deformed positions and normals
		self.assertNotEqual( result["P"], torus["P"] )
		self.assertNotEqual( result["N"], torus["N"] )
		return ( op, deformer )
# test the bbox on our Sop geometry is set correctly
	def testOutputBBox(self):
		"""The SOP geometry bounding box tracks the op result: reading the
		torus gives the known bbox, and deforming it changes the bbox.
		"""
		(op,fn) = self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
		fn.setParameterised( cl )
		op.parm("parm_filename").set( self.__torusNormalsTestFile )
		op.cook()
		geo = op.geometry()
		self.assert_( geo )
		bbox = geo.boundingBox()
		# bbox of the undeformed torus test geometry
		self.failUnless( bbox.isAlmostEqual(hou.BoundingBox(-1.5, -0.475528, -1.42658, 1.5, 0.475528, 1.42658)) )
		deformer = op.createOutputNode( "ieOpHolder" )
		cl = IECore.ClassLoader.defaultOpLoader().load("noiseDeformer", 1)()
		fn = IECoreHoudini.FnOpHolder( deformer )
		fn.setParameterised( cl )
		self.assertEqual( len(deformer.inputConnectors()), 1 )
		deformer.parm("parm_magnitude").set(2)
		deformer.cook()
		geo2 = deformer.geometry()
		self.assert_( geo2 )
		bbox2 = geo2.boundingBox()
		# deformed bbox differs from the original and matches the expected
		# noise result (fixed noise seed, magnitude 2)
		self.assert_( not bbox2.isAlmostEqual(hou.BoundingBox(-1.5, -0.475528, -1.42658, 1.5, 0.475528, 1.42658)) )
		self.failUnless( bbox2.isAlmostEqual(hou.BoundingBox(-1.8938, -1.08025, -1.75561, 1.64279, 1.37116, 1.97013)) )
		return ( geo, deformer )
# test an opHolder with 2 primitive inputs
	def testMultipleInputs(self):
		"""swapAttribute with two primitive inputs: takes P from input 1
		(the deformed mesh) while N stays from input 0 (the source mesh).
		"""
		# NOTE(review): testModifyMesh() returns (reader_op, deformer);
		# 'geo' here is actually the reader opHolder node.
		(geo, deformer) = self.testModifyMesh()
		swap = geo.createOutputNode( "ieOpHolder", node_name="swapP" )
		cl = IECore.ClassLoader.defaultOpLoader().load("swapAttribute", 1)()
		fn = IECoreHoudini.FnOpHolder( swap )
		fn.setParameterised( cl )
		swap.setInput( 1, deformer )
		swap.cook()
		src = IECoreHoudini.FnOpHolder( geo ).getParameterised().resultParameter().getValue()
		deformer = IECoreHoudini.FnOpHolder( deformer ).getParameterised().resultParameter().getValue()
		result = cl.resultParameter().getValue()
		self.failUnless( 'P' in result )
		# P swapped in from the deformed mesh, N untouched from the source
		self.assertNotEqual( result['P'].data, src['P'].data)
		self.assertEqual( result['P'].data, deformer['P'].data)
		self.assertEqual( result['N'].data, src['N'].data)
		self.assertNotEqual( result['N'].data, deformer['N'].data)
# tests compound parameter support
	def testCompoundParameters(self):
		"""Compound parameters map to nested Houdini folders, and parm
		values only sync onto the Cortex parameters when the node cooks.
		"""
		(op,fn)=self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("parameters/compoundParameters", 1)()
		fn.setParameterised( cl )
		# test we have the parameters & folders
		num_folders = [ type(p.parmTemplate()).__name__ for p in op.spareParms()].count("FolderSetParmTemplate")
		self.assertEqual( num_folders, 4 )
		p = op.parm( "parm_compound_1_jy" )
		self.assert_( p )
		self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 1') )
		p = op.parm( "parm_compound_2_kx" )
		self.assert_( p )
		self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 2') )
		p = op.parm( "parm_compound_3_compound_4_some_int" )
		self.assert_( p )
		self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 3', 'My Compound 4') )
		# test that houdini values get set on cortex parameters correctly
		p = op.parmTuple( "parm_compound_3_compound_4_some_int" )
		p.set( [345] )
		# before the cook the op still holds its previous value (123)...
		self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 123 )
		op.cook()
		# ...and the cook transfers the new parm value onto the op
		self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 345 )
		p = op.parmTuple( "parm_compound_2_j" )
		p.set( [123.456, 456.789, 0.0] )
		self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - imath.V3d( 8,16,32 ) ).length() < 0.001 )
		op.cook()
		self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - imath.V3d( 123.456, 456.789, 0 ) ).length() < 0.001 )
		# test that caching parameters works
		# reloading the class must preserve the current parameter values
		op.parm( "__classReloadButton" ).pressButton()
		op.cook()
		self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 345 )
		self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - imath.V3d( 123.456, 456.789, 0 ) ).length() < 0.001 )
	def testObjectParameterConversion(self):
		"""Houdini geometry inputs convert to the appropriate Cortex type
		for an ObjectParameter: torus -> MeshPrimitive, points-only input
		-> PointsPrimitive, upstream opHolder result passed through intact.
		"""
		(op,fn)=self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("objectDebug", 1)()
		fn.setParameterised( cl )
		op.parm("parm_quiet").set( True )
		torus = op.createInputNode(0, "torus" )
		torus.parm( "rows" ).set( 10 )
		torus.parm( "cols" ).set( 10 )
		op.cook()
		result = cl.resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result.typeId(), IECoreScene.TypeId.MeshPrimitive )
		torus.parm("type").set(1)
		# torus -> add (keep=1) -> op
		# the add (keep=1) removes the primitives but keeps the points
		add = op.createInputNode( 0, "add" )
		add.parm( "keep" ).set( 1 )
		add.setInput( 0, torus )
		op.cook()
		result = cl.resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result.typeId(), IECoreScene.TypeId.PointsPrimitive )
		# feed the op from another opHolder (cobReader): the Cortex object
		# should pass straight through as a MeshPrimitive
		op2 = op.createInputNode(0, "ieOpHolder")
		fn2 = IECoreHoudini.FnOpHolder( op2 )
		cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
		fn2.setParameterised(cl)
		op2.parm("parm_filename").set( self.__torusTestFile )
		op.cook()
		result2 = fn.getParameterised().resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result2.typeId(), IECoreScene.TypeId.MeshPrimitive )
		self.assertEqual( result2["P"].data, result["P"].data )
	def testObjectParameterWithMultipleTypesConversion( self ) :
		"""Like testObjectParameterConversion, but for an ObjectParameter
		that accepts multiple types (multiTypeObject), including raw
		V3fVectorData output from an upstream V3fVectorAdder.
		"""
		( op, fn ) = self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load( "multiTypeObject", 1 )()
		fn.setParameterised( cl )
		torus = op.createInputNode( 0, "torus" )
		torus.parm( "rows" ).set( 10 )
		torus.parm( "cols" ).set( 10 )
		op.cook()
		result = cl.resultParameter().getValue()
		self.assert_( not op.errors() )
		self.assertEqual( result.typeId(), IECoreScene.TypeId.MeshPrimitive )
		# torus -> add (keep=1) -> op
		# the add (keep=1) removes the primitives but keeps the points
		add = op.createInputNode( 0, "add" )
		add.parm( "keep" ).set( 1 )
		add.setInput( 0, torus )
		op.cook()
		result2 = cl.resultParameter().getValue()
		self.assert_( not op.errors() )
		self.assertEqual( result2.typeId(), IECoreScene.TypeId.PointsPrimitive )
		# upstream opHolder producing a MeshPrimitive passes through
		op2 = op.createInputNode( 0, "ieOpHolder" )
		fn2 = IECoreHoudini.FnOpHolder( op2 )
		cl = IECore.ClassLoader.defaultOpLoader().load( "cobReader", 1 )()
		fn2.setParameterised( cl )
		op2.parm( "parm_filename" ).set( self.__torusTestFile )
		op.cook()
		result3 = fn.getParameterised().resultParameter().getValue()
		self.assert_( not op.errors() )
		self.assertEqual( result3.typeId(), IECoreScene.TypeId.MeshPrimitive )
		self.assertEqual( result3["P"].data, result["P"].data )
		# upstream opHolder producing plain V3fVectorData is also accepted
		# and comes through as a PointsPrimitive with summed P
		cl = IECore.ClassLoader.defaultOpLoader().load( "vectors/V3fVectorAdder", 1 )()
		fn2.setParameterised( cl )
		fn2.getParameterised().parameters()['vector1'].setValue( result["P"].data )
		fn2.getParameterised().parameters()['vector2'].setValue( result["P"].data )
		op.cook()
		result4 = fn.getParameterised().resultParameter().getValue()
		self.assert_( not op.errors() )
		self.assertEqual( result4.typeId(), IECoreScene.TypeId.PointsPrimitive )
		self.assertEqual( result4["P"].data, result["P"].data + result["P"].data )
	def testPrimitiveParameterConversion(self):
		"""Same conversion checks as testObjectParameterConversion but for
		a PrimitiveParameter (parameters/primitives/primParam).
		"""
		(op,fn)=self.testOpHolder()
		cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/primParam", 1)()
		fn.setParameterised( cl )
		torus = op.createInputNode(0, "torus" )
		torus.parm( "rows" ).set( 10 )
		torus.parm( "cols" ).set( 10 )
		op.cook()
		result = cl.resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result.typeId(), IECoreScene.TypeId.MeshPrimitive )
		torus.parm("type").set(1)
		# torus -> add (keep=1) -> op
		# the add (keep=1) removes the primitives but keeps the points
		add = op.createInputNode( 0, "add" )
		add.parm( "keep" ).set( 1 )
		add.setInput( 0, torus )
		op.cook()
		result = cl.resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result.typeId(), IECoreScene.TypeId.PointsPrimitive )
		# upstream opHolder result (cobReader mesh) passes through
		op2 = op.createInputNode(0, "ieOpHolder")
		fn = IECoreHoudini.FnOpHolder( op2 )
		cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
		fn.setParameterised(cl)
		op2.parm("parm_filename").set( self.__torusTestFile )
		op.cook()
		result2 = fn.getParameterised().resultParameter().getValue()
		self.assertEqual( len( op.errors() ), 0 )
		self.assertEqual( result2.typeId(), IECoreScene.TypeId.MeshPrimitive )
		self.assertEqual( result2["P"].data, result["P"].data )
def testPointsParameterConversion(self):
    """A points-only parameter must reject a mesh loaded from disk, but
    accept native Houdini geometry (which gets converted to points)."""
    (op,fn)=self.testOpHolder()
    cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
    fn.setParameterised( cl )
    # the cob file holds a mesh, which the points-only parameter must reject
    cob = op.createInputNode(0, "ieOpHolder" )
    cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
    fn2 = IECoreHoudini.FnOpHolder( cob )
    fn2.setParameterised( cl )
    cob.parm("parm_filename").set( self.__torusTestFile )
    self.assertRaises( hou.OperationFailed, op.cook )
    self.assertNotEqual( len( op.errors() ), 0 )
    cob = op.createInputNode(0, "torus" )
    op.cook() # should pass because torus will be converted to points
    self.assertEqual( fn.getParameterised()['input'].getValue().typeId(), IECoreScene.TypeId.PointsPrimitive )
    self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECoreScene.TypeId.PointsPrimitive )
def testPolygonsParameterConversion(self):
    """A mesh-only parameter accepts meshes (loaded from disk or converted
    from Houdini geo) but rejects points geometry."""
    (op,fn)=self.testOpHolder()
    cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/polyParam", 1)()
    fn.setParameterised( cl )
    cob = op.createInputNode(0, "ieOpHolder" )
    cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
    fn2 = IECoreHoudini.FnOpHolder( cob )
    fn2.setParameterised( cl )
    cob.parm("parm_filename").set( self.__torusTestFile )
    op.cook() # should pass because we have a mesh primitive
    torus = op.createInputNode(0, "torus" )
    op.cook() # should pass because torus will be converted to mesh
    self.assertEqual( fn.getParameterised()['input'].getValue().typeId(), IECoreScene.TypeId.MeshPrimitive )
    self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECoreScene.TypeId.MeshPrimitive )
    # feeding points (the result of pointParam) into the mesh-only input must fail
    op2 = torus.createOutputNode( "ieOpHolder" )
    cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
    fn = IECoreHoudini.FnOpHolder( op2 )
    fn.setParameterised( cl )
    op2.cook()
    self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECoreScene.TypeId.PointsPrimitive )
    op.setInput( 0, op2 )
    self.assertRaises( hou.OperationFailed, op.cook )
    self.assertNotEqual( len( op.errors() ), 0 )
def testInputConnectionsSaveLoad( self ) :
    """Input connections to an op holder must survive a hip save/load cycle."""
    hou.hipFile.clear( suppress_save_prompt=True )
    ( holder, fn ) = self.testOpHolder()
    fn.setOp( "meshMerge", 1 )
    # record node paths rather than node handles, since the handles die
    # when the hip file is cleared
    holderPath = holder.path()
    torusPath = holder.createInputNode( 0, "torus" ).path()
    boxPath = holder.createInputNode( 1, "box" ).path()
    self.assertEqual( len(holder.inputs()), 2 )
    self.assertEqual( holder.inputs()[0].path(), torusPath )
    self.assertEqual( holder.inputs()[1].path(), boxPath )
    hip = "test/opHolder_testData/opSave_test.hip"
    hou.hipFile.save( hip )
    hou.hipFile.clear( suppress_save_prompt=True )
    hou.hipFile.load( hip )
    # the reloaded holder should come back with identical wiring
    holder = hou.node( holderPath )
    self.assertEqual( len(holder.inputs()), 2 )
    self.assertEqual( holder.inputs()[0].path(), torusPath )
    self.assertEqual( holder.inputs()[1].path(), boxPath )
def testInvalidValidation(self):
    """Cooking must fail when an upstream result doesn't satisfy the
    downstream op's input parameter."""
    (op,fn)=self.testOpHolder()
    cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
    fn.setParameterised( cl )
    op.parm("parm_filename").set( self.__torusTestFile )
    # the cob file holds a mesh, which the points-only parameter must reject
    op2 = op.createOutputNode( "ieOpHolder" )
    cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
    fn = IECoreHoudini.FnOpHolder(op2)
    fn.setParameterised(cl)
    self.assertRaises( hou.OperationFailed, op2.cook )
    self.assertNotEqual( len( op2.errors() ), 0 )
def testInvalidOp(self):
    """Cooking a holder whose op cannot produce a result should fail and
    surface node errors."""
    ( holder, fn ) = self.testOpHolder()
    deformer = IECore.ClassLoader.defaultOpLoader().load( "noiseDeformer", 1 )()
    fn.setParameterised( deformer )
    self.assertRaises( hou.OperationFailed, holder.cook )
    self.assertNotEqual( len( holder.errors() ), 0 )
def testMatchString(self):
    """__classMatchString filters which op classes the holder exposes.

    Narrowing the match string shrinks fn.classNames(); note that an
    already-set __className outside the filter still loads (the inline
    comment questions whether that should be invalid).
    """
    (op,fn)=self.testOpHolder()
    op.parm( "__classMatchString" ).set( "*" )
    op.parm( "__className" ).set( "cobReader" )
    op.parm( "__className" ).pressButton()
    cl = fn.getParameterised()
    self.assertEqual( cl.typeName(), "cobReader" )
    # restrict the visible classes to those matching "object*"
    op.parm( "__classMatchString" ).set( "object*" )
    self.assertEqual( len(fn.classNames()), 1 )
    op.parm( "__className" ).set( "cobReader" ) # this still works, should it be invalid?
    op.parm( "__className" ).pressButton()
    cl = fn.getParameterised()
    self.assertEqual( cl.typeName(), "cobReader" )
    # widening the filter exposes multiple classes again
    # (assertTrue replaces the deprecated failUnless alias)
    op.parm( "__classMatchString" ).set("*")
    self.assertTrue( len(fn.classNames()) > 1 )
def testCategories( self ) :
    """Selecting a categorised class updates __classCategory; clearing both
    the class and category leaves the holder with nothing to cook."""
    ( op, fn ) = self.testOpHolder()
    op.parm( "__classMatchString" ).set( "*" )
    # an uncategorised class leaves __classCategory empty
    self.assertEqual( op.parm( "__classCategory" ).eval(), "" )
    op.parm( "__className" ).set( "cobReader" )
    self.assertEqual( op.parm( "__classCategory" ).eval(), "" )
    op.parm( "__className" ).pressButton()
    self.assertEqual( fn.getParameterised().typeName(), "cobReader" )
    self.assertEqual( fn.getParameterised().path, "cobReader" )
    # a categorised class fills in its category
    op.parm( "__className" ).set( "vectors/V3fVectorCreator" )
    op.parm( "__className" ).pressButton()
    self.assertEqual( op.parm( "__classCategory" ).eval(), "vectors" )
    self.assertEqual( fn.getParameterised().typeName(), "V3fVectorCreator" )
    self.assertEqual( fn.getParameterised().path, "vectors/V3fVectorCreator" )
    # clearing the class name alone leaves the category in place
    op.parm( "__className" ).set( "" )
    op.parm( "__className" ).pressButton()
    self.assertEqual( op.parm( "__classCategory" ).eval(), "vectors" )
    # clearing the category as well invalidates the holder
    op.parm( "__classCategory" ).set( "" )
    op.parm( "__classCategory" ).pressButton()
    self.assertRaises( hou.OperationFailed, op.cook )
    self.assertEqual( op.parm( "__className" ).eval(), "" )
    op.parm( "__className" ).set( "parameters/compoundParameters" )
    op.parm( "__className" ).pressButton()
    self.assertEqual( op.parm( "__classCategory" ).eval(), "parameters" )
    self.assertEqual( fn.getParameterised().typeName(), "compoundParameters" )
    self.assertEqual( fn.getParameterised().path, "parameters/compoundParameters" )
    # nested categories are reported with their full path
    op.parm( "__className" ).set( "parameters/primitives/pointParam" )
    op.parm( "__className" ).pressButton()
    self.assertEqual( op.parm( "__classCategory" ).eval(), "parameters/primitives" )
    self.assertEqual( fn.getParameterised().typeName(), "pointParam" )
    self.assertEqual( fn.getParameterised().path, "parameters/primitives/pointParam" )
    op.parm( "__classCategory" ).set( "" )
    op.parm( "__classCategory" ).pressButton()
    # assertTrue replaces the deprecated failUnless alias
    self.assertTrue( len(fn.classNames()) > 4 )
    op.parm( "__classMatchString" ).set( "parameters/*" )
    self.assertEqual( len(fn.classNames()), 5 )
def testSetOpValues( self ) :
    """setOp transfers the op's current parameter values onto the holder's
    parms, while the parm defaults always reflect the op's declared defaults."""
    ( holder, fn ) = self.testOpHolder()
    op = IECore.ClassLoader.defaultOpLoader().load( "noiseDeformer", 1 )()
    fn.setOp( op )
    # freshly loaded op: current value == default value on the holder
    self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder.parmTuple( "parm_frequency" ).parmTemplate().defaultValue() )
    self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder.parmTuple( "parm_frequency" ).eval() )
    self.assertEqual( tuple(op.parameters()['frequency'].getTypedValue()), holder.parmTuple( "parm_frequency" ).eval() )
    ( holder2, fn2 ) = self.testOpHolder()
    # change the op's value before handing it to a second holder
    op.parameters()['frequency'].setTypedValue( imath.V3f( 0.2, 0.4, 0.6 ) )
    fn2.setOp( op )
    # the parm default stays the op default, but the evaluated value follows the op
    self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder2.parmTuple( "parm_frequency" ).parmTemplate().defaultValue() )
    self.assertNotEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder2.parmTuple( "parm_frequency" ).eval() )
    self.assertEqual( tuple(op.parameters()['frequency'].getTypedValue()), holder2.parmTuple( "parm_frequency" ).eval() )
def testParameterDescriptions( self ) :
    """Parameter descriptions should be mirrored as help text on the
    corresponding Houdini parm templates."""
    ( holder, fn ) = self.testOpHolder()
    fn.setOp( "parameters/compoundParameters" )
    parameters = fn.getOp().parameters()
    # ( parameter path, houdini parm name, parm accessor ) triples
    cases = [
        ( [ 'blah' ], "parm_blah", holder.parm ),
        ( [ 'compound_1', 'j' ], "parm_compound_1_j", holder.parmTuple ),
        ( [ 'compound_1', 'k' ], "parm_compound_1_k", holder.parmTuple ),
        ( [ 'compound_3', 'compound_4', 'some_int' ], "parm_compound_3_compound_4_some_int", holder.parm ),
        ( [ 'compound_5', 'bool_1' ], "parm_compound_5_bool_1", holder.parm ),
    ]
    for path, parmName, accessor in cases :
        parameter = parameters
        for name in path :
            parameter = parameter[name]
        self.assertEqual( parameter.description, accessor( parmName ).parmTemplate().help() )
def testNumericPresetMenus( self ) :
    """Numeric parameters with presetsOnly presets become Houdini menu parms.

    At present, Int/FloatParameters only support presetsOnly presets, due to
    the limitations of hou.MenuParmTemplate.
    """
    ( holder, fn ) = self.testOpHolder()
    holder.createInputNode( 0, "box" )
    fn.setOp( "parameters/primitives/preset", 1 )
    parm = holder.parm( "parm_switch" )
    # assertTrue replaces the deprecated failUnless alias throughout
    self.assertTrue( isinstance( parm, hou.Parm ) )
    template = parm.parmTemplate()
    self.assertTrue( isinstance( template, hou.MenuParmTemplate ) )
    # the int values are stored as strings in this crazy Houdini world
    self.assertEqual( template.menuItems(), ( "20", "30" ) )
    self.assertEqual( template.menuLabels(), ( "A", "B" ) )
    self.assertEqual( template.defaultValue(), 0 )
    self.assertEqual( template.defaultValueAsString(), "20" )
    self.assertEqual( parm.eval(), 0 )
    self.assertEqual( parm.evalAsString(), "20" )
    # but on the op values are really the ints we require
    op = fn.getOp()
    self.assertEqual( op["switch"].getTypedValue(), 20 )
    parm.set( 1 )
    holder.cook()
    self.assertEqual( op["switch"].getTypedValue(), 30 )
    # Houdini 16 does not allow ordered menu parms to be set to non-menu items
    # if the parm is set to an index, and the index doesn't exist, then the parm is set to the closest item menu
    if hou.applicationVersion()[0] < 16:
        parm.set( 2 )
        self.assertRaises( hou.OperationFailed, holder.cook )
        parm.set( -1 )
        self.assertRaises( hou.OperationFailed, holder.cook )
    parm.set( 0 )
    holder.cook()
    self.assertTrue( not holder.errors() )
    # transferring the op onto a fresh holder preserves the chosen preset
    newHolder = holder.parent().createNode( "ieOpHolder" )
    newFn = IECoreHoudini.FnOpHolder( newHolder )
    op["switch"].setTypedValue( 30 )
    newFn.setOp( op )
    newParm = newHolder.parm( "parm_switch" )
    self.assertEqual( newParm.eval(), 1 )
    self.assertEqual( newParm.evalAsString(), "30" )
def testMessageHandling( self ) :
    """Op messages surface as node errors/warnings, gated by the
    objectDebug op's messageLevel parm."""
    ( holder, fn ) = self.testOpHolder()
    fn.setOp( "noiseDeformer" )
    self.assertRaises( hou.OperationFailed, holder.cook )
    # assertTrue replaces the deprecated failUnless alias
    self.assertTrue( "Must have primvar 'N' in primitive!" in "".join( holder.errors() ) )
    torus = holder.createInputNode( 0, "torus" )
    self.assertRaises( hou.OperationFailed, holder.cook )
    self.assertTrue( "Must have primvar 'N' in primitive!" in "".join( holder.errors() ) )
    # adding normals upstream clears the error
    holder2 = holder.createInputNode( 0, "ieOpHolder" )
    fn2 = IECoreHoudini.FnOpHolder( holder2 )
    fn2.setOp( "meshNormalsOp" )
    holder2.setInput( 0, torus )
    holder.cook()
    self.assertEqual( len( holder.errors() ), 0 )
    self.assertEqual( len( holder2.errors() ), 0 )
    fn2.setOp( "objectDebug", 2 )
    self.assertEqual( len( holder2.errors() ), 0 )
    self.assertEqual( len( holder2.warnings() ), 0 )
    # at Warning level the messages appear as node warnings, not errors
    holder2.parm( "parm_messageLevel" ).set( int(IECore.MessageHandler.Level.Warning) )
    holder2.cook()
    self.assertEqual( len( holder2.errors() ), 0 )
    self.assertNotEqual( len(holder2.warnings()), 0 )
    # at Error level the messages become node errors and the cook fails
    holder2.parm( "parm_messageLevel" ).set( int(IECore.MessageHandler.Level.Error) )
    self.assertRaises( hou.OperationFailed, holder2.cook )
    self.assertNotEqual( len( holder2.errors() ), 0 )
    self.assertEqual( len( holder2.warnings() ), 0 )
def testAnimatedValues( self ) :
    """Animated parm values only reach the op once setParameterisedValues
    flushes them."""
    noise = IECoreHoudini.FnOpHolder.create( "test", "noiseDeformer", 1 )
    fn = IECoreHoudini.FnOpHolder( noise )
    # drive the magnitude parm with the (fractional) frame number
    noise.parm( "parm_magnitude" ).setExpression( "$FF" )
    hou.setFrame( 1 )
    self.assertEqual( noise.evalParm( "parm_magnitude" ), 1 )
    self.assertEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 1 )
    hou.setFrame( 12.25 )
    self.assertEqual( noise.evalParm( "parm_magnitude" ), 12.25 )
    # values haven't been flushed yet
    self.assertAlmostEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 1 )
    # so we flush them
    fn.setParameterisedValues()
    self.assertAlmostEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 12.25 )
def namedScene( self, opType ) :
    """Build a scene of three named boxes (boxA/boxB/boxC, offset along tx),
    merged and converted to Cortex objects, feeding a holder for *opType*.

    Returns the op holder node.
    """
    holder = IECoreHoudini.FnOpHolder.create( "holder", opType, 1 )
    geo = holder.parent()
    # build each named box chain: box [-> xform] -> name
    nameNodes = []
    for boxName, translateX in [ ( "boxA", 0 ), ( "boxB", 5 ), ( "boxC", 10 ) ] :
        node = geo.createNode( "box" )
        if translateX :
            node = node.createOutputNode( "xform" )
            node.parm( "tx" ).set( translateX )
        node = node.createOutputNode( "name" )
        node.parm( "name1" ).set( boxName )
        nameNodes.append( node )
    merge = geo.createNode( "merge" )
    for index, node in enumerate( nameNodes ) :
        merge.setInput( index, node )
    converter = merge.createOutputNode( "ieCortexConverter" )
    converter.parm( "resultType" ).set( 0 ) # Cortex
    holder.setInput( 0, converter )
    return holder
def testMultipleOperations( self ) :
    """The holder should apply its op once per named shape when the input
    holds multiple CortexObjects, honouring the input nameFilter."""
    holder = self.namedScene( "meshNormalsOp" )

    # passThrough default changed from a mutable [] to an immutable tuple
    # (only membership tests are performed, so behaviour is unchanged)
    def verify( passThrough = () ) :
        # every named box survives as its own Cortex primitive; filtered-out
        # ("passThrough") shapes keep only "P", processed ones gain "N"
        geo = holder.geometry()
        self.assertEqual( len( holder.errors() ), 0 )
        self.assertEqual( len( holder.warnings() ), 0 )
        self.assertEqual( len(geo.prims()), 3 )
        names = [ "boxA", "boxB", "boxC" ]
        for i in range( 0, len(geo.prims()) ) :
            prim = geo.prims()[i]
            self.assertEqual( prim.type(), hou.primType.Custom )
            self.assertEqual( prim.attribValue( "name" ), names[i] )
        result = IECoreHoudini.FromHoudiniGeometryConverter.create( holder ).convert()
        self.assertTrue( result.isInstanceOf( IECore.TypeId.CompoundObject ) )
        self.assertEqual( len(result), 3 )
        for name, child in result.items() :
            self.assertTrue( child.isInstanceOf( IECoreScene.TypeId.MeshPrimitive ) )
            self.assertTrue( child.arePrimitiveVariablesValid() )
            if name in passThrough :
                self.assertEqual( child.keys(), [ "P" ] )
            else :
                self.assertEqual( child.keys(), [ "N", "P" ] )

    # normals were added to each mesh individually
    verify()
    # non-matching shapes were passed through unmodified
    holder.parm( "parm_input_nameFilter" ).set( "* ^boxA" )
    verify( passThrough = [ "boxA" ] )
    # still operates multiple times for normal houdini geo
    # (removed an unused local "names" list that used to sit here)
    holder.inputConnections()[0].inputNode().bypass( True )
    geo = holder.geometry()
    self.assertEqual( len( holder.errors() ), 0 )
    self.assertEqual( len( holder.warnings() ), 0 )
    self.assertEqual( len(geo.prims()), 8 )
    for i in range( 0, 6 ) :
        prim = geo.prims()[i]
        self.assertEqual( prim.type(), hou.primType.Polygon )
        self.assertEqual( prim.attribValue( "name" ), "boxA" )
    prim = geo.prims()[6]
    self.assertEqual( prim.type(), hou.primType.Custom )
    self.assertEqual( prim.attribValue( "name" ), "boxB" )
    prim = geo.prims()[7]
    self.assertEqual( prim.type(), hou.primType.Custom )
    self.assertEqual( prim.attribValue( "name" ), "boxC" )
    converter = IECoreHoudini.FromHoudiniGeometryConverter.create( holder )
    self.assertEqual( converter, None )
    # no nameFilter with normal geo compresses to one mesh
    holder.parm( "parm_input_useNameFilter" ).set( False )
    geo = holder.geometry()
    self.assertEqual( len( holder.errors() ), 0 )
    self.assertEqual( len( holder.warnings() ), 0 )
    self.assertEqual( len(geo.prims()), 1 )
    prim = geo.prims()[0]
    self.assertEqual( prim.type(), hou.primType.Custom )
    self.assertEqual( geo.findPrimAttrib( "name" ), None )
    result = IECoreHoudini.FromHoudiniGeometryConverter.create( holder ).convert()
    self.assertTrue( result.isInstanceOf( IECoreScene.TypeId.MeshPrimitive ) )
    self.assertTrue( result.arePrimitiveVariablesValid() )
    self.assertEqual( result.keys(), [ "N", "P" ] )
    # no nameFilter with CortexObjects may have unexpected results (because the input parameter wants a single mesh)
    holder.inputConnections()[0].inputNode().bypass( False )
    holder.cook()
    self.assertEqual( len( holder.errors() ), 0 )
    self.assertNotEqual( len(holder.warnings()), 0 )
def testNameFilterOnSecondaryInputs( self ) :
    """nameFilter parms on secondary inputs control which named shapes get
    merged into each primary shape."""
    holder = self.namedScene( "meshMerge" )
    torus = holder.parent().createNode( "torus" )
    torus.parm( "rows" ).set( 10 )
    torus.parm( "cols" ).set( 10 )
    holder.setInput( 1, torus )

    def verify( numMergedFaces, passThrough = [] ) :
        # each named box remains a separate Cortex primitive; merged boxes
        # gained numMergedFaces extra faces, passThrough boxes kept their 6
        geo = holder.geometry()
        self.assertEqual( len( holder.errors() ), 0 )
        self.assertEqual( len( holder.warnings() ), 0 )
        self.assertEqual( len(geo.prims()), 3 )
        names = [ "boxA", "boxB", "boxC" ]
        for i in range( 0, len(geo.prims()) ) :
            prim = geo.prims()[i]
            self.assertEqual( prim.type(), hou.primType.Custom )
            self.assertEqual( prim.attribValue( "name" ), names[i] )
        converter = IECoreHoudini.FromHoudiniGeometryConverter.create( holder )
        result = converter.convert()
        self.assertTrue( result.isInstanceOf( IECore.TypeId.CompoundObject ) )
        self.assertEqual( len(result), 3 )
        for name, child in result.items():
            self.assertTrue( child.isInstanceOf( IECoreScene.TypeId.MeshPrimitive ) )
            self.assertTrue( child.arePrimitiveVariablesValid() )
            if name in passThrough :
                self.assertEqual( child.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ), 6 )
            else :
                self.assertEqual( child.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ), 6 + numMergedFaces )

    # torus is merged with each box
    verify( numMergedFaces = 100 )
    # torus is not merged with the passThrough boxes
    holder.parm( "parm_input_nameFilter" ).set( "* ^boxA" )
    verify( numMergedFaces = 100, passThrough = [ "boxA" ] )
    # multiple meshes in the second parameter may have unexpected results (because it wants a single mesh)
    holder.setInput( 1, holder.inputConnections()[0].inputNode() )
    holder.cook()
    self.assertEqual( len( holder.errors() ), 0 )
    self.assertNotEqual( len(holder.warnings()), 0 )
    # a single mesh will merge
    holder.parm( "parm_mesh_nameFilter" ).set( "boxB" )
    verify( numMergedFaces = 6, passThrough = [ "boxA" ] )
    # a bulk of normal houdini geo will also merge (it compresses to one mesh)
    converter = holder.inputConnections()[0].inputNode().createOutputNode( "ieCortexConverter" )
    holder.setInput( 1, converter )
    holder.parm( "parm_mesh_nameFilter" ).set( "*" )
    verify( numMergedFaces = 18, passThrough = [ "boxA" ] )
    holder.parm( "parm_mesh_nameFilter" ).set( "* ^boxA" )
    verify( numMergedFaces = 12, passThrough = [ "boxA" ] )
    holder.parm( "parm_mesh_useNameFilter" ).set( False )
    verify( numMergedFaces = 18, passThrough = [ "boxA" ] )
def setUp( self ) :
    """Pick version-appropriate cob fixtures and ensure the scratch
    directory exists."""
    IECoreHoudini.TestCase.setUp( self )
    # H14 changed geometry layout, so it gets its own fixture file
    if hou.applicationVersion()[0] < 14 :
        self.__torusTestFile = "test/IECoreHoudini/data/torus.cob"
    else :
        self.__torusTestFile = "test/IECoreHoudini/data/torusH14.cob"
    self.__torusNormalsTestFile = "test/IECoreHoudini/data/torus_with_normals.cob"
    if not os.path.exists( "test/opHolder_testData" ) :
        os.mkdir( "test/opHolder_testData" )
def tearDown( self ) :
    """Remove the scratch directory created by setUp, if present."""
    if not os.path.exists( "test/opHolder_testData" ) :
        return
    shutil.rmtree( "test/opHolder_testData" )
# Allow this test file to be executed directly.
if __name__ == "__main__":
    unittest.main()
| |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.spanner_admin_database_v1.proto import (
spanner_database_admin_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DatabaseAdminStub(object):
    """Cloud Spanner Database Admin API

    The Cloud Spanner Database Admin API can be used to create, drop, and
    list databases. It also enables updating the schema of pre-existing
    databases.
    """

    # NOTE: generated by the gRPC protocol compiler plugin — regenerate
    # from the proto rather than editing by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Each attribute below is a unary-unary callable for one
        # DatabaseAdmin RPC, bound to the generated proto (de)serializers.
        self.ListDatabases = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString,
        )
        self.CreateDatabase = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetDatabase = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.FromString,
        )
        self.UpdateDatabaseDdl = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.DropDatabase = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetDatabaseDdl = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString,
        )
        self.SetIamPolicy = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.GetIamPolicy = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
class DatabaseAdminServicer(object):
    """Cloud Spanner Database Admin API

    The Cloud Spanner Database Admin API can be used to create, drop, and
    list databases. It also enables updating the schema of pre-existing
    databases.
    """

    # NOTE: generated service base class — subclass and override the
    # methods below; each default implementation reports UNIMPLEMENTED.

    def ListDatabases(self, request, context):
        """Lists Cloud Spanner databases.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateDatabase(self, request, context):
        """Creates a new Cloud Spanner database and starts to prepare it for serving.
        The returned [long-running operation][google.longrunning.Operation] will
        have a name of the format `<database_name>/operations/<operation_id>` and
        can be used to track preparation of the database. The
        [metadata][google.longrunning.Operation.metadata] field type is
        [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The
        [response][google.longrunning.Operation.response] field type is
        [Database][google.spanner.admin.database.v1.Database], if successful.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetDatabase(self, request, context):
        """Gets the state of a Cloud Spanner database.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateDatabaseDdl(self, request, context):
        """Updates the schema of a Cloud Spanner database by
        creating/altering/dropping tables, columns, indexes, etc. The returned
        [long-running operation][google.longrunning.Operation] will have a name of
        the format `<database_name>/operations/<operation_id>` and can be used to
        track execution of the schema change(s). The
        [metadata][google.longrunning.Operation.metadata] field type is
        [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DropDatabase(self, request, context):
        """Drops (aka deletes) a Cloud Spanner database.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetDatabaseDdl(self, request, context):
        """Returns the schema of a Cloud Spanner database as a list of formatted
        DDL statements. This method does not show pending schema updates, those may
        be queried using the [Operations][google.longrunning.Operations] API.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def SetIamPolicy(self, request, context):
        """Sets the access control policy on a database resource.
        Replaces any existing policy.
        Authorization requires `spanner.databases.setIamPolicy`
        permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for a database resource.
        Returns an empty policy if a database exists but does
        not have a policy set.
        Authorization requires `spanner.databases.getIamPolicy` permission on
        [resource][google.iam.v1.GetIamPolicyRequest.resource].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def TestIamPermissions(self, request, context):
        """Returns permissions that the caller has on the specified database resource.
        Attempting this RPC on a non-existent Cloud Spanner database will
        result in a NOT_FOUND error if the user has
        `spanner.databases.list` permission on the containing Cloud
        Spanner instance. Otherwise returns an empty set of permissions.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_DatabaseAdminServicer_to_server(servicer, server):
    # Generated registration helper: wires each servicer method up as a
    # unary-unary handler under the fully-qualified DatabaseAdmin service name.
    rpc_method_handlers = {
        "ListDatabases": grpc.unary_unary_rpc_method_handler(
            servicer.ListDatabases,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.SerializeToString,
        ),
        "CreateDatabase": grpc.unary_unary_rpc_method_handler(
            servicer.CreateDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetDatabase": grpc.unary_unary_rpc_method_handler(
            servicer.GetDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.SerializeToString,
        ),
        "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateDatabaseDdl,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "DropDatabase": grpc.unary_unary_rpc_method_handler(
            servicer.DropDatabase,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler(
            servicer.GetDatabaseDdl,
            request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.FromString,
            response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.SerializeToString,
        ),
        "SetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.SetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "GetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.GetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "TestIamPermissions": grpc.unary_unary_rpc_method_handler(
            servicer.TestIamPermissions,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| |
from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, mock, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import Article, ProxySpy, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
    # Django's TestCase machinery installs these fixtures before each test.
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Check that test case has installed 3 fixture objects"
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
    """
    Make sure that subclasses can remove fixtures from parent class (#21089).
    """
    # Overriding with an empty list disables the fixtures inherited above.
    fixtures = []

    def testClassFixtures(self):
        "Check that there were no fixture objects installed"
        self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
    """Mixin providing a helper that runs ``dumpdata`` and asserts on its output."""

    def _dumpdata_assert(self, args, output, format='json', filename=None,
                         natural_foreign_keys=False, natural_primary_keys=False,
                         use_base_manager=False, exclude_list=None, primary_keys=''):
        """Call the ``dumpdata`` management command and compare the result to *output*.

        When *filename* is given the command writes to a temp file, which is
        read back and removed; otherwise the captured stdout is compared.
        Comparison is format-aware (JSON/XML equality, plain equality otherwise).
        """
        # exclude_list defaults to None instead of a mutable [] so the
        # default can never be shared (or mutated) across calls.
        if exclude_list is None:
            exclude_list = []
        new_io = six.StringIO()
        if filename:
            filename = os.path.join(tempfile.gettempdir(), filename)
        management.call_command('dumpdata', *args, **{'format': format,
                                                      'stdout': new_io,
                                                      'stderr': new_io,
                                                      'output': filename,
                                                      'use_natural_foreign_keys': natural_foreign_keys,
                                                      'use_natural_primary_keys': natural_primary_keys,
                                                      'use_base_manager': use_base_manager,
                                                      'exclude': exclude_list,
                                                      'primary_keys': primary_keys})
        if filename:
            with open(filename, "r") as f:
                command_output = f.read()
            os.remove(filename)
        else:
            command_output = new_io.getvalue().strip()
        if format == "json":
            self.assertJSONEqual(command_output, output)
        elif format == "xml":
            self.assertXMLEqual(command_output, output)
        else:
            self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
    """
    End-to-end tests of the ``loaddata`` and ``dumpdata`` management commands
    against the fixtures app's models (Article, Tag, Visa, Spy, ...).
    """

    def test_loading_and_dumping(self):
        """Load the bundled fixtures in several formats and round-trip them through dumpdata."""
        apps.clear_cache()
        Site.objects.all().delete()
        # Load fixture 1. Single JSON file, with two objects.
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Try just dumping the contents of fixtures.Category
        self._dumpdata_assert(
            ['fixtures.Category'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
            '"title": "News Stories"}}]'
        )
        # ...and just fixtures.Article
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # ...and both
        self._dumpdata_assert(
            ['fixtures.Category', 'fixtures.Article'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
            '"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
            'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
            '"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify a specific model twice
        self._dumpdata_assert(
            ['fixtures.Article', 'fixtures.Article'],
            (
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
                '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
            )
        )
        # Specify a dump that specifies Article both explicitly and implicitly
        self._dumpdata_assert(
            ['fixtures.Article', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify a dump that specifies Article both explicitly and implicitly,
        # but lists the app first (#22025).
        self._dumpdata_assert(
            ['fixtures', 'fixtures.Article'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Same again, but specify in the reverse order
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
            'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
            ' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify one model from one application, and an entire other application.
        self._dumpdata_assert(
            ['fixtures.Category', 'sites'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
            '"example.com"}}]'
        )
        # Load fixture 2. JSON file imported by default. Overwrites some existing objects
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Load fixture 3, XML format.
        management.call_command('loaddata', 'fixture3.xml', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture6.json', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "law">',
        ], ordered=False)
        # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture7.xml', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
            '<Tag: <Article: Django conquers world!> tagged "django">',
            '<Tag: <Article: Django conquers world!> tagged "world domination">',
        ], ordered=False)
        # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture8.json', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user>',
            '<Visa: Prince >'
        ], ordered=False)
        # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture9.xml', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user, Can delete user>',
            '<Visa: Artist formerly known as "Prince" Can change user>'
        ], ordered=False)
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # By default, you get raw keys on dumpdata
        self._dumpdata_assert(
            ['fixtures.book'],
            '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
        )
        # But you can get natural keys if you ask for them and they are available
        self._dumpdata_assert(
            ['fixtures.book'],
            '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
            'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
            natural_foreign_keys=True
        )
        # You can also omit the primary keys for models that we can get later with natural keys.
        self._dumpdata_assert(
            ['fixtures.person'],
            '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
            'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
            '\\"Prince\\""}, "model": "fixtures.person"}]',
            natural_primary_keys=True
        )
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
            'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
            '"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
            '"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
            'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
            '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
            '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
            '"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
            '"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
            '{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
            '"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
            '"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
            '"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
            '"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
            '["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
            '"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
            '"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
            ' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
            '{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
            'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
            natural_foreign_keys=True
        )
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(
            ['fixtures'],
            '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
            'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
            'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
            'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
            'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
            'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
            'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
            'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
            '</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
            'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
            'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
            '</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
            '<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
            '</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
            '</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
            '</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
            'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
            'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
            'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
            'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
            'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
            '<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
            'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
            '<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
            'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
            '<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
            '</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
            'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
            'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
            '<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
            '</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
            '</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
            '</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
            'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
            ' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
            '<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
            '<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
            '</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
            'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
            'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
            '<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
            'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
            'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
            '"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
            '</django-objects>',
            format='xml', natural_foreign_keys=True
        )

    def test_dumpdata_with_excludes(self):
        """dumpdata honors --exclude for apps and individual models, and rejects bogus labels."""
        # Load fixture1 which has a site, two articles, and a category
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        # Excluding fixtures app should only leave sites
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
            exclude_list=['fixtures'])
        # Excluding fixtures.Article/Book should leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding fixtures and fixtures.Article/Book should be a no-op
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding sites and fixtures.Article/Book should only leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
        )
        # Excluding a bogus app should throw an error
        with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
        # Excluding a bogus model should throw an error
        with self.assertRaisesMessage(management.CommandError, "Unknown model in excludes: fixtures.FooModel"):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])

    @unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
    def test_load_fixture_with_special_characters(self):
        """Fixture files whose names contain glob special characters can still be loaded."""
        management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])

    def test_dumpdata_with_filtering_manager(self):
        """dumpdata uses the default (filtering) manager unless --all is given."""
        spy1 = Spy.objects.create(name='Paul')
        spy2 = Spy.objects.create(name='Alex', cover_blown=True)
        self.assertQuerysetEqual(Spy.objects.all(),
                                 ['<Spy: Paul>'])
        # Use the default manager
        self._dumpdata_assert(
            ['fixtures.Spy'],
            '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
        )
        # Dump using Django's base manager. Should return all objects,
        # even those normally filtered by the manager
        self._dumpdata_assert(
            ['fixtures.Spy'],
            '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
            '"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
            use_base_manager=True
        )

    def test_dumpdata_with_pks(self):
        """dumpdata --pks restricts output to the given primary keys and requires exactly one model."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
            primary_keys='2,3'
        )
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}]',
            primary_keys='2'
        )
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                '',
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures.Article', 'fixtures.category'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )

    def test_dumpdata_with_file_output(self):
        """dumpdata --output writes the serialized data to the named file."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
            filename='dumpdata.json'
        )

    def test_dumpdata_progressbar(self):
        """
        Dumpdata shows a progress bar on the command line when --output is set,
        stdout is a tty, and verbosity > 0.
        """
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        new_io = six.StringIO()
        # Fake a tty so the progress bar is rendered.
        new_io.isatty = lambda: True
        with NamedTemporaryFile() as file:
            options = {
                'format': 'json',
                'stdout': new_io,
                'stderr': new_io,
                'output': file.name,
            }
            management.call_command('dumpdata', 'fixtures', **options)
            self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
            # Test no progress bar when verbosity = 0
            options['verbosity'] = 0
            new_io = six.StringIO()
            new_io.isatty = lambda: True
            options.update({'stdout': new_io, 'stderr': new_io})
            management.call_command('dumpdata', 'fixtures', **options)
            self.assertEqual(new_io.getvalue(), '')

    def test_dumpdata_proxy_without_concrete(self):
        """
        A warning is displayed if a proxy model is dumped without its concrete
        parent.
        """
        ProxySpy.objects.create(name='Paul')
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
        warning = warning_list.pop()
        self.assertEqual(warning.category, ProxyModelWarning)
        self.assertEqual(
            str(warning.message),
            "fixtures.ProxySpy is a proxy model and won't be serialized."
        )

    def test_dumpdata_proxy_with_concrete(self):
        """
        A warning isn't displayed if a proxy model is dumped with its concrete
        parent.
        """
        spy = ProxySpy.objects.create(name='Paul')
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            self._dumpdata_assert(
                ['fixtures.ProxySpy', 'fixtures.Spy'],
                '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
            )
        self.assertEqual(len(warning_list), 0)

    def test_compress_format_loading(self):
        """A compressed fixture can be loaded when its serialization format is given."""
        # Load fixture 4 (compressed), using format specification
        management.call_command('loaddata', 'fixture4.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
        ])

    def test_compressed_specified_loading(self):
        """A compressed fixture can be loaded with both format and compression extensions."""
        # Load fixture 5 (compressed), using format *and* compression specification
        management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_compressed_loading(self):
        """A compressed fixture can be loaded with only the compression extension."""
        # Load fixture 5 (compressed), only compression specification
        management.call_command('loaddata', 'fixture5.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_ambiguous_compressed_fixture(self):
        """An ambiguous fixture name (matching multiple files) raises CommandError."""
        # The name "fixture5" is ambiguous, so loading it will raise an error
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture5', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])

    def test_db_loading(self):
        """Fixtures with a database suffix load against the 'default' database implicitly."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0)
        management.call_command('loaddata', 'db_fixture_2', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])

    def test_loaddata_error_message(self):
        """
        Verifies that loading a fixture which contains an invalid object
        outputs an error message which contains the pk of the object
        that triggered the error.
        """
        # MySQL needs a little prodding to reject invalid data.
        # This won't affect other tests because the database connection
        # is closed at the end of each test.
        if connection.vendor == 'mysql':
            connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
        with self.assertRaises(IntegrityError) as cm:
            management.call_command('loaddata', 'invalid.json', verbosity=0)
        self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])

    def test_loaddata_app_option(self):
        """
        Verifies that the --app option works.
        """
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
            management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
        self.assertQuerysetEqual(Article.objects.all(), [])
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
        ])

    def test_loaddata_verbosity_three(self):
        """At verbosity 3, loaddata reports per-object progress on stdout."""
        output = six.StringIO()
        management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
        command_output = force_text(output.getvalue())
        self.assertIn(
            "\rProcessed 1 object(s).\rProcessed 2 object(s)."
            "\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
            command_output
        )

    def test_loading_using(self):
        """Fixtures with a database suffix load when the matching --database is given explicitly."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
        management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])

    def test_unmatched_identifier_loading(self):
        """A fixture whose database suffix doesn't match the target database isn't found."""
        # Try to load db fixture 3. This won't load because the database identifier doesn't match
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
            management.call_command('loaddata', 'db_fixture_3', verbosity=0)
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
            management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [])

    def test_output_formats(self):
        """dumpdata can emit both JSON and XML with natural foreign keys."""
        # Load back in fixture 1, we need the articles from it
        management.call_command('loaddata', 'fixture1', verbosity=0)
        # Try to load fixture 6 using format discovery
        management.call_command('loaddata', 'fixture6', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Time to reform copyright> tagged "copyright">',
            '<Tag: <Article: Time to reform copyright> tagged "law">'
        ], ordered=False)
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
            '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
            '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
            '"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
            'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
            '{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
            natural_foreign_keys=True
        )
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(
            ['fixtures'],
            '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
            'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
            'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
            'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
            '<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
            'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
            '<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
            'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
            'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
            '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
            '</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
            'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
            '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
            '</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
            '</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
            'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
            'Prince</field></object></django-objects>',
            format='xml', natural_foreign_keys=True
        )
class NonExistentFixtureTests(TestCase):
    """
    Custom class to limit fixture dirs.
    """
    available_apps = ['django.contrib.auth', 'django.contrib.contenttypes']

    def test_loaddata_not_existent_fixture_file(self):
        """Loading a fixture that doesn't exist raises CommandError."""
        out = six.StringIO()
        expected_msg = "No fixture named 'this_fixture_doesnt_exist' found."
        with self.assertRaisesMessage(CommandError, expected_msg):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=out)

    @mock.patch('django.db.connection.enable_constraint_checking')
    @mock.patch('django.db.connection.disable_constraint_checking')
    def test_nonexistent_fixture_no_constraint_checking(
            self, disable_constraint_checking, enable_constraint_checking):
        """
        If no fixtures match the loaddata command, constraints checks on the
        database shouldn't be disabled. This is performance critical on MSSQL.
        """
        expected_msg = "No fixture named 'this_fixture_doesnt_exist' found."
        with self.assertRaisesMessage(CommandError, expected_msg):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
        # Neither constraint-checking hook should have been touched.
        disable_constraint_checking.assert_not_called()
        enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
    """
    Fixture-loading tests that need a real transaction boundary
    (TransactionTestCase) rather than the per-test wrapping transaction.
    """
    available_apps = [
        'fixtures',
        'django.contrib.contenttypes',
        'django.contrib.auth',
        'django.contrib.sites',
    ]

    @skipUnlessDBFeature('supports_forward_references')
    def test_format_discovery(self):
        """loaddata discovers a fixture's serialization format from the matching file's extension."""
        # Load fixture 1 again, using format discovery
        management.call_command('loaddata', 'fixture1', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Try to load fixture 2 using format discovery; this will fail
        # because there are two fixture2's in the fixtures directory
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture2', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Load fixture 4 (compressed), using format discovery
        management.call_command('loaddata', 'fixture4', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
#
# Copyright 2021 The On Combining Bags to Better Learn from
# Label Proportions Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Code for Generalized Bags Training for Scenario VI."""
import pathlib
import pickle
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
# Fixed seed stream that drives all per-(fold, split) seeds below.
rng = np.random.default_rng(5443723)
path_to_root_data_dir = (pathlib.Path(__file__).parent /
                         "../../Data/").resolve()
directory_full_datasets = str(path_to_root_data_dir) + "/FullDatasets/"
root_for_experiments = str(path_to_root_data_dir) + "/"
# Scenario number; used for directory and output-file naming.
cluster_bags_method = 6
datasetName_list = ["Ionosphere", "Australian", "Heart"]
# Feature counts per dataset (parallel to datasetName_list).
n_features_list = [35, 15, 14]
n_clusters = 3
NUM_LABELS = 1
# Number of generalized bags per training meta-batch.
META_BATCH_SIZE = 32
NUM_BATCHES = 1000
# Column offsets into concat([y_pred, y_true_extended], axis=1):
# [0, T2) predictions, [T2, T4) labels, T4 bag weight, T5 generalized-bag no.
T1 = NUM_LABELS - 1
T2 = T1 + 1
T3 = T1 + NUM_LABELS
T4 = T3 + 1
T5 = T4 + 1
mean_arr = np.array([0, 0, 0, 0])
# NOTE(review): despite the name, this is passed as the *covariance*
# argument of np.random.multivariate_normal below — confirm intent.
correlation_matrix = np.array(
    [[3.67647059e-01, -1.40782560e+00, -1.47058823e+00, -1.47058823e+00],
     [-1.40782560e+00, 5.39096636e+00, 5.63130239e+00, 5.63130239e+00],
     [-1.47058823e+00, 5.63130239e+00, 5.88235294e+00, 5.88235291e+00],
     [-1.47058823e+00, 5.63130239e+00, 5.88235291e+00, 5.88235294e+00]])
# Shared loss primitives used by the custom bag losses below.
mse = tf.keras.losses.MeanSquaredError()
mae = tf.keras.losses.MeanAbsoluteError()
kld = tf.keras.losses.KLDivergence()
def custom_loss_ell2sq_genbag(y_true_extended, y_pred):
  """L_2^2 Generalized Bags Loss.

  For every generalized bag in the meta-batch, accumulates the squared
  error between the weighted sum of predictions and the weighted sum of
  labels. y_true_extended columns are (label, bag weight, bag number).
  """
  # Keep predictions and (label, wt, genBagNo) columns side by side.
  combined = tf.concat((y_pred, y_true_extended), axis=1)
  total_loss = 0
  # pylint: disable=cell-var-from-loop
  for bag_no in range(META_BATCH_SIZE):
    # Select the rows whose generalized-bag number equals bag_no.
    row_ids = tf.where(
        tf.equal(combined[:, T5], tf.constant(bag_no, dtype=float)))[:, 0]
    bag = tf.gather(combined, row_ids)
    bag_pred = tf.gather(bag, tf.range(T2), axis=1)
    bag_actual = tf.gather(bag, tf.range(T2, T4), axis=1)
    bag_wts = tf.gather(bag, [T4], axis=1)
    # Weighted totals of predictions and labels for this bag.
    pred_total = tf.reduce_sum(bag_pred * bag_wts, axis=0)
    actual_total = tf.reduce_sum(bag_actual * bag_wts, axis=0)
    total_loss = total_loss + mse(y_true=actual_total, y_pred=pred_total)
  print()
  tf.print("Step loss: ", total_loss)
  return total_loss
def custom_loss_abs_genbag(y_true_extended, y_pred):
  """L_1 Generalized Bags Loss.

  Identical to the L_2^2 variant except the per-bag discrepancy is the
  mean absolute error between weighted label and prediction totals.
  """
  # Keep predictions and (label, wt, genBagNo) columns side by side.
  combined = tf.concat((y_pred, y_true_extended), axis=1)
  total_loss = 0
  # pylint: disable=cell-var-from-loop
  for bag_no in range(META_BATCH_SIZE):
    # Select the rows whose generalized-bag number equals bag_no.
    row_ids = tf.where(
        tf.equal(combined[:, T5], tf.constant(bag_no, dtype=float)))[:, 0]
    bag = tf.gather(combined, row_ids)
    bag_pred = tf.gather(bag, tf.range(T2), axis=1)
    bag_actual = tf.gather(bag, tf.range(T2, T4), axis=1)
    bag_wts = tf.gather(bag, [T4], axis=1)
    # Weighted totals of predictions and labels for this bag.
    pred_total = tf.reduce_sum(bag_pred * bag_wts, axis=0)
    actual_total = tf.reduce_sum(bag_actual * bag_wts, axis=0)
    total_loss = total_loss + mae(y_true=actual_total, y_pred=pred_total)
  print()
  tf.print("Step loss: ", total_loss)
  return total_loss
def custom_loss_kld_genbag(y_true_extended, y_pred):
  """KL-div Bags Loss.

  Per generalized bag, computes the KL divergence between the weighted
  average label and the weighted average prediction (both sides of the
  binary distribution), skipping bags whose total weight is zero.
  """
  y_combined = tf.concat((y_pred, y_true_extended), axis=1)
  cumu_loss = 0
  # pylint: disable=cell-var-from-loop
  for partition_num in range(META_BATCH_SIZE):
    # Rows belonging to this generalized bag (column T5 == partition_num).
    y_combined_sliced = tf.gather(
        y_combined,
        tf.where(
            tf.equal(y_combined[:, T5],
                     tf.constant(partition_num, dtype=float)))[:, 0])
    # Columns [0, T2) are predictions, [T2, T4) labels, T4 bag weights.
    y_combined_sliced_pred = tf.gather(
        y_combined_sliced, tf.range(T2), axis=1)
    y_combined_sliced_actual = tf.gather(
        y_combined_sliced, tf.range(T2, T4), axis=1)
    wts = tf.gather(y_combined_sliced, [T4], axis=1)
    wtd_y_combined_sliced_pred = y_combined_sliced_pred * wts
    wtd_y_combined_sliced_actual = y_combined_sliced_actual * wts
    def thisbagloss():
      # Weighted averages of predictions and labels over the bag.
      wtd_sum_y_combined_sliced_pred = tf.reduce_sum(
          wtd_y_combined_sliced_pred, axis=0)
      avg_sum_y_combined_sliced_pred = tf.divide(
          wtd_sum_y_combined_sliced_pred, tf.reduce_sum(wts))
      wtd_sum_y_combined_sliced_actual = tf.reduce_sum(
          wtd_y_combined_sliced_actual, axis=0)
      avg_sum_y_combined_sliced_actual = tf.divide(
          wtd_sum_y_combined_sliced_actual, tf.reduce_sum(wts))
      # Complementary (1 - p) sides of the binary distributions.
      oneminus_avg_sum_y_combined_sliced_pred = (
          -1.0) * avg_sum_y_combined_sliced_pred + 1.0
      oneminus_avg_sum_y_combined_sliced_actual = (
          -1.0) * avg_sum_y_combined_sliced_actual + 1.0
      # KL on the positive side plus KL on the complementary side.
      loss1 = kld(
          y_true=avg_sum_y_combined_sliced_actual,
          y_pred=avg_sum_y_combined_sliced_pred)
      loss2 = kld(
          y_true=oneminus_avg_sum_y_combined_sliced_actual,
          y_pred=oneminus_avg_sum_y_combined_sliced_pred)
      thisloss = loss1 + loss2
      return thisloss
    # Guard against dividing by a zero total bag weight.
    this_bag_loss = tf.cond(
        tf.reduce_sum(wts) > 0.0, thisbagloss, lambda: 0.0)
    cumu_loss = cumu_loss + this_bag_loss
  print()
  tf.print("Step loss: ", cumu_loss)
  return cumu_loss
# Shared BinaryAccuracy metric used by custom_binacc below.
# Fix: the original read `binacc = m = ...`, accidentally binding a stray
# unused module-level alias `m`; the alias has been dropped.
binacc = tf.keras.metrics.BinaryAccuracy()
def custom_binacc_func():
  """Custom Binary Accuracy using slices.

  Returns a metric function that scores predictions against column 0 of
  y_true (the label; the remaining columns carry the bag weight and the
  generalized-bag number).
  """
  def retfunc(y_true, y_pred):
    return binacc(tf.gather(y_true, [0], axis=1), y_pred)
  retfunc.__name__ = "BinaryAcc"
  return retfunc
custom_binacc = custom_binacc_func()
# Shared AUC metric instance, reused across all evaluations below.
auc = tf.keras.metrics.AUC()
def custom_auc(y_true, y_pred):
  """Custom AUC using slices.

  Scores predictions against column 0 of y_true only (the label); the
  remaining columns carry the bag weight and generalized-bag number.
  """
  return auc(tf.gather(y_true, [0], axis=1), y_pred)
class Linear(keras.layers.Layer):
  """Linear Classifier.

  A single dense layer with a sigmoid output, i.e. logistic regression.
  Weights and bias are zero-initialized and trained by the generalized-bag
  losses above.
  """

  def __init__(self, units=32, input_dim=32):
    super(Linear, self).__init__()
    # (input_dim, units) weight matrix and per-unit bias, both trainable.
    self.w = self.add_weight(
        shape=(input_dim, units), initializer="zeros", trainable=True)
    self.b = self.add_weight(
        shape=(units,), initializer="zeros", trainable=True)

  def call(self, inputs):
    # Sigmoid of the affine map gives class-1 probabilities.
    return tf.nn.sigmoid(tf.matmul(inputs, self.w) + self.b)
# Experiment driver: for every dataset / fold / split, sample generalized
# bags from the pickled cluster bags and train seven linear models, one per
# (loss, weighting-scheme) pair:
#   *_S: correlated Gaussian weights, *_R: i.i.d. Gaussian weights,
#   *_U: one-hot (single-bag) weights; losses are L2^2, L1 and KL-div.
for datasetIndex, datasetName in enumerate(datasetName_list):
  NUM_FEATURES = n_features_list[datasetIndex]
  name_dir = root_for_experiments + datasetName + "/"
  # One output file per dataset; a result line is appended per (fold, split).
  cluster_bags_methodoutfile = name_dir + datasetName + "_TFexpOutputClusterBags_" + str(
      cluster_bags_method)
  for foldnumber in range(1, 6):
    folddir = name_dir + "Fold_" + str(foldnumber) + "/"
    for splitnumber in range(1, 6):
      splitdir = folddir + "Split_" + str(splitnumber) + "/"
      test_data_file = splitdir + datasetName + "_" + str(
          foldnumber) + "_" + str(splitnumber) + "-test.csv"
      train_data_file = splitdir + datasetName + "_" + str(
          foldnumber) + "_" + str(splitnumber) + "-train.csv"
      cluster_dir = splitdir + "ClusterBags_" + str(cluster_bags_method) + "/"
      # Draw and log fresh seeds per split so runs are reproducible.
      random_seed = rng.integers(low=1000000, size=1)[0]
      numpy_seed = rng.integers(low=1000000, size=1)[0]
      tf_seed = rng.integers(low=1000000, size=1)[0]
      random.seed(random_seed)
      np.random.seed(numpy_seed)
      tf.random.set_seed(tf_seed)
      print()
      print("*********starting************")
      print(cluster_dir)
      print("random_seed = ", random_seed)
      print("numpy_seed = ", numpy_seed)
      print("tf_seed = ", tf_seed)
      # print(train_data_file)
      train_df = pd.read_csv(train_data_file)
      test_df = pd.read_csv(test_data_file)
      # Entry 0 holds the straddle bags; entries 1..n_clusters the tail
      # bags of each cluster. Each entry is a list of row-index lists.
      distn_Id_to_bags_list = []
      # first pick up the straddle
      distn_Id_to_bags_list.append(
          pickle.load(open(cluster_dir + "straddle_bags", "rb")))
      # next the tail bags
      for i in range(n_clusters):
        distn_Id_to_bags_list.append(
            pickle.load(open(cluster_dir + "tail_bags_" + str(i), "rb")))
      print("distn_Id_to_bags_list")
      print(distn_Id_to_bags_list)
      print("correlation_matrix")
      print(correlation_matrix)
      def label_map(x):
        # Map {-1, +1} labels to {0, 1}.
        if x == -1:
          return 0
        else:
          return 1
      # Feature columns are x.1 .. x.(NUM_FEATURES-1) plus "constant".
      list_of_features = []
      for i in range(1, NUM_FEATURES):
        list_of_features.append("x." + str(i))
      list_of_features.append("constant")
      # pylint: disable=unnecessary-lambda
      train_df["label"] = train_df["label"].map(lambda x: label_map(x))
      # pylint: disable=unnecessary-lambda
      test_df["label"] = test_df["label"].map(lambda x: label_map(x))
      # NOTE(review): computed but never used below.
      train_bag_counts = train_df["bag"].value_counts()
      # Seven single-layer linear models, one per (loss, weighting) pair.
      myLinearModel_l2_S = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l2_S.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_ell2sq_genbag)
      myLinearModel_l2_R = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l2_R.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_ell2sq_genbag)
      myLinearModel_l1_S = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l1_S.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_abs_genbag)
      myLinearModel_l1_R = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l1_R.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_abs_genbag)
      myLinearModel_l2_U = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l2_U.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_ell2sq_genbag)
      myLinearModel_l1_U = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_l1_U.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_abs_genbag)
      myLinearModel_KL_U = keras.Sequential(
          [Linear(input_dim=NUM_FEATURES, units=NUM_LABELS)])
      myLinearModel_KL_U.compile(
          optimizer=tf.keras.optimizers.SGD(learning_rate=0.0001),
          loss=custom_loss_kld_genbag)
      for step in range(NUM_BATCHES):
        print("Enter Batch: ", step)
        # Build one meta-batch: META_BATCH_SIZE generalized bags, each
        # mixing one bag per distribution under three weighting schemes.
        batch_df = pd.DataFrame()
        batch_rand_df = pd.DataFrame()
        batch_unif_df = pd.DataFrame()
        for genBagNo in range(META_BATCH_SIZE):
          sampled_wts = np.random.multivariate_normal(mean_arr,
                                                      correlation_matrix)
          random_wts = np.random.normal(size=1 + n_clusters)
          unifbag_wts = np.eye(1 + n_clusters)[random.randrange(1 + n_clusters)]
          genBag_df = pd.DataFrame()
          genBag_rand_df = pd.DataFrame()
          genBag_unif_df = pd.DataFrame()
          for i in range(1 + n_clusters):
            # One randomly chosen bag (list of train_df row indices) from
            # distribution i; the same rows get all three weightings.
            this_bag_list = random.sample(distn_Id_to_bags_list[i], 1)
            this_bag_df = train_df.iloc[this_bag_list[0]].copy()
            this_bag_rand_df = train_df.iloc[this_bag_list[0]].copy()
            this_bag_unif_df = train_df.iloc[this_bag_list[0]].copy()
            this_bag_df["wt"] = sampled_wts[i]
            this_bag_rand_df["wt"] = random_wts[i]
            this_bag_unif_df["wt"] = unifbag_wts[i]
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # this script requires pandas < 2 (pd.concat is the modern
            # replacement).
            genBag_df = genBag_df.append(this_bag_df, ignore_index=True)
            genBag_rand_df = genBag_rand_df.append(
                this_bag_rand_df, ignore_index=True)
            genBag_unif_df = genBag_unif_df.append(
                this_bag_unif_df, ignore_index=True)
          genBag_df["genBagNo"] = genBagNo
          genBag_rand_df["genBagNo"] = genBagNo
          genBag_unif_df["genBagNo"] = genBagNo
          batch_df = batch_df.append(genBag_df, ignore_index=True)
          batch_rand_df = batch_rand_df.append(
              genBag_rand_df, ignore_index=True)
          batch_unif_df = batch_unif_df.append(
              genBag_unif_df, ignore_index=True)
        features_matrix = batch_df[list_of_features].to_numpy()
        features_matrix_rand = batch_rand_df[list_of_features].to_numpy()
        features_matrix_unif = batch_unif_df[list_of_features].to_numpy()
        # y carries (label, wt, genBagNo); the custom losses slice it.
        label_wt_genBagno_matrix = batch_df[["label", "wt",
                                             "genBagNo"]].to_numpy(dtype=float)
        label_wt_genBagno_matrix_rand = batch_rand_df[[
            "label", "wt", "genBagNo"
        ]].to_numpy(dtype=float)
        label_wt_genBagno_matrix_unif = batch_unif_df[[
            "label", "wt", "genBagNo"
        ]].to_numpy(dtype=float)
        myLinearModel_l2_S.train_on_batch(
            x=features_matrix, y=label_wt_genBagno_matrix)
        myLinearModel_l1_S.train_on_batch(
            x=features_matrix, y=label_wt_genBagno_matrix)
        myLinearModel_l2_R.train_on_batch(
            x=features_matrix_rand, y=label_wt_genBagno_matrix_rand)
        myLinearModel_l1_R.train_on_batch(
            x=features_matrix_rand, y=label_wt_genBagno_matrix_rand)
        myLinearModel_l2_U.train_on_batch(
            x=features_matrix_unif, y=label_wt_genBagno_matrix_unif)
        myLinearModel_l1_U.train_on_batch(
            x=features_matrix_unif, y=label_wt_genBagno_matrix_unif)
        myLinearModel_KL_U.train_on_batch(
            x=features_matrix_unif, y=label_wt_genBagno_matrix_unif)
        print("After training step: ", step)
      print("Testing on Full Dataset")
      # Recompile with the AUC metric for evaluation.
      myLinearModel_l2_S.compile(metrics=[custom_auc])
      myLinearModel_l1_S.compile(metrics=[custom_auc])
      myLinearModel_l2_R.compile(metrics=[custom_auc])
      myLinearModel_l1_R.compile(metrics=[custom_auc])
      myLinearModel_l2_U.compile(metrics=[custom_auc])
      myLinearModel_l1_U.compile(metrics=[custom_auc])
      myLinearModel_KL_U.compile(metrics=[custom_auc])
      # Dummy weight / bag-number columns so y has the expected 3 columns.
      test_df["wt"] = 0
      test_df["genBagNo"] = 0
      features_matrix_test = test_df[list_of_features].to_numpy()
      label_wt_genBagno_matrix_test = test_df[["label", "wt", "genBagNo"
                                              ]].to_numpy(dtype=float)
      results_linear_l2_S = myLinearModel_l2_S.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_l1_S = myLinearModel_l1_S.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_l2_R = myLinearModel_l2_R.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_l1_R = myLinearModel_l1_R.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_l2_U = myLinearModel_l2_U.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_l1_U = myLinearModel_l1_U.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      results_linear_KL_U = myLinearModel_KL_U.test_on_batch(
          x=features_matrix_test, y=label_wt_genBagno_matrix_test)
      # One CSV-ish line: dataset, scenario, split, fold, then the AUC
      # ([1] = metric) of each of the seven models.
      results_string = (
          datasetName + ", " + str(cluster_bags_method) + ", " +
          str(splitnumber) + ", " + str(foldnumber) + ", " +
          str(results_linear_l2_S[1]) + ", " + str(results_linear_l1_S[1]) +
          ", " + str(results_linear_l2_R[1]) + ", " +
          str(results_linear_l1_R[1]) + ", " + str(results_linear_l2_U[1]) +
          ", " + str(results_linear_l1_U[1]) + ", " +
          str(results_linear_KL_U[1]) + "\n")
      print("Test Results: ")
      print(results_string)
      with open(cluster_bags_methodoutfile, "a") as filetoAppend:
        filetoAppend.write(results_string)
| |
import copy
import abc
from scipy.optimize import minimize
from pyhawkes.utils.basis import CosineBasis
import autograd.numpy as np
from autograd import grad
### Nodes
class _NonlinearHawkesNodeBase(object):
"""
A single node of a nonlinear Hawkes process.
"""
__metaclass__ = abc.ABCMeta
constrained = False
def bias_bnds(self):
return None
def weight_bnds(self):
return None
@abc.abstractmethod
def link(self, psi):
raise NotImplementedError
@abc.abstractmethod
def invlink(self, lam):
raise NotImplementedError
def __init__(self, K, B, dt=1.0, sigma=np.inf, lmbda=np.inf):
self.K, self.B, self.dt, self.sigma, self.lmbda = K, B, dt, sigma, lmbda
# Initialize weights
self.w = np.zeros(1+self.K*self.B)
# List of event counts and filtered inputs
self.data_list = []
def add_data(self, F, S):
T = F.shape[0]
assert S.shape == (T,) and S.dtype in (np.int, np.uint, np.uint32)
if F.shape[1] == self.K * self.B:
F = np.hstack([np.ones((T,)), F])
else:
assert F.shape[1] == 1 + self.K * self.B
self.data_list.append((F, S))
def initialize_to_background_rate(self):
# self.w = abs(1e-6 * np.random.randn(*self.w.shape))
self.w = 1e-6 * np.ones_like(self.w)
if len(self.data_list) > 0:
N = 0
T = 0
for F,S in self.data_list:
N += S.sum(axis=0)
T += S.shape[0] * self.dt
lambda0 = self.invlink(N / float(T))
self.w[0] = lambda0
def log_likelihood(self, index=None):
if index is None:
data_list = self.data_list
else:
data_list = [self.data_list[index]]
ll = 0
for F,S in data_list:
psi = F.dot(self.w)
lam = self.link(psi)
ll += (S * np.log(lam) -lam*self.dt).sum()
return ll
def objective(self, w):
obj = 0
N = float(sum([np.sum(d[1]) for d in self.data_list]))
for F,S in self.data_list:
psi = np.dot(F, w)
lam = self.link(psi)
obj -= np.sum(S * np.log(lam) -lam*self.dt) / N
# assert np.isfinite(ll)
# Add penalties
obj += (0.5 * np.sum(w[1:]**2) / self.sigma**2) / N
obj += np.sum(np.abs(w[1:]) * self.lmbda) / N
# assert np.isfinite(obj)
return obj
def fit_with_bfgs(self):
"""
Fit the model with BFGS
"""
# If W_max is specified, set this as a bound
# if self.W_max is not None:
bnds = self.bias_bnds + self.weight_bnds * (self.K * self.B) \
if self.constrained else None
# else:
# bnds = [(None, None)] * (1 + self.K * self.B)
itr = [0]
def callback(w):
if itr[0] % 10 == 0:
print("Iteration: %03d\t LP: %.5f" % (itr[0], self.objective(w)))
itr[0] = itr[0] + 1
itr[0] = 0
x0 = self.w
res = minimize(self.objective, # Objective function
x0, # Initial value
jac=grad(self.objective), # Gradient of the objective
bounds=bnds, # Bounds on x
callback=callback)
self.w = res.x
def copy_node(self):
"""
Return a copy of the parameters of the node
"""
# Shallow copy the data
data_list = copy.copy(self.data_list)
self.data_list = []
# Make a deep copy without the data
node_copy = copy.deepcopy(self)
# Reset the data and return the data-less copy
self.data_list = data_list
return node_copy
class LinearHawkesNode(_NonlinearHawkesNodeBase):
    # Identity link with nonnegativity constraints: bias and weights are
    # bounded below so the rate stays strictly positive.
    constrained = True

    @property
    def bias_bnds(self):
        # Lower bound keeps the bias strictly positive during the fit.
        return [(1e-16, None)]

    @property
    def weight_bnds(self):
        # Lower bound keeps every impulse-response weight strictly positive.
        return [(1e-16, None)]

    def link(self, psi):
        # Identity: the rate equals the activation.
        return psi

    def invlink(self, lam):
        # Inverse of the identity link.
        return lam
class RectLinearHawkesNode(_NonlinearHawkesNodeBase):
    """Hawkes node with a softplus ("rectified linear") link."""

    def link(self, psi):
        """Softplus link lam = log(1 + exp(psi)), floored at 1e-16.

        Fix: uses logaddexp for numerical stability — the previous
        log(1. + exp(psi)) overflows to inf for psi >~ 709.
        """
        return 1e-16 + np.logaddexp(0., psi)

    def invlink(self, lam):
        """Inverse softplus, psi = log(exp(lam) - 1)."""
        return np.log(np.exp(lam) - 1.)
class ExpNonlinearHawkesNode(_NonlinearHawkesNodeBase):
    # Exponential link: lam = exp(psi), hence psi = log(lam).
    def link(self, psi):
        return np.exp(psi)

    def invlink(self, lam):
        return np.log(lam)
# Dummy class for a homogeneous Hawkes node
class HomogeneousPoissonNode(_NonlinearHawkesNodeBase):
    # Identity link; only the background rate (bias) is ever nonzero.
    def link(self, psi):
        return psi

    def invlink(self, lam):
        return lam

    def fit_with_bfgs(self):
        # Rather than fitting, just initialize to background rate and zero
        # out all interaction weights.
        self.initialize_to_background_rate()
        self.w[1:] = 0
class _NonlinearHawkesProcessBase(object):
"""
Discrete time nonlinear Hawkes process, i.e. Poisson GLM
"""
__metaclass__ = abc.ABCMeta
_node_class = None
def __init__(self, K, dt=1.0, dt_max=10.0,
B=5, basis=None,
sigma=np.inf,
lmbda=0):
"""
Initialize a discrete time network Hawkes model with K processes.
:param K: Number of processes
:param dt: Time bin size
:param dt_max:
"""
self.K = K
self.dt = dt
self.dt_max = dt_max
self.sigma = sigma
self.lmbda = lmbda
# Initialize the basis
if basis is None:
self.B = B
self.basis = CosineBasis(self.B, self.dt, self.dt_max, norm=True,
allow_instantaneous=False)
else:
self.basis = basis
self.B = basis.B
# Initialize nodes
self.nodes = \
[self._node_class(self.K, self.B, dt=self.dt,
sigma=self.sigma, lmbda=self.lmbda)
for _ in range(self.K)]
def initialize_to_background_rate(self):
for node in self.nodes:
node.initialize_to_background_rate()
@property
def W(self):
full_W = np.array([node.w for node in self.nodes])
WB = full_W[:,1:].reshape((self.K,self.K, self.B))
# Weight matrix is summed over impulse response functions
WT = WB.sum(axis=2)
# Then we transpose so that the weight matrix is (outgoing x incoming)
W = WT.T
return W
@property
def G(self):
full_W = np.array([node.w for node in self.nodes])
WB = full_W[:,1:].reshape((self.K,self.K, self.B))
# Weight matrix is summed over impulse response functions
WT = WB.sum(axis=2)
# Impulse response weights are normalized weights
GT = WB / WT[:,:,None]
# Then we transpose so that the impuolse matrix is (outgoing x incoming x basis)
G = np.transpose(GT, [1,0,2])
# TODO: Decide if this is still necessary
for k1 in range(self.K):
for k2 in range(self.K):
if G[k1,k2,:].sum() < 1e-2:
G[k1,k2,:] = 1.0/self.B
return G
@property
def bias(self):
full_W = np.array([node.w for node in self.nodes])
return full_W[:,0]
def add_data(self, S, F=None):
"""
Add a data set to the list of observations.
First, filter the data with the impulse response basis,
then instantiate a set of parents for this data set.
:param S: a TxK matrix of of event counts for each time bin
and each process.
"""
assert isinstance(S, np.ndarray) and S.ndim == 2 and S.shape[1] == self.K \
and np.amin(S) >= 0 and S.dtype == np.int, \
"Data must be a TxK array of event counts"
T = S.shape[0]
if F is None:
# Filter the data into a TxKxB array
Ftens = self.basis.convolve_with_basis(S)
# Flatten this into a T x (KxB) matrix
# [F00, F01, F02, F10, F11, ... F(K-1)0, F(K-1)(B-1)]
F = Ftens.reshape((T, self.K * self.B))
assert np.allclose(F[:,0], Ftens[:,0,0])
if self.B > 1:
assert np.allclose(F[:,1], Ftens[:,0,1])
if self.K > 1:
assert np.allclose(F[:,self.B], Ftens[:,1,0])
# Prepend a column of ones
F = np.hstack((np.ones((T,1)), F))
for k,node in enumerate(self.nodes):
node.add_data(F, S[:,k])
def remove_data(self, index):
for node in self.nodes:
del node.data_list[index]
def log_likelihood(self, index=None):
ll = np.sum([node.log_likelihood(index=index) for node in self.nodes])
return ll
def heldout_log_likelihood(self, S, F=None):
self.add_data(S, F=F)
hll = self.log_likelihood(index=-1)
self.remove_data(-1)
return hll
def copy_sample(self):
"""
Return a copy of the parameters of the model
"""
# Shallow copy the data
nodes_original = copy.copy(self.nodes)
# Make a deep copy without the data
self.nodes = [n.copy_node() for n in nodes_original]
model_copy = copy.deepcopy(self)
# Reset the data and return the data-less copy
self.nodes = nodes_original
return model_copy
def fit_with_bfgs(self):
# TODO: This can be parallelized
for k, node in enumerate(self.nodes):
print("")
print("Fitting Node ", k)
node.fit_with_bfgs()
# Concrete processes: each pairs the shared base with one node/link type.
class StandardHawkesProcess(_NonlinearHawkesProcessBase):
    # Identity link with nonnegativity constraints (classic linear Hawkes).
    _node_class = LinearHawkesNode


class ReluNonlinearHawkesProcess(_NonlinearHawkesProcessBase):
    # Softplus ("rectified") link.
    _node_class = RectLinearHawkesNode


class ExpNonlinearHawkesProcess(_NonlinearHawkesProcessBase):
    # Exponential link.
    _node_class = ExpNonlinearHawkesNode


class HomogeneousPoissonProcess(_NonlinearHawkesProcessBase):
    # Background rate only; interaction weights are forced to zero.
    _node_class = HomogeneousPoissonNode
| |
# -*- coding: utf-8 -*-
import time
import os
import sys
import sqlite3
from __init__ import *
class UsersDDBB():
    """
    Class to deal with users who can access the application.

    Here we can create/drop the table, initialize/close the connections,
    add/delete users and change passwords. Every user stored in this
    database can access the application.

    The database file is BBDD/users.db and the table is "USERS".

    NOTE(review): passwords are stored in plain text; they should be
    hashed (e.g. hashlib with a salt) before being inserted.
    """

    # Single source of truth for the database location so __init__ and
    # restart() always open the same file.
    DB_PATH = 'BBDD/users.db'

    def __init__(self):
        """
        Initializes the connection to the ddbb in which the users are defined.
        """
        self.connection = sqlite3.connect(self.DB_PATH)
        self.cursor = self.connection.cursor()

    def closeCon(self):
        """
        Closes the cursor and the connection.
        """
        self.cursor.close()
        self.connection.close()

    def restart(self):
        """
        Commits pending work, then closes and reopens the connection.
        """
        self.connection.commit()
        self.cursor.close()
        self.connection.close()
        # Bug fix: this previously reconnected to 'users.db' (a different
        # file in the working directory) instead of the BBDD/users.db
        # database opened by __init__.
        self.connection = sqlite3.connect(self.DB_PATH)
        self.cursor = self.connection.cursor()

    def createTables(self):
        """
        Creates the USERS table if it doesn't exist.

        Never used by the application itself, but appropriate to recreate
        the table if it has been removed. The UNIQUE constraint prevents
        duplicate usernames.
        """
        self.cursor.execute('CREATE TABLE IF NOT EXISTS USERS (user varchar(255), password varchar(255), CONSTRAINT norepeat UNIQUE(user))')

    def dropTables(self):
        """
        Removes the table "USERS". It is not convenient to use this method.
        """
        self.cursor.execute('DROP TABLE IF EXISTS USERS')

    def insUs(self, user, password):
        """
        Inserts a new user; called once a user tries to sign up.

        Duplicate usernames violate the UNIQUE constraint and are only
        logged; operational errors are logged and rolled back.

        Args:
            user: this will be the username
            password: this will be the password which will allow
                the user to get logged in.
        """
        t = (user, password)
        try:
            self.cursor.execute('INSERT INTO USERS (user, password) VALUES (?,?)', t)
            self.connection.commit()
        except sqlite3.IntegrityError:
            logging.error("Row already exists. Please, try a new ID")
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()

    def deleteUs(self, user):
        """
        Deletes an existing user. The application does not expose this, so
        the action must be done at the command line.

        Args:
            user: this will be the user to be deleted from the table
        """
        try:
            self.cursor.execute('DELETE FROM USERS WHERE user=?', [user])
            self.connection.commit()
            logging.info("Deleting user: {!s} ".format(str(user)))
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()

    def showUsers(self):
        """
        Shows all the users included inside the table.

        Returns:
            listUsers: list of (username, password) rows
        """
        self.cursor.execute('SELECT * FROM USERS')
        listUsers = []
        for row in self.cursor:
            listUsers.append(row)
        return listUsers

    def changePass(self, user, newpass):
        """
        Changes the password assigned to a specific user.

        Args:
            user: username related to the user who wants to change
                his/her password.
            newpass: new password
        """
        try:
            self.cursor.execute('UPDATE USERS SET password=? WHERE user=? ', [newpass, user])
            logging.info("new password for {!s}".format(str(user)))
            self.connection.commit()
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()
class WordsDDBB():
"""
Class to deal with the words that every user include in their databases.
Here, we can insert/delete the database, inizialize/close the connections,
add/delete words and change passwords.
The database which is created is the name of the user, and each user has
his/her own database.
The name of the table is "WORDS".
"""
    def __init__(self,username):
        """
        Initializes the connection to this user's own database.

        The database file is BBDD/<username>.db; each user has their own.
        The connection is kept open so further actions can be executed in
        this database; close it with the method closeCon().

        Args:
            username: name of the database (the user's name)
        """
        # Remember the path so restart() can reopen the same file.
        self.file="BBDD/"+str(username)+".db"
        self.connection = sqlite3.connect(self.file)
        self.cursor=self.connection.cursor()
    def fileToImport(self,file):
        """
        Re-points the open connection at an external database file.

        It is important to know that the actions performed later will be
        done inside this database until the connection is closed through
        the method closeCon().

        Args:
            file: database file to open
        """
        self.cursor.close()
        self.connection.close()
        self.connection = sqlite3.connect(file)
        self.cursor=self.connection.cursor()
    def closeCon(self):
        """
        Closes the cursor and the underlying connection.
        """
        self.cursor.close()
        self.connection.close()
    def restart(self):
        """
        Commits pending work, then closes and reopens the connection to the
        user's own database file (self.file).
        """
        self.connection.commit()
        self.cursor.close()
        self.connection.close()
        self.connection = sqlite3.connect(self.file)
        self.cursor=self.connection.cursor()
    def createTables(self):
        """
        Creates the WORDS table if it doesn't exist.

        The table is created when the database is created (after signup).
        Columns: word, example, meaning, syntaxis (word type),
        category (level, int) and day (creation date).
        """
        self.cursor.execute('CREATE TABLE IF NOT EXISTS WORDS (word varchar(255), example varchar(255), meaning varchar(255), syntaxis varchar(255),category int, day varchar(255))')
    def dropTables(self):
        """
        Removes the table "WORDS". It is not convenient to use this
        method.
        """
        self.cursor.execute('DROP TABLE IF EXISTS WORDS')
    def insWord(self, word, example, meaning, syntaxis,category,when):
        """
        Inserts a new word inside the table.

        Operational errors are logged and rolled back.

        Args:
            word: new word to be included
            example: example in which the word is used
            meaning: meaning of the word in the user's native language
            syntaxis: type of word (verb, preposition...)
            category: level. By default, all words start at the first
                level.
            when: day on which the word is created.
        """
        t=(word, example, meaning, syntaxis,category,when)
        try:
            self.cursor.execute('INSERT INTO WORDS (word, example, meaning, syntaxis,category,day) VALUES (?,?,?,?,?,?)', t)
            self.connection.commit()
            logging.info("Inserting new word: {!s} ".format(str(word)))
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()
    def deleteWord(self, word,wordtype):
        """
        Deletes an existing word of a given word type.

        Args:
            word: word that the user asks to delete
            wordtype: word type (syntaxis) of the row to delete
        """
        try:
            self.cursor.execute('DELETE FROM WORDS WHERE word=? AND syntaxis=?', [word,wordtype])
            self.connection.commit()
            logging.info("Deleting word: {!s} ".format(str(word)))
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()
    def updateLevel(self, word, meaning, newLevel):
        """
        Changes the level assigned to a specific word.

        Args:
            word: word whose level will be updated.
            meaning: meaning of the word whose level will be updated.
            newLevel: new level
        """
        try:
            self.cursor.execute('UPDATE WORDS SET category=? WHERE word=? AND meaning=?', [newLevel, word, meaning])
            self.connection.commit()
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()
    def resetLevels(self):
        """
        Resets the level associated with every word to 1.
        """
        try:
            self.cursor.execute('UPDATE WORDS SET category=?', [1])
            logging.info( "levels have been reseted")
            self.connection.commit()
        except sqlite3.OperationalError as er:
            logging.error( "{!s}. Please, try it again in a few minutes".format(str(er)))
            self.connection.rollback()
def showWords(self):
    """Return every word in the table, sorted alphabetically.

    Returns:
        listWords: list with one entry per stored word, ordered by word
            name. Every entry is a list:
            [word, example, meaning, word type, level, creation day]
    """
    self.cursor.execute('SELECT * FROM WORDS ORDER BY word')
    return [list(row) for row in self.cursor]
def showWordsByLevel(self, level):
    """Return all the words stored with a specific level.

    Args:
        level: level (category) the returned words must have.
    Returns:
        listWords: list of matching entries, each one a list:
            [word, example, meaning, word type, level, creation day]
    """
    self.cursor.execute('SELECT * FROM WORDS WHERE category=?', [level])
    return [list(row) for row in self.cursor]
def showWordswordType(self, wordType):
    """Return all the words stored with a specific word type.

    Args:
        wordType: word type ("syntaxis") the returned words must have.
    Returns:
        listWords: list of matching entries, each one a list:
            [word, example, meaning, word type, level, creation day]
    """
    self.cursor.execute('SELECT * FROM WORDS WHERE syntaxis=?', [wordType])
    return [list(row) for row in self.cursor]
def showWordsByLevelwordType(self, level, wordType):
    """Return the words matching both a level and a word type.

    Args:
        level: level (category) the returned words must have.
        wordType: word type ("syntaxis") the returned words must have.
    Returns:
        listWords: list of matching entries, each one a list:
            [word, example, meaning, word type, level, creation day]
    """
    self.cursor.execute(
        'SELECT * FROM WORDS WHERE category=? AND syntaxis=?', [level, wordType])
    return [list(row) for row in self.cursor]
def showWord(self, word):
    """Return every entry stored under a given word name.

    Args:
        word: This method will show the words with this name.
    Returns:
        listWords: List in which all the words meeting the requirement are
            included. Every word is added as a list:
            [word, example, meaning, word type, level, creation day]
    Note: We have to take into account that the same word can have different
        inputs associated with different meanings or word types.
    """
    self.cursor.execute('SELECT * FROM WORDS WHERE word=?', [word])
    listWords = []
    for row in self.cursor:
        # Convert to list(row) so the return value matches the docstring
        # and every other show* method, which all return lists of lists
        # (the original appended raw sqlite3 row tuples here).
        listWords.append(list(row))
    return listWords
def showAll(self):
    """Return every entry stored in the WORDS table, unordered.

    Returns:
        listWords: list of all entries, each one a list:
            [word, example, meaning, word type, level, creation day]
    Note: the same word may appear several times, once per stored
        meaning or word type.
    """
    self.cursor.execute('SELECT * FROM WORDS')
    return [list(row) for row in self.cursor]
def showWordsAllLevels(self):
    """Count how many words there are in each level.

    Returns:
        stats: dictionary mapping each level ("1" through "10") to the
            number of words in that level; both keys and values are
            strings, as in the original interface.
        totalWords: total number of words, regardless of the level.
    """
    stats = {}
    totalWords = 0
    for level in range(1, 11):
        # Let SQLite count the rows instead of fetching every column of
        # every matching word just to count them in Python.
        self.cursor.execute('SELECT COUNT(*) FROM WORDS WHERE category=?', [level])
        num = self.cursor.fetchone()[0]
        stats[str(level)] = str(num)
        # Accumulate the total in the same pass; no second loop needed.
        totalWords += num
    return stats, totalWords
def showWordsAllTypes(self):
    """Count how many words there are for each word type.

    Returns:
        stats: dictionary mapping each known word type ("s", "v", "prep",
            "conj", "adv", "adj", "phrase/idiom") to the number of words
            of that type, as strings.
        totalWords: total number of words, regardless of the word type.
    """
    stats = {}
    totalWords = 0
    for wordType in ["s", "v", "prep", "conj", "adv", "adj", "phrase/idiom"]:
        # Count in SQL instead of fetching every row just to count it.
        self.cursor.execute('SELECT COUNT(*) FROM WORDS WHERE syntaxis=?', [wordType])
        num = self.cursor.fetchone()[0]
        stats[str(wordType)] = str(num)
        totalWords += num
    return stats, totalWords
if __name__ == "__main__":
    # Rebuild the database and reload it from the most recent backup file.
    C = WordsDDBB("admin")
    C.dropTables()
    C.createTables()
    now = time.localtime(time.time())
    # Zero-pad month and day to two digits for the yyyy/mm/dd stamp.
    mm = str(now.tm_mon).zfill(2)
    dd = str(now.tm_mday).zfill(2)
    day = "{!s}/{!s}/{!s}".format(str(now.tm_year), mm, dd)
    # Context manager guarantees the backup file is closed even if an
    # insert fails; "backup" also avoids shadowing the builtin "file".
    with open("BBDD/bkp/words20180105.txt", "r") as backup:
        for line in backup:
            line = line.replace("\n", "")
            data = line.split("|")
            if len(data) == 3:
                # Older backups omit the example field; keep it empty.
                data.append("")
            elif len(data) > 4 or len(data) < 3:
                # Malformed line: report it and skip it instead of
                # crashing on the data[3] access below.
                print(data)
                continue
            C.insWord(data[0].strip(), data[3].strip(), data[2].strip(),
                      data[1].strip(), 1, day)
    C.closeCon()
| |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class NumericPlugTest( GafferTest.TestCase ) :

	"""Tests for the numeric plug types (FloatPlug / IntPlug / BoolPlug).

	The deprecated unittest aliases originally used here (assert_,
	failUnless, failIf) were removed from unittest in Python 3.12; they
	are replaced below with assertTrue/assertFalse, which exist on every
	supported Python version.
	"""

	def testConstructor( self ) :

		f = Gaffer.FloatPlug()
		self.assertEqual( f.defaultValue(), 0 )
		self.assertEqual( f.getName(), "FloatPlug" )
		self.assertEqual( f.getValue(), 0 )

		f = Gaffer.FloatPlug( direction=Gaffer.Plug.Direction.Out, defaultValue = 1,
			minValue = -1, maxValue = 10 )
		self.assertEqual( f.direction(), Gaffer.Plug.Direction.Out )
		self.assertEqual( f.defaultValue(), 1 )
		self.assertEqual( f.minValue(), -1 )
		self.assertEqual( f.maxValue(), 10 )

		f = Gaffer.FloatPlug( defaultValue=10, name="a" )
		self.assertEqual( f.defaultValue(), 10 )
		self.assertEqual( f.getName(), "a" )
		self.assertEqual( f.typeName(), "Gaffer::FloatPlug" )
		self.assertEqual( f.getValue(), 10 )

	def testHaveMinMaxValues( self ) :

		f = Gaffer.FloatPlug()
		self.assertEqual( f.hasMinValue(), False )
		self.assertEqual( f.hasMaxValue(), False )

		f = Gaffer.FloatPlug( minValue=1 )
		self.assertEqual( f.hasMinValue(), True )
		self.assertEqual( f.hasMaxValue(), False )

		f = Gaffer.FloatPlug( maxValue=1 )
		self.assertEqual( f.hasMinValue(), False )
		self.assertEqual( f.hasMaxValue(), True )

	def testRunTimeTyping( self ) :

		f = Gaffer.FloatPlug()
		i = Gaffer.IntPlug()

		self.assertTrue( f.isInstanceOf( Gaffer.FloatPlug.staticTypeId() ) )
		self.assertFalse( f.isInstanceOf( Gaffer.IntPlug.staticTypeId() ) )
		self.assertFalse( i.isInstanceOf( Gaffer.FloatPlug.staticTypeId() ) )
		self.assertTrue( i.isInstanceOf( Gaffer.IntPlug.staticTypeId() ) )

	def testAcceptsInput( self ) :

		i = Gaffer.IntPlug()
		o = Gaffer.IntPlug( direction=Gaffer.Plug.Direction.Out )
		s = Gaffer.StringPlug( direction=Gaffer.Plug.Direction.Out )

		self.assertTrue( i.acceptsInput( o ) )
		self.assertFalse( i.acceptsInput( s ) )

	def testAcceptsNoneInput( self ) :

		p = Gaffer.IntPlug( "hello" )
		self.assertTrue( p.acceptsInput( None ) )

	def testAppliesMinMaxInSetValue( self ) :

		i = Gaffer.IntPlug( "i", defaultValue = 1, minValue = 0, maxValue = 10 )

		i.setValue( 5 )
		self.assertEqual( i.getValue(), 5 )

		# Values outside the range are clamped, not rejected.
		i.setValue( -1 )
		self.assertEqual( i.getValue(), 0 )

		i.setValue( 11 )
		self.assertEqual( i.getValue(), 10 )

	def testSetInputShortcut( self ) :

		n1 = GafferTest.AddNode()
		n2 = GafferTest.AddNode()

		inputChangedSignals = GafferTest.CapturingSlot( n1.plugInputChangedSignal() )
		self.assertEqual( len( inputChangedSignals ), 0 )

		dirtiedSignals = GafferTest.CapturingSlot( n1.plugDirtiedSignal() )
		self.assertEqual( len( dirtiedSignals ), 0 )

		n1["op1"].setInput( n2["sum"] )
		# we should get signals the first time
		self.assertEqual( len( inputChangedSignals ), 1 )
		self.assertEqual( len( dirtiedSignals ), 2 )
		self.assertEqual( dirtiedSignals[0][0].getName(), "op1" )
		self.assertEqual( dirtiedSignals[1][0].getName(), "sum" )

		n1["op1"].setInput( n2["sum"] )
		# but the second time there should be no signalling,
		# because it was the same.
		self.assertEqual( len( inputChangedSignals ), 1 )
		self.assertEqual( len( dirtiedSignals ), 2 )

	def testSetToDefault( self ) :

		i = Gaffer.IntPlug( "i", defaultValue = 10 )
		self.assertEqual( i.getValue(), 10 )

		i.setValue( 1 )
		self.assertEqual( i.getValue(), 1 )

		i.setToDefault()
		self.assertEqual( i.getValue(), 10 )

	def testDisconnectRevertsToPreviousValue( self ) :

		n1 = GafferTest.AddNode()
		n2 = GafferTest.AddNode()

		n2["op1"].setValue( 1010 )
		self.assertEqual( n2["op1"].getValue(), 1010 )

		n2["op1"].setInput( n1["sum"] )
		self.assertTrue( n2["op1"].getInput().isSame( n1["sum"] ) )
		self.assertEqual( n2["op1"].getValue(), 0 )

		n2["op1"].setInput( None )
		self.assertEqual( n2["op1"].getInput(), None )
		self.assertEqual( n2["op1"].getValue(), 1010 )

	def testDisconnectEmitsPlugSet( self ) :

		n1 = GafferTest.AddNode()
		n2 = GafferTest.AddNode()

		n2["op1"].setInput( n1["sum"] )

		# Renamed from "set", which shadowed the builtin.
		cs = GafferTest.CapturingSlot( n2.plugSetSignal() )

		n2["op1"].setInput( None )

		self.assertEqual( len( cs ), 1 )
		self.assertTrue( cs[0][0].isSame( n2["op1"] ) )

	def testDefaultValue( self ) :

		p = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.In,
			10,
			0,
			20
		)

		self.assertEqual( p.defaultValue(), 10 )
		self.assertEqual( p.getValue(), 10 )

		p.setValue( 5 )
		self.assertEqual( p.getValue(), 5 )
		self.assertEqual( p.defaultValue(), 10 )

		p.setToDefault()
		self.assertEqual( p.defaultValue(), 10 )
		self.assertEqual( p.getValue(), 10 )

		p.setValue( 5 )
		self.assertEqual( p.getValue(), 5 )
		self.assertEqual( p.defaultValue(), 10 )

	def testDefaultValueHash( self ) :

		p1 = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.In,
			10
		)

		p2 = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.In,
			20
		)

		p3 = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.In,
			20
		)

		self.assertNotEqual( p1.hash(), p2.hash() )
		self.assertEqual( p2.hash(), p3.hash() )

	def testReadOnlySetValueRaises( self ) :

		p = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.ReadOnly )
		self.assertRaises( RuntimeError, p.setValue, 10 )

	def testRepr( self ) :

		p1 = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.Out,
			10,
			-10,
			100,
			Gaffer.Plug.Flags.Default & ~Gaffer.Plug.Flags.AcceptsInputs,
		)

		# The repr must round-trip through eval().
		p2 = eval( repr( p1 ) )

		self.assertEqual( p1.getName(), p2.getName() )
		self.assertEqual( p1.direction(), p2.direction() )
		self.assertEqual( p1.defaultValue(), p2.defaultValue() )
		self.assertEqual( p1.minValue(), p2.minValue() )
		self.assertEqual( p1.maxValue(), p2.maxValue() )
		self.assertEqual( p1.getFlags(), p2.getFlags() )

	def testCreateCounterpart( self ) :

		p1 = Gaffer.IntPlug(
			"p",
			Gaffer.Plug.Direction.Out,
			10,
			-10,
			100,
			Gaffer.Plug.Flags.Default & ~Gaffer.Plug.Flags.AcceptsInputs,
		)

		p2 = p1.createCounterpart( "c", Gaffer.Plug.Direction.In )

		self.assertEqual( p2.getName(), "c" )
		self.assertEqual( p2.direction(), Gaffer.Plug.Direction.In )
		self.assertEqual( p2.getFlags(), p1.getFlags() )
		self.assertEqual( p2.defaultValue(), p1.defaultValue() )
		self.assertEqual( p2.minValue(), p1.minValue() )
		self.assertEqual( p2.maxValue(), p1.maxValue() )

	def testNoUndoMergingForDefaultMergeGroup( self ) :

		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["p"] = Gaffer.IntPlug()

		self.assertFalse( s.undoAvailable() )

		# Sets outside an UndoContext are not undoable.
		s["n"]["p"].setValue( 10 )
		self.assertFalse( s.undoAvailable() )

		with Gaffer.UndoContext( s ) :
			s["n"]["p"].setValue( 20 )

		self.assertTrue( s.undoAvailable() )

		with Gaffer.UndoContext( s ) :
			s["n"]["p"].setValue( 30 )

		self.assertTrue( s.undoAvailable() )

		s.undo()
		self.assertTrue( s.undoAvailable() )
		self.assertEqual( s["n"]["p"].getValue(), 20 )

		s.undo()
		self.assertEqual( s["n"]["p"].getValue(), 10 )
		self.assertFalse( s.undoAvailable() )

	def testUndoMerging( self ) :

		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["p"] = Gaffer.IntPlug()

		self.assertEqual( s["n"]["p"].getValue(), 0 )
		self.assertFalse( s.undoAvailable() )

		cs = GafferTest.CapturingSlot( s["n"].plugSetSignal() )

		with Gaffer.UndoContext( s, mergeGroup="test" ) :
			s["n"]["p"].setValue( 1 )

		self.assertEqual( len( cs ), 1 )
		self.assertEqual( s["n"]["p"].getValue(), 1 )
		self.assertTrue( s.undoAvailable() )

		with Gaffer.UndoContext( s, mergeGroup="test" ) :
			s["n"]["p"].setValue( 2 )

		self.assertEqual( len( cs ), 2 )
		self.assertEqual( s["n"]["p"].getValue(), 2 )
		self.assertTrue( s.undoAvailable() )

		# A different merge group starts a new undo entry.
		with Gaffer.UndoContext( s, mergeGroup="test2" ) :
			s["n"]["p"].setValue( 3 )

		self.assertEqual( len( cs ), 3 )
		self.assertEqual( s["n"]["p"].getValue(), 3 )
		self.assertTrue( s.undoAvailable() )

		s.undo()
		self.assertEqual( len( cs ), 4 )
		self.assertEqual( s["n"]["p"].getValue(), 2 )
		self.assertTrue( s.undoAvailable() )

		# The two merged sets undo as one step, back to the start.
		s.undo()
		self.assertEqual( len( cs ), 5 )
		self.assertEqual( s["n"]["p"].getValue(), 0 )
		self.assertFalse( s.undoAvailable() )

		s.redo()
		self.assertEqual( len( cs ), 6 )
		self.assertEqual( s["n"]["p"].getValue(), 2 )
		self.assertTrue( s.undoAvailable() )

		s.undo()
		self.assertEqual( len( cs ), 7 )
		self.assertEqual( s["n"]["p"].getValue(), 0 )
		self.assertFalse( s.undoAvailable() )

		s.redo()
		s.redo()
		self.assertEqual( len( cs ), 9 )
		self.assertEqual( s["n"]["p"].getValue(), 3 )
		self.assertTrue( s.undoAvailable() )

		s.undo()
		s.undo()
		self.assertEqual( len( cs ), 11 )
		self.assertEqual( s["n"]["p"].getValue(), 0 )
		self.assertFalse( s.undoAvailable() )

	def testNoUndoMergingForDifferentPlugs( self ) :

		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["p1"] = Gaffer.IntPlug()
		s["n"]["p2"] = Gaffer.IntPlug()

		self.assertFalse( s.undoAvailable() )

		with Gaffer.UndoContext( s, mergeGroup="test" ) :
			s["n"]["p1"].setValue( 20 )

		self.assertTrue( s.undoAvailable() )
		self.assertEqual( s["n"]["p1"].getValue(), 20 )
		self.assertEqual( s["n"]["p2"].getValue(), 0 )

		# Same merge group, but a different plug : no merging.
		with Gaffer.UndoContext( s, mergeGroup="test" ) :
			s["n"]["p2"].setValue( 30 )

		self.assertTrue( s.undoAvailable() )
		self.assertEqual( s["n"]["p1"].getValue(), 20 )
		self.assertEqual( s["n"]["p2"].getValue(), 30 )

		s.undo()
		self.assertTrue( s.undoAvailable() )
		self.assertEqual( s["n"]["p1"].getValue(), 20 )
		self.assertEqual( s["n"]["p2"].getValue(), 0 )

		s.undo()
		self.assertFalse( s.undoAvailable() )
		self.assertEqual( s["n"]["p1"].getValue(), 0 )
		self.assertEqual( s["n"]["p2"].getValue(), 0 )

	def testConnectionFromBool( self ) :

		i = Gaffer.IntPlug()
		f = Gaffer.FloatPlug()
		b = Gaffer.BoolPlug()

		i.setInput( b )
		f.setInput( b )

		self.assertEqual( b.getValue(), False )
		self.assertEqual( i.getValue(), 0 )
		self.assertEqual( f.getValue(), 0 )

		b.setValue( True )

		self.assertEqual( b.getValue(), True )
		self.assertEqual( i.getValue(), 1 )
		self.assertEqual( f.getValue(), 1 )

	def testIntermediateConversions( self ) :

		f1 = Gaffer.FloatPlug()
		i = Gaffer.IntPlug()
		f2 = Gaffer.FloatPlug()

		i.setInput( f1 )
		f2.setInput( i )

		# The float value is truncated by the intermediate IntPlug.
		f1.setValue( 10.2 )
		self.assertEqual( f2.getValue(), 10 )

		f1.setValue( 100.8 )
		self.assertEqual( f2.getValue(), 100 )

	def testNoChildrenAccepted( self ) :

		p1 = Gaffer.IntPlug()
		p2 = Gaffer.IntPlug()

		self.assertFalse( p1.acceptsChild( p2 ) )
		self.assertRaises( RuntimeError, p1.addChild, p2 )

	def testPrecomputedHash( self ) :

		n = GafferTest.AddNode()
		n["op1"].setValue( 10 )
		n["op2"].setValue( 20 )

		self.assertEqual( n["sum"].getValue(), 30 )
		self.assertEqual( n.numHashCalls, 1 )
		self.assertEqual( n.numComputeCalls, 1 )

		h = n["sum"].hash()
		self.assertEqual( n.numHashCalls, 2 )
		self.assertEqual( n.numComputeCalls, 1 )

		# Passing the precomputed hash must skip the hash recomputation.
		self.assertEqual( n["sum"].getValue( _precomputedHash = h ), 30 )
		self.assertEqual( n.numHashCalls, 2 )
		self.assertEqual( n.numComputeCalls, 1 )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
"""Compressed Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csr_matrix', 'isspmatrix_csr']
import numpy as np
from .base import spmatrix
from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks,
get_csr_submatrix)
from .sputils import upcast, get_index_dtype
from .compressed import _cs_matrix
class csr_matrix(_cs_matrix):
    """
    Compressed Sparse Row matrix

    This can be instantiated in several ways:
        csr_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csr_matrix(S)
            with another sparse matrix S (equivalent to S.tocsr())

        csr_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csr_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSR representation where the column indices for
            row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
            corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
            If the shape parameter is not supplied, the matrix dimensions
            are inferred from the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of stored values, including explicit zeros
    data
        CSR format data array of the matrix
    indices
        CSR format index array of the matrix
    indptr
        CSR format index pointer array of the matrix
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSR format
      - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
      - efficient row slicing
      - fast matrix vector products

    Disadvantages of the CSR format
      - slow column slicing operations (consider CSC)
      - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> csr_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    As an example of how to construct a CSR matrix incrementally,
    the following snippet builds a term-document matrix from texts:

    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
    >>> indptr = [0]
    >>> indices = []
    >>> data = []
    >>> vocabulary = {}
    >>> for d in docs:
    ...     for term in d:
    ...         index = vocabulary.setdefault(term, len(vocabulary))
    ...         indices.append(index)
    ...         data.append(1)
    ...     indptr.append(len(indices))
    ...
    >>> csr_matrix((data, indices, indptr), dtype=int).toarray()
    array([[2, 1, 0, 0],
           [0, 1, 1, 1]])
    """
    format = 'csr'

    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        M, N = self.shape
        # A CSR matrix's arrays, reinterpreted column-wise with the shape
        # swapped, are exactly the CSC representation of its transpose —
        # no work on data/indices/indptr is needed.
        from .csc import csc_matrix
        return csc_matrix((self.data, self.indices,
                           self.indptr), shape=(N, M), copy=copy)
    transpose.__doc__ = spmatrix.transpose.__doc__

    def tolil(self, copy=False):
        from .lil import lil_matrix
        lil = lil_matrix(self.shape, dtype=self.dtype)
        # LIL stores one list of column indices and one list of values per
        # row; duplicates must be summed first so each entry is unique.
        self.sum_duplicates()
        ptr, ind, dat = self.indptr, self.indices, self.data
        rows, data = lil.rows, lil.data
        for n in range(self.shape[0]):
            # indices/data for row n live in the half-open slice
            # [indptr[n], indptr[n+1]).
            start = ptr[n]
            end = ptr[n+1]
            rows[n] = ind[start:end].tolist()
            data[n] = dat[start:end].tolist()
        return lil
    tolil.__doc__ = spmatrix.tolil.__doc__

    def tocsr(self, copy=False):
        # Already in CSR form; copy only when explicitly requested.
        if copy:
            return self.copy()
        else:
            return self
    tocsr.__doc__ = spmatrix.tocsr.__doc__

    def tocsc(self, copy=False):
        # Pick an index dtype wide enough for the output index arrays.
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, self.shape[0]))
        indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))
        # The C routine fills indptr/indices/data with the CSC equivalent.
        csr_tocsc(self.shape[0], self.shape[1],
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csc import csc_matrix
        A = csc_matrix((data, indices, indptr), shape=self.shape)
        # csr_tocsc emits sorted row indices, so record that to avoid a
        # redundant sort later.
        A.has_sorted_indices = True
        return A
    tocsc.__doc__ = spmatrix.tocsc.__doc__

    def tobsr(self, blocksize=None, copy=True):
        from .bsr import bsr_matrix
        if blocksize is None:
            # No blocksize given: pick one heuristically and recurse.
            from .spfuncs import estimate_blocksize
            return self.tobsr(blocksize=estimate_blocksize(self))
        elif blocksize == (1, 1):
            # Degenerate 1x1 blocks: CSR arrays can be reused directly.
            arg1 = (self.data.reshape(-1, 1, 1), self.indices, self.indptr)
            return bsr_matrix(arg1, shape=self.shape, copy=copy)
        else:
            R, C = blocksize
            M, N = self.shape
            # The block grid must tile the matrix exactly.
            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
                raise ValueError('invalid blocksize %s' % blocksize)
            # Count the nonzero blocks up front so the output arrays can
            # be allocated exactly once.
            blks = csr_count_blocks(M, N, R, C, self.indptr, self.indices)
            idx_dtype = get_index_dtype((self.indptr, self.indices),
                                        maxval=max(N//C, blks))
            indptr = np.empty(M//R+1, dtype=idx_dtype)
            indices = np.empty(blks, dtype=idx_dtype)
            # zeros (not empty): partially-filled blocks keep zero padding.
            data = np.zeros((blks, R, C), dtype=self.dtype)
            csr_tobsr(M, N, R, C,
                      self.indptr.astype(idx_dtype),
                      self.indices.astype(idx_dtype),
                      self.data,
                      indptr, indices, data.ravel())
            return bsr_matrix((data, indices, indptr), shape=self.shape)
    tobsr.__doc__ = spmatrix.tobsr.__doc__

    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self, x):
        """swap the members of x if this is a column-oriented matrix
        """
        # CSR is row-oriented, so (major, minor) == (row, col): no swap.
        return x

    def __iter__(self):
        # Yield each row as its own 1 x N csr_matrix.  The 2-element
        # indptr is reused across iterations; copy=True makes each yielded
        # row independent of it.
        indptr = np.zeros(2, dtype=self.indptr.dtype)
        shape = (1, self.shape[1])
        i0 = 0
        for i1 in self.indptr[1:]:
            indptr[1] = i1 - i0
            indices = self.indices[i0:i1]
            data = self.data[i0:i1]
            yield csr_matrix((data, indices, indptr), shape=shape, copy=True)
            i0 = i1

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        M, N = self.shape
        i = int(i)
        # Negative indices count from the end, as for ndarrays.
        if i < 0:
            i += M
        if i < 0 or i >= M:
            raise IndexError('index (%d) out of range' % i)
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N)
        return csr_matrix((data, indices, indptr), shape=(1, N),
                          dtype=self.dtype, copy=False)

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSR matrix (column vector).
        """
        M, N = self.shape
        i = int(i)
        # Negative indices count from the end, as for ndarrays.
        if i < 0:
            i += N
        if i < 0 or i >= N:
            raise IndexError('index (%d) out of range' % i)
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1)
        return csr_matrix((data, indices, indptr), shape=(M, 1),
                          dtype=self.dtype, copy=False)

    def _get_intXarray(self, row, col):
        # A[i, array]: slice out the row, then fancy-index its columns.
        return self.getrow(row)._minor_index_fancy(col)

    def _get_intXslice(self, row, col):
        # A[i, a:b:c]: contiguous slices go through the fast C submatrix
        # extraction; strided slices are filtered out manually below.
        if col.step in (1, None):
            return self._get_submatrix(row, col, copy=True)
        # TODO: uncomment this once it's faster:
        # return self.getrow(row)._minor_slice(col)

        M, N = self.shape
        start, stop, stride = col.indices(N)

        ii, jj = self.indptr[row:row+2]
        row_indices = self.indices[ii:jj]
        row_data = self.data[ii:jj]

        # Keep only the stored columns that fall inside the slice,
        # handling forward and backward strides separately.
        if stride > 0:
            ind = (row_indices >= start) & (row_indices < stop)
        else:
            ind = (row_indices <= start) & (row_indices > stop)

        if abs(stride) > 1:
            ind &= (row_indices - start) % stride == 0

        # Remap the surviving column indices into the sliced coordinate
        # system.
        row_indices = (row_indices[ind] - start) // stride
        row_data = row_data[ind]
        row_indptr = np.array([0, len(row_indices)])

        if stride < 0:
            # Reverse so indices come out ascending for the new matrix.
            row_data = row_data[::-1]
            row_indices = abs(row_indices[::-1])

        shape = (1, int(np.ceil(float(stop - start) / stride)))
        return csr_matrix((row_data, row_indices, row_indptr), shape=shape,
                          dtype=self.dtype, copy=False)

    def _get_sliceXint(self, row, col):
        # A[a:b:c, j]: contiguous row slice uses the fast path.
        if row.step in (1, None):
            return self._get_submatrix(row, col, copy=True)
        return self._major_slice(row)._get_submatrix(minor=col)

    def _get_sliceXarray(self, row, col):
        # A[a:b:c, array]: slice the rows, then fancy-index the columns.
        return self._major_slice(row)._minor_index_fancy(col)

    def _get_arrayXint(self, row, col):
        # A[array, j]: fancy-index the rows, then extract the column.
        return self._major_index_fancy(row)._get_submatrix(minor=col)

    def _get_arrayXslice(self, row, col):
        if col.step not in (1, None):
            # Strided column slice: expand it to an explicit index array
            # and fall back to array/array indexing.
            col = np.arange(*col.indices(self.shape[1]))
            return self._get_arrayXarray(row, col)
        return self._major_index_fancy(row)._get_submatrix(minor=col)
def isspmatrix_csr(x):
    """Check whether ``x`` is of csr_matrix type.

    Parameters
    ----------
    x
        object to check for being a csr matrix

    Returns
    -------
    bool
        True if ``x`` is a csr matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import csr_matrix, isspmatrix_csr
    >>> isspmatrix_csr(csr_matrix([[5]]))
    True

    >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
    >>> isspmatrix_csr(csc_matrix([[5]]))
    False
    """
    # isinstance also accepts subclasses of csr_matrix.
    return isinstance(x, csr_matrix)
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest import test
from tempest_ec2.tests.thirdparty.boto import test as boto_test
from tempest_ec2.tests.thirdparty.boto.utils import s3
from tempest_ec2.tests.thirdparty.boto.utils import wait
CONF = config.CONF
LOG = logging.getLogger(__name__)
class InstanceRunTest(boto_test.BotoTestCase):
@classmethod
def setup_clients(cls):
    # Bind the boto clients under the short names used throughout this
    # test class.
    super(InstanceRunTest, cls).setup_clients()
    # NOTE(review): ``cls.s3_client = cls.s3_client`` is a no-op as
    # written; presumably it once aliased a differently named attribute
    # from the base class — confirm against BotoTestCase.
    cls.s3_client = cls.s3_client
    cls.ec2_client = cls.ec2api_client
@classmethod
def resource_setup(cls):
    """Upload boto materials and register the ami/aki/ari test images.

    Creates an S3 bucket, uploads the configured image materials into
    it, registers the kernel (aki), ramdisk (ari) and machine (ami)
    images, then waits for every image to become "available".  All
    created resources are scheduled for cleanup.

    Raises:
        skipException: when the ami/aki/ari manifests are not configured.
        EC2RegisterImageException: when an image fails to become
            available (all registered images are deregistered first).
    """
    super(InstanceRunTest, cls).resource_setup()
    if not cls.conclusion['A_I_IMAGES_READY']:
        raise cls.skipException("".join(("EC2 ", cls.__name__,
                                ": requires ami/aki/ari manifest")))
    cls.zone = CONF.boto.aws_zone
    cls.materials_path = CONF.boto.s3_materials_path
    ami_manifest = CONF.boto.ami_manifest
    aki_manifest = CONF.boto.aki_manifest
    ari_manifest = CONF.boto.ari_manifest
    cls.instance_type = CONF.boto.instance_type
    cls.bucket_name = data_utils.rand_name("s3bucket")
    cls.keypair_name = data_utils.rand_name("keypair")
    cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
    cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                           cls.keypair_name)
    bucket = cls.s3_client.create_bucket(cls.bucket_name)
    cls.addResourceCleanUp(cls.destroy_bucket,
                           cls.s3_client.connection_data,
                           cls.bucket_name)
    s3.s3_upload_dir(bucket, cls.materials_path)
    cls.images = {"ami":
                  {"name": data_utils.rand_name("ami-name"),
                   "location": cls.bucket_name + "/" + ami_manifest},
                  "aki":
                  {"name": data_utils.rand_name("aki-name"),
                   "location": cls.bucket_name + "/" + aki_manifest},
                  "ari":
                  {"name": data_utils.rand_name("ari-name"),
                   "location": cls.bucket_name + "/" + ari_manifest}}
    # Register the kernel and ramdisk first: the machine image below
    # refers to both of their ids.
    for image_type in ("aki", "ari"):
        image = cls.images[image_type]
        image["image_id"] = cls.ec2_client.register_image(
            name=image["name"],
            image_location=image["location"])
        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                               image["image_id"])
    image = cls.images["ami"]
    image["image_id"] = cls.ec2_client.register_image(
        name=image["name"],
        image_location=image["location"],
        kernel_id=cls.images["aki"]["image_id"],
        ramdisk_id=cls.images["ari"]["image_id"])
    cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                           image["image_id"])
    # dict.values() works on both Python 2 and 3; the original
    # itervalues() exists only on Python 2.
    for image in cls.images.values():
        def _state():
            # Re-fetch the image so we see its current state; `image`
            # is the current loop item because _state is only called
            # within this iteration.
            retr = cls.ec2_client.get_image(image["image_id"])
            return retr.state
        state = wait.state_wait(_state, "available")
        if state != "available":
            for _image in cls.images.values():
                cls.ec2_client.deregister_image(_image["image_id"])
            raise exceptions.EC2RegisterImageException(
                image_id=image["image_id"])
def _terminate_reservation(self, reservation, rcuk):
    """Terminate every instance of *reservation* and drop its cleanup.

    Args:
        reservation: boto reservation whose instances are terminated.
        rcuk: resource-cleanup key previously returned by
            addResourceCleanUp, cancelled once termination is confirmed.
    """
    instances = reservation.instances
    # Issue every terminate request first, then wait for each instance,
    # so the terminations proceed in parallel.
    for instance in instances:
        instance.terminate()
    for instance in instances:
        self.assertInstanceStateWait(instance, '_GONE')
    self.cancelResourceCleanUp(rcuk)
@test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
def test_run_idempotent_instances(self):
    # EC2 run instances idempotently: the same client token must return
    # the same reservation instead of launching a new instance.

    def _run_instance(client_token):
        reservation = self.ec2_client.run_instances(
            image_id=self.images["ami"]["image_id"],
            kernel_id=self.images["aki"]["image_id"],
            ramdisk_id=self.images["ari"]["image_id"],
            instance_type=self.instance_type,
            client_token=client_token)
        cleanup_key = self.addResourceCleanUp(self.destroy_reservation,
                                              reservation)
        return reservation, cleanup_key

    reservation_1, rcuk_1 = _run_instance('token_1')
    reservation_2, rcuk_2 = _run_instance('token_2')
    reservation_1a, rcuk_1a = _run_instance('token_1')

    for reservation in (reservation_1, reservation_2, reservation_1a):
        self.assertIsNotNone(reservation)

    # same reservation for token_1
    self.assertEqual(reservation_1.id, reservation_1a.id)

    # Cancel cleanup -- since it's a duplicate, it's
    # handled by rcuk1
    self.cancelResourceCleanUp(rcuk_1a)

    self._terminate_reservation(reservation_1, rcuk_1)
    self._terminate_reservation(reservation_2, rcuk_2)
@test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
def test_run_stop_terminate_instance(self):
    # EC2 run, stop and terminate instance
    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type)
    rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)

    # Wait for every instance to reach "running" before stopping it.
    for instance in reservation.instances:
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

    for instance in reservation.instances:
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")

    self._terminate_reservation(reservation, rcuk)
@test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
def test_run_stop_terminate_instance_with_tags(self):
    # EC2 run, stop and terminate instance with tags
    image_ami = self.ec2_client.get_image(self.images["ami"]
                                          ["image_id"])
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type)
    rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
    for instance in reservation.instances:
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        instance.add_tag('key1', value='value1')
    # The tag must be visible unfiltered, by key and by value.
    tags = self.ec2_client.get_all_tags()
    td = dict((item.name, item.value) for item in tags)
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
    td = dict((item.name, item.value) for item in tags)
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
    td = dict((item.name, item.value) for item in tags)
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    # Filtering by a non-matching key must not return the tag.
    tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
    td = dict((item.name, item.value) for item in tags)
    self.assertNotIn('key1', td)
    for instance in reservation.instances:
        instance.remove_tag('key1', value='value1')
    tags = self.ec2_client.get_all_tags()
    # NOTE: Volume-attach and detach causes metadata (tags) to be created
    # for the volume. So exclude them while asserting.
    # Build the name->value dict as above; the original asserted against
    # the raw list of tag objects, so 'key1' was never a member and the
    # check passed vacuously.
    td = dict((item.name, item.value) for item in tags)
    self.assertNotIn('key1', td)
    for instance in reservation.instances:
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
    self._terminate_reservation(reservation, rcuk)
@test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
def test_run_terminate_instance(self):
    # EC2 run, terminate immediately
    ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    reservation = ami.run(kernel_id=self.images["aki"]["image_id"],
                          ramdisk_id=self.images["ari"]["image_id"],
                          instance_type=self.instance_type)

    # Terminate right away and wait until the instances disappear.
    for inst in reservation.instances:
        inst.terminate()
        self.assertInstanceStateWait(inst, '_GONE')
@test.idempotent_id('ab836c29-737b-4101-9fb9-87045eaf89e9')
def test_compute_with_volumes(self):
    # EC2 1. integration test (not strict)
    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    sec_group_name = data_utils.rand_name("securitygroup")
    group_desc = sec_group_name + " security group description "
    security_group = self.ec2_client.create_security_group(sec_group_name,
                                                           group_desc)
    self.addResourceCleanUp(self.destroy_security_group_wait,
                            security_group)
    # Open ICMP (ping) and TCP/22 (ssh) for the instance.
    self.assertTrue(
        self.ec2_client.authorize_security_group(
            sec_group_name,
            ip_protocol="icmp",
            cidr_ip="0.0.0.0/0",
            from_port=-1,
            to_port=-1))
    self.assertTrue(
        self.ec2_client.authorize_security_group(
            sec_group_name,
            ip_protocol="tcp",
            cidr_ip="0.0.0.0/0",
            from_port=22,
            to_port=22))
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type,
                                key_name=self.keypair_name,
                                security_groups=(sec_group_name,))
    LOG.debug("Instance booted - state: %s",
              reservation.instances[0].state)
    self.addResourceCleanUp(self.destroy_reservation,
                            reservation)
    volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                           self.zone)
    LOG.debug("Volume created - status: %s", volume.status)
    self.addResourceCleanUp(self.destroy_volume_wait, volume)
    instance = reservation.instances[0]
    if instance.state != "running":
        self.assertInstanceStateWait(instance, "running")
    LOG.debug("Instance now running - state: %s", instance.state)
    # Attach a floating address so the instance is reachable over ssh.
    address = self.ec2_client.allocate_address()
    rcuk_a = self.addResourceCleanUp(address.delete)
    self.assertTrue(address.associate(instance.id))
    rcuk_da = self.addResourceCleanUp(address.disassociate)
    # TODO(afazekas): ping test. dependecy/permission ?
    self.assertVolumeStatusWait(volume, "available")
    # NOTE(afazekas): it may be reports available before it is available
    ssh = remote_client.RemoteClient(address.public_ip,
                                     CONF.compute.ssh_user,
                                     pkey=self.keypair.material)
    text = data_utils.rand_name("Pattern text for console output")
    try:
        resp = ssh.write_to_console(text)
    except Exception:
        if not CONF.compute_feature_enabled.console_output:
            LOG.debug('Console output not supported, cannot log')
        else:
            console_output = instance.get_console_output().output
            LOG.debug('Console output for %s\nbody=\n%s',
                      instance.id, console_output)
            raise
    # NOTE(review): 'resp' is unbound when write_to_console raised and
    # console output is unsupported — presumably that path never runs in
    # gate configs; confirm before relying on it.
    self.assertFalse(resp)

    def _output():
        # Fetch the current console output for re_search_wait polling.
        output = instance.get_console_output()
        return output.output
    wait.re_search_wait(_output, text)
    # Snapshot the partition table before attaching the volume.
    part_lines = ssh.get_partitions().split('\n')
    volume.attach(instance.id, "/dev/vdh")

    def _volume_state():
        """Return volume state realizing that 'in-use' is overloaded."""
        volume.update(validate=True)
        status = volume.status
        attached = volume.attach_data.status
        LOG.debug("Volume %s is in status: %s, attach_status: %s",
                  volume.id, status, attached)
        # Nova reports 'in-use' on 'attaching' volumes because we
        # have a single volume status, and EC2 has 2. Ensure that
        # if we aren't attached yet we return something other than
        # 'in-use'
        if status == 'in-use' and attached != 'attached':
            return 'attaching'
        else:
            return status
    wait.re_search_wait(_volume_state, "in-use")
    # NOTE(afazekas): Different Hypervisor backends names
    # differently the devices,
    # now we just test is the partition number increased/decrised

    def _part_state():
        # NOTE(review): list comparison here is lexicographic, not a
        # count comparison — presumably it relies on the extra/missing
        # partition line ordering; confirm.
        current = ssh.get_partitions().split('\n')
        LOG.debug("Partition map for instance: %s", current)
        if current > part_lines:
            return 'INCREASE'
        if current < part_lines:
            return 'DECREASE'
        return 'EQUAL'
    wait.state_wait(_part_state, 'INCREASE')
    part_lines = ssh.get_partitions().split('\n')
    # TODO(afazekas): Resource compare to the flavor settings
    volume.detach()
    self.assertVolumeStatusWait(volume, "available")
    wait.state_wait(_part_state, 'DECREASE')
    instance.stop()
    # Release the floating address and cancel its cleanups now that it
    # has been detached/released explicitly.
    address.disassociate()
    self.assertAddressDisassociatedWait(address)
    self.cancelResourceCleanUp(rcuk_da)
    address.release()
    self.assertAddressReleasedWait(address)
    self.cancelResourceCleanUp(rcuk_a)
    LOG.debug("Instance %s state: %s", instance.id, instance.state)
    if instance.state != "stopped":
        self.assertInstanceStateWait(instance, "stopped")
    # TODO(afazekas): move steps from teardown to the test case
    # TODO(afazekas): Snapshot/volume read/write test case
| |
"""Support for Microsoft face recognition."""
import asyncio
import json
import logging
import aiohttp
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_TIMEOUT, ATTR_NAME
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)

# Service-call attribute names.
ATTR_CAMERA_ENTITY = 'camera_entity'
ATTR_GROUP = 'group'
ATTR_PERSON = 'person'

CONF_AZURE_REGION = 'azure_region'

DATA_MICROSOFT_FACE = 'microsoft_face'
DEFAULT_TIMEOUT = 10
DOMAIN = 'microsoft_face'

# Endpoint template; the Azure region is prepended at runtime and the
# placeholder is filled with the API function path.
FACE_API_URL = "api.cognitive.microsoft.com/face/v1.0/{0}"

# Names of the services registered by async_setup.
SERVICE_CREATE_GROUP = 'create_group'
SERVICE_CREATE_PERSON = 'create_person'
SERVICE_DELETE_GROUP = 'delete_group'
SERVICE_DELETE_PERSON = 'delete_person'
SERVICE_FACE_PERSON = 'face_person'
SERVICE_TRAIN_GROUP = 'train_group'

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_AZURE_REGION, default='westus'): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    }),
}, extra=vol.ALLOW_EXTRA)

# Validation schemas for the service payloads.
SCHEMA_GROUP_SERVICE = vol.Schema({
    vol.Required(ATTR_NAME): cv.string,
})

SCHEMA_PERSON_SERVICE = SCHEMA_GROUP_SERVICE.extend({
    vol.Required(ATTR_GROUP): cv.slugify,
})

SCHEMA_FACE_SERVICE = vol.Schema({
    vol.Required(ATTR_PERSON): cv.string,
    vol.Required(ATTR_GROUP): cv.slugify,
    vol.Required(ATTR_CAMERA_ENTITY): cv.entity_id,
})

SCHEMA_TRAIN_SERVICE = vol.Schema({
    vol.Required(ATTR_GROUP): cv.slugify,
})
async def async_setup(hass, config):
    """Set up Microsoft Face.

    Syncs the existing person groups/persons from the cloud into local
    entities and registers the management services.  Returns False
    (aborting setup) when the initial cloud sync fails.
    """
    entities = {}
    face = MicrosoftFace(
        hass,
        config[DOMAIN].get(CONF_AZURE_REGION),
        config[DOMAIN].get(CONF_API_KEY),
        config[DOMAIN].get(CONF_TIMEOUT),
        entities
    )

    try:
        # Read existing groups/persons from the cloud and create entities.
        await face.update_store()
    except HomeAssistantError as err:
        _LOGGER.error("Can't load data from face api: %s", err)
        return False

    hass.data[DATA_MICROSOFT_FACE] = face

    async def async_create_group(service):
        """Create a new person group."""
        name = service.data[ATTR_NAME]
        g_id = slugify(name)

        try:
            await face.call_api(
                'put', "persongroups/{0}".format(g_id), {'name': name})
            face.store[g_id] = {}

            entities[g_id] = MicrosoftFaceGroupEntity(hass, face, g_id, name)
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_GROUP, async_create_group,
        schema=SCHEMA_GROUP_SERVICE)

    async def async_delete_group(service):
        """Delete a person group and remove its entity."""
        g_id = slugify(service.data[ATTR_NAME])

        try:
            await face.call_api('delete', "persongroups/{0}".format(g_id))
            face.store.pop(g_id)

            entity = entities.pop(g_id)
            hass.states.async_remove(entity.entity_id)
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_GROUP, async_delete_group,
        schema=SCHEMA_GROUP_SERVICE)

    async def async_train_group(service):
        """Trigger cloud-side training of a person group."""
        g_id = service.data[ATTR_GROUP]

        try:
            await face.call_api(
                'post', "persongroups/{0}/train".format(g_id))
        except HomeAssistantError as err:
            _LOGGER.error("Can't train group '%s' with error: %s", g_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_TRAIN_GROUP, async_train_group,
        schema=SCHEMA_TRAIN_SERVICE)

    async def async_create_person(service):
        """Create a person inside an existing group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]

        try:
            user_data = await face.call_api(
                'post', "persongroups/{0}/persons".format(g_id), {'name': name}
            )

            face.store[g_id][name] = user_data['personId']
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't create person '%s' with error: %s", name, err)

    hass.services.async_register(
        DOMAIN, SERVICE_CREATE_PERSON, async_create_person,
        schema=SCHEMA_PERSON_SERVICE)

    async def async_delete_person(service):
        """Delete a person from a group."""
        name = service.data[ATTR_NAME]
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(name)

        try:
            await face.call_api(
                'delete', "persongroups/{0}/persons/{1}".format(g_id, p_id))

            face.store[g_id].pop(name)
            await entities[g_id].async_update_ha_state()
        except HomeAssistantError as err:
            _LOGGER.error("Can't delete person '%s' with error: %s", p_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_DELETE_PERSON, async_delete_person,
        schema=SCHEMA_PERSON_SERVICE)

    async def async_face_person(service):
        """Add a new face picture to a person."""
        g_id = service.data[ATTR_GROUP]
        p_id = face.store[g_id].get(service.data[ATTR_PERSON])
        camera_entity = service.data[ATTR_CAMERA_ENTITY]
        camera = hass.components.camera

        try:
            image = await camera.async_get_image(hass, camera_entity)

            await face.call_api(
                'post',
                "persongroups/{0}/persons/{1}/persistedFaces".format(
                    g_id, p_id),
                image.content,
                binary=True
            )
        except HomeAssistantError as err:
            # BUG FIX: this previously logged "Can't delete person",
            # copy-pasted from async_delete_person; this handler adds a
            # face picture.
            _LOGGER.error(
                "Can't add face to person '%s' with error: %s", p_id, err)

    hass.services.async_register(
        DOMAIN, SERVICE_FACE_PERSON, async_face_person,
        schema=SCHEMA_FACE_SERVICE)

    return True
class MicrosoftFaceGroupEntity(Entity):
    """Representation of one Microsoft Face person group."""

    def __init__(self, hass, api, g_id, name):
        """Initialize person/group entity."""
        self.hass = hass
        self._api = api
        self._id = g_id
        self._name = name

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    @property
    def entity_id(self):
        """Return entity id."""
        return "{0}.{1}".format(DOMAIN, self._id)

    @property
    def state(self):
        """Return the number of persons registered in this group."""
        return len(self._api.store[self._id])

    @property
    def should_poll(self):
        """Return False; updates are pushed by the service handlers."""
        return False

    @property
    def device_state_attributes(self):
        """Expose each person name mapped to its cloud person id."""
        return dict(self._api.store[self._id])
class MicrosoftFace:
    """Microsoft Face api for HomeAssistant."""

    def __init__(self, hass, server_loc, api_key, timeout, entities):
        """Initialize Microsoft Face api."""
        self.hass = hass
        self.websession = async_get_clientsession(hass)
        self.timeout = timeout
        self._api_key = api_key
        # e.g. https://westus.api.cognitive.microsoft.com/face/v1.0/{0}
        self._server_url = "https://{0}.{1}".format(server_loc, FACE_API_URL)
        self._store = {}
        self._entities = entities

    @property
    def store(self):
        """Store group/person data and IDs."""
        return self._store

    async def update_store(self):
        """Load all group/person data into local store."""
        groups = await self.call_api('get', 'persongroups')

        tasks = []
        for group in groups:
            g_id = group['personGroupId']
            self._store[g_id] = {}
            self._entities[g_id] = MicrosoftFaceGroupEntity(
                self.hass, self, g_id, group['name'])

            persons = await self.call_api(
                'get', "persongroups/{0}/persons".format(g_id))

            for person in persons:
                self._store[g_id][person['name']] = person['personId']

            tasks.append(self._entities[g_id].async_update_ha_state())

        if tasks:
            await asyncio.wait(tasks, loop=self.hass.loop)

    async def call_api(self, method, function, data=None, binary=False,
                       params=None):
        """Make an api call.

        :param method: HTTP verb, used as the aiohttp session method name.
        :param function: API path appended to the server url.
        :param data: Raw bytes when *binary*, otherwise a JSON-able object.
        :raises HomeAssistantError: on API errors, network errors or
            timeout.
        """
        headers = {"Ocp-Apim-Subscription-Key": self._api_key}
        url = self._server_url.format(function)

        payload = None
        if binary:
            headers[CONTENT_TYPE] = "application/octet-stream"
            payload = data
        else:
            headers[CONTENT_TYPE] = "application/json"
            if data is not None:
                payload = json.dumps(data).encode()

        try:
            with async_timeout.timeout(self.timeout, loop=self.hass.loop):
                response = await getattr(self.websession, method)(
                    url, data=payload, headers=headers, params=params)

                answer = await response.json()

            _LOGGER.debug("Read from microsoft face api: %s", answer)
            if response.status < 300:
                return answer

            _LOGGER.warning("Error %d microsoft face api %s",
                            response.status, response.url)
            raise HomeAssistantError(answer['error']['message'])

        except aiohttp.ClientError:
            _LOGGER.warning("Can't connect to microsoft face api")

        except asyncio.TimeoutError:
            # BUG FIX: use the request url here; 'response' is unbound
            # when the timeout fires before the request completes, which
            # raised UnboundLocalError and masked the timeout.
            _LOGGER.warning("Timeout from microsoft face api %s", url)

        raise HomeAssistantError("Network error on microsoft face api.")
| |
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import urllib
import httplib2
from keystoneclient.v2_0 import client as keyclient
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import config # noqa
from neutron.plugins.ibm.common import constants
from neutron import wsgi
LOG = logging.getLogger(__name__)

# Controller API version and URL layout.
SDNVE_VERSION = '2.0'
SDNVE_ACTION_PREFIX = '/sdnve'
# Retry policy for controller requests.  NOTE: 'RETRIY' typo is kept —
# the name is part of the module's public surface.
SDNVE_RETRIES = 0
SDNVE_RETRIY_INTERVAL = 1
# Tenant type string the controller uses for overlay (DOVE) tenants.
SDNVE_TENANT_TYPE_OVERLAY = u'DOVE'
# Filled with (controller_ip, port, base_url).
SDNVE_URL = 'https://%s:%s%s'
class RequestHandler(object):
    '''Handles processing requests to and responses from controller.'''

    def __init__(self, controller_ips=None, port=None, ssl=None,
                 base_url=None, userid=None, password=None,
                 timeout=10, formats=None):
        '''Initializes the RequestHandler for communication with controller

        Following keyword arguments are used; if not specified, default
        values are used.
        :param port: Port on which the controller listens.
        :param timeout: Time out for http requests.
        :param userid: User id for accessing controller.
        :param password: Password for accessing the controller.
        :param base_url: The base url for the controller.
        :param controller_ips: List of controller IP addresses.
        :param formats: Supported formats.
        '''
        self.port = port or cfg.CONF.SDNVE.port
        self.timeout = timeout
        self._s_meta = None
        self.connection = None
        # Certificate validation is disabled on purpose: the controller
        # typically runs with a self-signed certificate.
        self.httpclient = httplib2.Http(
            disable_ssl_certificate_validation=True)
        self.cookie = None

        userid = userid or cfg.CONF.SDNVE.userid
        password = password or cfg.CONF.SDNVE.password
        if (userid and password):
            self.httpclient.add_credentials(userid, password)

        self.base_url = base_url or cfg.CONF.SDNVE.base_url
        self.controller_ips = controller_ips or cfg.CONF.SDNVE.controller_ips

        LOG.info(_LI("The IP addr of available SDN-VE controllers: %s"),
                 self.controller_ips)
        # Start with the first configured controller; do_request may fail
        # over to another one and record it here.
        self.controller_ip = self.controller_ips[0]
        LOG.info(_LI("The SDN-VE controller IP address: %s"),
                 self.controller_ip)

        self.new_controller = False
        self.format = formats or cfg.CONF.SDNVE.format

        self.version = SDNVE_VERSION
        self.action_prefix = SDNVE_ACTION_PREFIX
        self.retries = SDNVE_RETRIES
        self.retry_interval = SDNVE_RETRIY_INTERVAL

    def serialize(self, data):
        '''Serializes a dictionary with a single key.'''

        if isinstance(data, dict):
            return wsgi.Serializer().serialize(data, self.content_type())
        elif data:
            # NOTE(review): '_' is presumed to be installed as a builtin
            # by neutron's i18n machinery — confirm, it is not imported
            # in this module.
            raise TypeError(_("unable to serialize object type: '%s'") %
                            type(data))

    def deserialize(self, data, status_code):
        '''Deserializes an xml or json string into a dictionary.'''

        # NOTE(mb): Temporary fix for backend controller requirement
        data = data.replace("router_external", "router:external")

        if status_code == httplib.NO_CONTENT:
            return data
        try:
            deserialized_data = wsgi.Serializer(
                metadata=self._s_meta).deserialize(data, self.content_type())
            deserialized_data = deserialized_data['body']
        except Exception:
            # Fall back to the raw payload when it is not valid xml/json.
            deserialized_data = data

        return deserialized_data

    def content_type(self, format=None):
        '''Returns the mime-type for either 'xml' or 'json'.'''

        return 'application/%s' % (format or self.format)

    def delete(self, url, body=None, headers=None, params=None):
        '''Issues a DELETE request to the controller.'''
        return self.do_request("DELETE", url, body=body,
                               headers=headers, params=params)

    def get(self, url, body=None, headers=None, params=None):
        '''Issues a GET request to the controller.'''
        return self.do_request("GET", url, body=body,
                               headers=headers, params=params)

    def post(self, url, body=None, headers=None, params=None):
        '''Issues a POST request to the controller.'''
        return self.do_request("POST", url, body=body,
                               headers=headers, params=params)

    def put(self, url, body=None, headers=None, params=None):
        '''Issues a PUT request to the controller.'''
        return self.do_request("PUT", url, body=body,
                               headers=headers, params=params)

    def do_request(self, method, url, body=None, headers=None,
                   params=None, connection_type=None):
        '''Sends a request, failing over across the known controllers.

        Returns a (status_code, deserialized_body) tuple; when no
        controller could be reached, returns
        (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)').
        '''
        status_code = -1
        replybody_deserialized = ''

        if body:
            body = self.serialize(body)

        self.headers = headers or {'Content-Type': self.content_type()}
        if self.cookie:
            self.headers['cookie'] = self.cookie

        # Try the controller that worked last time first, then the
        # configured list (which may repeat it — harmless).
        if self.controller_ip != self.controller_ips[0]:
            controllers = [self.controller_ip]
        else:
            controllers = []
        controllers.extend(self.controller_ips)

        for controller_ip in controllers:
            serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url)
            myurl = serverurl + url
            if params and isinstance(params, dict):
                myurl += '?' + urllib.urlencode(params, doseq=1)

            try:
                LOG.debug("Sending request to SDN-VE. url: "
                          "%(myurl)s method: %(method)s body: "
                          "%(body)s header: %(header)s ",
                          {'myurl': myurl, 'method': method,
                           'body': body, 'header': self.headers})
                resp, replybody = self.httpclient.request(
                    myurl, method=method, body=body, headers=self.headers)
                LOG.debug(("Response recd from SDN-VE. resp: %(resp)s"
                           "body: %(body)s"),
                          {'resp': resp.status, 'body': replybody})
                status_code = resp.status
            except Exception as e:
                LOG.error(_LE("Error: Could not reach server: %(url)s "
                              "Exception: %(excp)s."),
                          {'url': myurl, 'excp': e})
                # Drop the session cookie and try the next controller.
                self.cookie = None
                continue

            if status_code not in constants.HTTP_ACCEPTABLE:
                LOG.debug("Error message: %(reply)s -- Status: %(status)s",
                          {'reply': replybody, 'status': status_code})
            else:
                LOG.debug("Received response status: %s", status_code)
                if resp.get('set-cookie'):
                    self.cookie = resp['set-cookie']
                replybody_deserialized = self.deserialize(
                    replybody,
                    status_code)
                LOG.debug("Deserialized body: %s",
                          replybody_deserialized)

            if controller_ip != self.controller_ip:
                # bcast the change of controller
                self.new_controller = True
                self.controller_ip = controller_ip

            # First reachable controller answers the request, even with
            # a non-acceptable status code.
            return (status_code, replybody_deserialized)

        return (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)')
class Client(RequestHandler):
    '''Client for SDNVE controller.'''

    def __init__(self):
        '''Initialize a new SDNVE client.'''
        super(Client, self).__init__()

        self.keystoneclient = KeystoneClient()

        # BUG FIX: this mapping was previously assigned to a local
        # variable, so self.resource_path never existed and every
        # sdnve_* method failed with an AttributeError.
        self.resource_path = {
            'network': "ln/networks/",
            'subnet': "ln/subnets/",
            'port': "ln/ports/",
            'tenant': "ln/tenants/",
            'router': "ln/routers/",
            'floatingip': "ln/floatingips/",
        }

    def process_request(self, body):
        '''Processes requests according to requirements of controller.'''
        if self.format == 'json':
            # The controller cannot digest ':' in keys or unset values.
            body = dict(
                (k.replace(':', '_'), v) for k, v in body.items()
                if attributes.is_attr_set(v))
        return body

    def sdnve_list(self, resource, **params):
        '''Fetches a list of resources.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_LI("Bad resource for forming a list request"))
            return 0, ''

        return self.get(res, params=params)

    def sdnve_show(self, resource, specific, **params):
        '''Fetches information of a certain resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_LI("Bad resource for forming a show request"))
            return 0, ''

        return self.get(res + specific, params=params)

    def sdnve_create(self, resource, body):
        '''Creates a new resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_LI("Bad resource for forming a create request"))
            return 0, ''

        body = self.process_request(body)
        status, data = self.post(res, body=body)
        return (status, data)

    def sdnve_update(self, resource, specific, body=None):
        '''Updates a resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_LI("Bad resource for forming a update request"))
            return 0, ''

        body = self.process_request(body)
        return self.put(res + specific, body=body)

    def sdnve_delete(self, resource, specific):
        '''Deletes the specified resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_LI("Bad resource for forming a delete request"))
            return 0, ''

        return self.delete(res + specific)

    def _tenant_id_conversion(self, osid):
        '''Maps an OpenStack tenant id to the controller tenant id.'''
        return osid

    def sdnve_get_tenant_byid(self, os_tenant_id):
        '''Returns (tenant_id, tenant_type) for an OS tenant, if known.'''
        sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id)
        resp, content = self.sdnve_show('tenant', sdnve_tenant_id)
        if resp in constants.HTTP_ACCEPTABLE:
            tenant_id = content.get('id')
            tenant_type = content.get('network_type')
            # Normalize the controller's overlay marker to our constant.
            if tenant_type == SDNVE_TENANT_TYPE_OVERLAY:
                tenant_type = constants.TENANT_TYPE_OVERLAY
            return tenant_id, tenant_type
        return None, None

    def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None):
        '''Returns the controller tenant id, creating the tenant if needed.

        Returns None when the tenant exists with a conflicting network
        type or when creation fails.
        '''

        if not os_tenant_id:
            return
        tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id)
        if tenant_id:
            if not network_type:
                return tenant_id
            if tenant_type != network_type:
                LOG.info(_LI("Non matching tenant and network types: "
                             "%(ttype)s %(ntype)s"),
                         {'ttype': tenant_type, 'ntype': network_type})
                return
            return tenant_id

        # Have to create a new tenant
        sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id)
        if not network_type:
            network_type = self.keystoneclient.get_tenant_type(os_tenant_id)
        if network_type == constants.TENANT_TYPE_OVERLAY:
            network_type = SDNVE_TENANT_TYPE_OVERLAY

        pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " +
                     self.keystoneclient.get_tenant_name(os_tenant_id))

        res, content = self.sdnve_create('tenant',
                                         {'id': sdnve_tenant_id,
                                          'name': os_tenant_id,
                                          'network_type': network_type,
                                          'description': pinn_desc})
        if res not in constants.HTTP_ACCEPTABLE:
            return

        return sdnve_tenant_id

    def sdnve_get_controller(self):
        '''Returns the new controller IP once after a failover, else None.'''
        if self.new_controller:
            self.new_controller = False
            return self.controller_ip
class KeystoneClient(object):
    '''Thin keystone wrapper used to look up tenant names and types.'''

    def __init__(self, username=None, tenant_name=None, password=None,
                 auth_url=None):
        '''Builds a keystone v2 client from args or config defaults.'''
        keystone_conf = cfg.CONF.keystone_authtoken

        username = username or keystone_conf.admin_user
        tenant_name = tenant_name or keystone_conf.admin_tenant_name
        password = password or keystone_conf.admin_password
        # FIXME(ihrachys): plugins should not construct keystone URL
        # from configuration file and should instead rely on service
        # catalog contents
        auth_url = auth_url or utils.get_keystone_url(keystone_conf)

        self.overlay_signature = cfg.CONF.SDNVE.overlay_signature
        self.of_signature = cfg.CONF.SDNVE.of_signature
        self.default_tenant_type = cfg.CONF.SDNVE.default_tenant_type

        self.client = keyclient.Client(username=username,
                                       password=password,
                                       tenant_name=tenant_name,
                                       auth_url=auth_url)

    def get_tenant_byid(self, id):
        '''Returns the keystone tenant for id, or None when not found.'''
        try:
            return self.client.tenants.get(id)
        except Exception:
            LOG.exception(_LE("Did not find tenant: %r"), id)

    def get_tenant_type(self, id):
        '''Derives the tenant type from signatures in its description.'''
        tenant = self.get_tenant_byid(id)
        description = tenant.description if tenant else None
        if description:
            if self.overlay_signature in description:
                return constants.TENANT_TYPE_OVERLAY
            if self.of_signature in description:
                return constants.TENANT_TYPE_OF
        return self.default_tenant_type

    def get_tenant_name(self, id):
        '''Returns the tenant name, or 'not found' when lookup fails.'''
        tenant = self.get_tenant_byid(id)
        return tenant.name if tenant else 'not found'
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from oslo_db import exception as db_exc
from oslo_log import log
from oslo_utils import uuidutils
import six
from sqlalchemy import or_
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.db import api as db_api
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
from neutron.objects import ports as port_obj
from neutron.plugins.ml2 import models
from neutron.services.segments import exceptions as seg_exc
LOG = log.getLogger(__name__)

# limit the number of port OR LIKE statements in one query
# (get_ports_and_sgs partitions larger requests into chunks of this size)
MAX_PORTS_PER_QUERY = 500
@db_api.context_manager.writer
def add_port_binding(context, port_id):
    """Create and return an unbound PortBinding row for port_id."""
    binding = models.PortBinding(
        port_id=port_id,
        vif_type=portbindings.VIF_TYPE_UNBOUND)
    context.session.add(binding)
    return binding
@db_api.context_manager.writer
def set_binding_levels(context, levels):
    """Persist every PortBindingLevel in levels to the session."""
    if not levels:
        LOG.debug("Attempted to set empty binding levels")
        return
    for level in levels:
        level.persist_state_to_session(context.session)
    LOG.debug("For port %(port_id)s, host %(host)s, "
              "set binding levels %(levels)s",
              {'port_id': levels[0].port_id,
               'host': levels[0].host,
               'levels': levels})
@db_api.context_manager.reader
def get_binding_levels(context, port_id, host):
    """Return the ordered binding levels for (port_id, host), or None."""
    if not host:
        return None
    levels = (context.session.query(models.PortBindingLevel).
              filter_by(port_id=port_id, host=host).
              order_by(models.PortBindingLevel.level).
              all())
    LOG.debug("For port %(port_id)s, host %(host)s, "
              "got binding levels %(levels)s",
              {'port_id': port_id,
               'host': host,
               'levels': levels})
    return levels
@db_api.context_manager.writer
def clear_binding_levels(context, port_id, host):
    """Delete all binding levels recorded for (port_id, host)."""
    if not host:
        return
    query = context.session.query(models.PortBindingLevel).filter_by(
        port_id=port_id, host=host)
    for binding_level in query:
        context.session.delete(binding_level)
    LOG.debug("For port %(port_id)s, host %(host)s, "
              "cleared binding levels",
              {'port_id': port_id,
               'host': host})
def ensure_distributed_port_binding(context, port_id, host, router_id=None):
    """Get or create the DistributedPortBinding for (port_id, host).

    Handles the insert race: a concurrent writer creating the same
    binding triggers DBDuplicateEntry, in which case the existing row
    is re-read and returned.
    """
    # Fast path: the binding already exists.
    with db_api.context_manager.reader.using(context):
        record = (context.session.query(models.DistributedPortBinding).
                  filter_by(port_id=port_id, host=host).first())
    if record:
        return record

    try:
        with db_api.context_manager.writer.using(context):
            record = models.DistributedPortBinding(
                port_id=port_id,
                host=host,
                router_id=router_id,
                vif_type=portbindings.VIF_TYPE_UNBOUND,
                vnic_type=portbindings.VNIC_NORMAL,
                status=n_const.PORT_STATUS_DOWN)
            context.session.add(record)
            return record
    except db_exc.DBDuplicateEntry:
        # Lost the race against another writer; return its row.
        LOG.debug("Distributed Port %s already bound", port_id)
        with db_api.context_manager.reader.using(context):
            return (context.session.query(models.DistributedPortBinding).
                    filter_by(port_id=port_id, host=host).one())
def delete_distributed_port_binding_if_stale(context, binding):
    """Drop a distributed binding that serves no router and is DOWN."""
    stale = (not binding.router_id and
             binding.status == n_const.PORT_STATUS_DOWN)
    if not stale:
        return
    with db_api.context_manager.writer.using(context):
        LOG.debug("Distributed port: Deleting binding %s", binding)
        context.session.delete(binding)
def get_port(context, port_id):
    """Get port record for update within transaction."""
    with db_api.context_manager.reader.using(context):
        try:
            # Keep eager loads enabled so related rows can still be
            # lazy-loaded by the caller later.
            return (context.session.query(models_v2.Port).
                    enable_eagerloads(True).
                    filter(models_v2.Port.id.startswith(port_id)).
                    one())
        except exc.NoResultFound:
            return None
        except exc.MultipleResultsFound:
            LOG.error("Multiple ports have port_id starting with %s",
                      port_id)
            return None
@db_api.context_manager.reader
def get_port_from_device_mac(context, device_mac):
    """Return one Port object matching device_mac, or None."""
    LOG.debug("get_port_from_device_mac() called for mac %s", device_mac)
    matches = port_obj.Port.get_objects(context, mac_address=device_mac)
    if not matches:
        return None
    return matches.pop()
def get_ports_and_sgs(context, port_ids):
    """Get ports from database with security group info."""
    # Split oversized requests so a single query never exceeds the cap.
    if len(port_ids) > MAX_PORTS_PER_QUERY:
        LOG.debug("Number of ports %(pcount)s exceeds the maximum per "
                  "query %(maxp)s. Partitioning queries.",
                  {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY})
        head = port_ids[:MAX_PORTS_PER_QUERY]
        tail = port_ids[MAX_PORTS_PER_QUERY:]
        return (get_ports_and_sgs(context, head) +
                get_ports_and_sgs(context, tail))

    LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids)

    if not port_ids:
        # if port_ids is empty, avoid querying to DB to ask it for nothing
        return []
    sg_ids_by_port = get_sg_ids_grouped_by_port(context, port_ids)
    return [make_port_dict_with_security_groups(port, sg_ids)
            for port, sg_ids in six.iteritems(sg_ids_by_port)]
def get_sg_ids_grouped_by_port(context, port_ids):
    """Map each matching Port row to its list of security group ids.

    port_ids may contain full UUIDs or partial UUID prefixes; ports
    without any security group map to an empty list.
    """
    sg_ids_grouped_by_port = {}
    sg_binding_port = sg_models.SecurityGroupPortBinding.port_id

    with db_api.context_manager.reader.using(context):
        # partial UUIDs must be individually matched with startswith.
        # full UUIDs may be matched directly in an IN statement
        partial_uuids = set(port_id for port_id in port_ids
                            if not uuidutils.is_uuid_like(port_id))
        full_uuids = set(port_ids) - partial_uuids
        or_criteria = [models_v2.Port.id.startswith(port_id)
                       for port_id in partial_uuids]
        if full_uuids:
            or_criteria.append(models_v2.Port.id.in_(full_uuids))

        query = context.session.query(
            models_v2.Port,
            sg_models.SecurityGroupPortBinding.security_group_id)
        # Outer join keeps ports that have no security group binding.
        query = query.outerjoin(sg_models.SecurityGroupPortBinding,
                                models_v2.Port.id == sg_binding_port)
        query = query.filter(or_(*or_criteria))

        for port, sg_id in query:
            if port not in sg_ids_grouped_by_port:
                sg_ids_grouped_by_port[port] = []
            if sg_id:
                sg_ids_grouped_by_port[port].append(sg_id)
    return sg_ids_grouped_by_port
def make_port_dict_with_security_groups(port, sec_groups):
    """Build a port dict from *port* and attach *sec_groups* to it."""
    core_plugin = directory.get_plugin()
    port_dict = core_plugin._make_port_dict(port)
    port_dict['security_groups'] = sec_groups
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    # flatten the fixed IP entries down to bare addresses
    port_dict['fixed_ips'] = [fixed_ip['ip_address']
                              for fixed_ip in port['fixed_ips']]
    return port_dict
def get_port_binding_host(context, port_id):
    """Return the host of the single active binding matching *port_id*.

    *port_id* may be a UUID prefix. Returns None (after logging) when no
    active binding exists or the prefix is ambiguous.
    """
    try:
        with db_api.context_manager.reader.using(context):
            binding = (context.session.query(models.PortBinding).
                       filter(models.PortBinding.port_id.startswith(port_id),
                              models.PortBinding.status == n_const.ACTIVE).
                       one())
    except exc.NoResultFound:
        LOG.debug("No active binding found for port %(port_id)s",
                  {'port_id': port_id})
        return
    except exc.MultipleResultsFound:
        LOG.error("Multiple ports have port_id starting with %s",
                  port_id)
        return
    return binding.host
@db_api.context_manager.reader
def generate_distributed_port_status(context, port_id):
    # an OR'ed value of status assigned to parent port from the
    # distributedportbinding bucket
    bindings = context.session.query(models.DistributedPortBinding).filter(
        models.DistributedPortBinding.port_id == port_id)
    status = n_const.PORT_STATUS_BUILD
    for binding in bindings:
        # ACTIVE wins outright; DOWN beats the BUILD default
        if binding.status == n_const.PORT_STATUS_ACTIVE:
            return binding.status
        if binding.status == n_const.PORT_STATUS_DOWN:
            status = binding.status
    return status
def get_distributed_port_binding_by_host(context, port_id, host):
    """Return the distributed binding for *port_id* on *host*, or None.

    *port_id* may be a UUID prefix; a miss is logged at debug level.
    """
    with db_api.context_manager.reader.using(context):
        result = (context.session.query(models.DistributedPortBinding).
                  filter(models.DistributedPortBinding.host == host,
                         models.DistributedPortBinding.port_id.startswith(
                             port_id)).first())
        if result is None:
            LOG.debug("No binding for distributed port %(port_id)s with host "
                      "%(host)s", {'port_id': port_id, 'host': host})
        return result
def get_distributed_port_bindings(context, port_id):
    """Return every distributed binding whose port_id starts with *port_id*."""
    with db_api.context_manager.reader.using(context):
        query = context.session.query(models.DistributedPortBinding)
        query = query.filter(
            models.DistributedPortBinding.port_id.startswith(port_id))
        results = query.all()
        if not results:
            LOG.debug("No bindings for distributed port %s", port_id)
        return results
@db_api.context_manager.reader
def partial_port_ids_to_full_ids(context, partial_ids):
    """Takes a list of the start of port IDs and returns full IDs.
    Returns dictionary of partial IDs to full IDs if a single match
    is found.
    """
    mapping = {}
    query = (context.session.query(models_v2.Port.id).
             filter(or_(*[models_v2.Port.id.startswith(p)
                          for p in partial_ids])))
    full_ids = [row[0] for row in query]
    for prefix in partial_ids:
        matches = [full_id for full_id in full_ids
                   if full_id.startswith(prefix)]
        if len(matches) == 1:
            # unambiguous — record the single full ID
            mapping[prefix] = matches[0]
        elif not matches:
            LOG.info("No ports have port_id starting with %s", prefix)
        else:
            LOG.error("Multiple ports have port_id starting with %s",
                      prefix)
    return mapping
@db_api.context_manager.reader
def get_port_db_objects(context, port_ids):
    """Takes a list of port_ids and returns matching port db objects.
    return format is a dictionary keyed by passed in IDs with db objects
    for values or None if the port was not present.
    """
    # pre-seed every requested ID with None so misses are explicit
    result = dict.fromkeys(port_ids)
    query = (context.session.query(models_v2.Port).
             filter(models_v2.Port.id.in_(port_ids)))
    for db_port in query:
        result[db_port.id] = db_port
    return result
@db_api.context_manager.reader
def is_dhcp_active_on_any_subnet(context, subnet_ids):
    """Return True iff at least one of *subnet_ids* has DHCP enabled."""
    if not subnet_ids:
        return False
    matches = (context.session.query(models_v2.Subnet).
               enable_eagerloads(False).
               filter_by(enable_dhcp=True).
               filter(models_v2.Subnet.id.in_(subnet_ids)).count())
    return bool(matches)
def _prevent_segment_delete_with_port_bound(resource, event, trigger,
                                            context, segment,
                                            for_net_delete=False):
    """Raise exception if there are any ports bound with segment_id."""
    if for_net_delete:
        # don't check for network deletes
        return
    segment_id = segment['id']
    with db_api.context_manager.reader.using(context):
        port_query = (context.session.query(models_v2.Port).
                      join(models.PortBindingLevel,
                           models.PortBindingLevel.port_id ==
                           models_v2.Port.id).
                      filter(models.PortBindingLevel.segment_id ==
                             segment_id))
        bound_port_ids = [bound_port.id for bound_port in port_query]
    # There are still some ports in the segment, segment should not be deleted
    # TODO(xiaohhui): Should we delete the dhcp port automatically here?
    if bound_port_ids:
        reason = _("The segment is still bound with port(s) "
                   "%s") % ", ".join(bound_port_ids)
        raise seg_exc.SegmentInUse(segment_id=segment_id, reason=reason)
def subscribe():
    """Register the segment BEFORE_DELETE guard with the callback registry."""
    registry.subscribe(_prevent_segment_delete_with_port_bound,
                       resources.SEGMENT,
                       events.BEFORE_DELETE)
# Module import side effect: install the guard as soon as this is loaded.
subscribe()
| |
import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.
    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    redirect_to = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=redirect_to, host=request.get_host()):
        # fall back to the referrer, then to the site root
        redirect_to = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=redirect_to, host=request.get_host()):
            redirect_to = '/'
    response = http.HttpResponseRedirect(redirect_to)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # prefer the session when available, otherwise use a cookie
            if hasattr(request, 'session'):
                request.session['_language'] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
def get_formats():
    """
    Return all format strings required for i18n to work, keyed by
    setting name, with every value coerced to text (lists element-wise).
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    # get_format() already resolves each setting against the active locale's
    # format modules, so the previous outer loop over
    # [settings] + get_format_modules(reverse=True) just recomputed the same
    # dict once per module (the loop variable was never used). One pass over
    # the setting names produces an identical result.
    formats = {}
    for attr in FORMAT_SETTINGS:
        value = get_format(attr)
        if isinstance(value, (six.string_types, int)):
            formats[attr] = smart_text(value)
        elif isinstance(value, (tuple, list)):
            formats[attr] = [smart_text(v) for v in value]
        # other value types are deliberately omitted, as before
    return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render the JS i18n catalog template into a text/javascript response."""
    def indent(s):
        # shift the JSON body two spaces so it lines up inside the template
        return s.replace('\n', '\n  ')
    catalog_str = None
    if catalog:
        catalog_str = indent(json.dumps(catalog, sort_keys=True, indent=2))
    formats_str = indent(json.dumps(get_formats(), sort_keys=True, indent=2))
    context = Context({
        'catalog_str': catalog_str,
        'formats_str': formats_str,
        'plural': plural,
    })
    rendered = Template(js_catalog_template).render(context)
    return http.HttpResponse(rendered, 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
    """Build the translation catalog for *locale*/*domain*.

    *packages* is filtered against installed app names (plus
    'django.conf'). Catalogs are layered: English defaults first, then
    settings.LANGUAGE_CODE, then the selected locale. Returns
    (catalog, plural) where catalog maps msgid -> translation (plural
    entries become lists indexed by plural form) and plural is the
    'plural=' expression from the Plural-Forms header, or None.
    """
    default_locale = to_locale(settings.LANGUAGE_CODE)
    app_configs = apps.get_app_configs()
    allowable_packages = set(app_config.name for app_config in app_configs)
    allowable_packages.add('django.conf')
    packages = [p for p in packages if p in allowable_packages]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            # plural entry: key is (msgid, plural-form-index)
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # Bug fix: size each plural list by its own msgid's maximum form
        # index (maxcnts[k]). The old code used `msgid`, a stale leftover
        # from the loop above, so every entry was sized by whichever msgid
        # happened to be processed last.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Returns "identity" versions of the JavaScript i18n functions -- i.e.,
    versions that don't actually do anything.
    """
    # rendering with no catalog/plural yields the identity implementations
    return render_javascript_catalog(catalog=None, plural=None)
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.
    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.
    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    locale = to_locale(get_language())
    # a ?language=xx query parameter overrides the active language
    if request.GET and 'language' in request.GET:
        requested = request.GET['language']
        if check_for_language(requested):
            locale = to_locale(requested)
    if packages is None:
        packages = ['django.conf']
    elif isinstance(packages, six.string_types):
        packages = packages.split('+')
    catalog, plural = get_javascript_catalog(locale, domain, packages)
    return render_javascript_catalog(catalog, plural)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.